From 86dc7cc804d47d33aca7daf8ddf4107d6eaa5f3c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 Nov 2023 13:34:25 -0500 Subject: [PATCH 001/222] Kafka init Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../assigned_hostgroups.local.map.yaml | 1 + pillar/kafka/nodes.sls | 30 +++++ pillar/logstash/nodes.sls | 2 +- pillar/top.sls | 10 ++ salt/allowed_states.map.jinja | 11 +- salt/docker/defaults.yaml | 8 ++ salt/docker/soc_docker.yaml | 1 + salt/firewall/containers.map.jinja | 5 + salt/firewall/defaults.yaml | 94 +++++++++++++ salt/firewall/soc_firewall.yaml | 64 +++++++++ salt/kafka/config.sls | 101 ++++++++++++++ salt/kafka/enabled.sls | 46 +++++++ salt/kafka/etc/server.properties.jinja | 123 ++++++++++++++++++ salt/kafka/init.sls | 9 ++ salt/kafka/sostatus.sls | 21 +++ salt/kafka/storage.sls | 31 +++++ .../sbin_jinja/so-kafka-generate-keystore | 16 +++ salt/logstash/defaults.yaml | 4 + salt/logstash/enabled.sls | 5 +- .../config/so/0800_input_kafka.conf.jinja | 26 ++++ .../config/so/0899_output_kafka.conf.jinja | 22 ++++ salt/logstash/soc_logstash.yaml | 2 + salt/manager/tools/sbin/so-firewall-minion | 3 + salt/manager/tools/sbin/so-kafka-clusterid | 22 ++++ salt/manager/tools/sbin/so-minion | 4 + salt/ssl/init.sls | 122 +++++++++++++++++ salt/ssl/remove.sls | 17 +++ salt/top.sls | 10 ++ salt/vars/kafkanode.map.jinja | 1 + setup/so-functions | 8 +- setup/so-setup | 10 ++ setup/so-whiptail | 5 +- 32 files changed, 828 insertions(+), 6 deletions(-) create mode 100644 pillar/kafka/nodes.sls create mode 100644 salt/kafka/config.sls create mode 100644 salt/kafka/enabled.sls create mode 100644 salt/kafka/etc/server.properties.jinja create mode 100644 salt/kafka/init.sls create mode 100644 salt/kafka/sostatus.sls create mode 100644 salt/kafka/storage.sls create mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore create mode 100644 salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja create mode 100644 salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja create mode 100644 salt/manager/tools/sbin/so-kafka-clusterid create mode 100644 salt/vars/kafkanode.map.jinja diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index 07f389af03..c6eb199c37 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -21,3 +21,4 @@ role: standalone: searchnode: sensor: + kafkanode: \ No newline at end of file diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls new file mode 100644 index 0000000000..a7d97ac9ce --- /dev/null +++ b/pillar/kafka/nodes.sls @@ -0,0 +1,30 @@ +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode', fun='network.ip_addrs', tgt_type='compound') %} +{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} + +{% set existing_ids = [] %} +{% for node in pillar_kafkanodes.values() %} + {% if node.get('id') %} + {% do existing_ids.append(node['nodeid']) %} + {% endif %} +{% endfor %} +{% set all_possible_ids = range(1, 256)|list %} + +{% set available_ids = [] %} +{% for id in all_possible_ids %} + {% if id not in existing_ids %} + {% do available_ids.append(id) %} + {% endif %} +{% endfor %} + +{% set final_nodes = pillar_kafkanodes.copy() %} + +{% for minionid, ip in current_kafkanodes.items() %} + {% set hostname = minionid.split('_')[0] %} + {% if hostname not in final_nodes %} + {% set new_id 
= available_ids.pop(0) %} + {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} + {% endif %} +{% endfor %} + +kafka: + nodes: {{ final_nodes|tojson }} \ No newline at end of file diff --git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 8d3bdab650..3b75a5caec 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -2,7 +2,7 @@ {% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %} {% for minionid, ip in salt.saltutil.runner( 'mine.get', - tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet or G@role:so-kafkanode ', fun='network.ip_addrs', tgt_type='compound') | dictsort() %} diff --git a/pillar/top.sls b/pillar/top.sls index 4893c44f90..49e493ec83 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -65,6 +65,7 @@ base: - soctopus.adv_soctopus - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + - kafka.nodes '*_sensor': - healthcheck.sensor @@ -241,6 +242,15 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + '*_kafkanode': + - logstash.nodes + - logstash.soc_logstash + - logstash.adv_logstash + - minions.{{ grains.id }} + - minions.adv_{{ grains.id }} + - secrets + - kafka.nodes + '*_import': - secrets - elasticsearch.index_templates diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index a3c5c75ab6..11dfde8246 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -187,6 +187,15 @@ 'schedule', 'docker_clean' ], + 'so-kafkanode': [ + 'kafka', + 'logstash', + 'ssl', + 'telegraf', + 'firewall', + 'schedule', + 'docker_clean' + ], 'so-desktop': [ ], }, grain='role') %} @@ -203,7 +212,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} {% do allowed_states.append('elasticsearch') %} {% endif %} diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index e39feaf06f..3155841c95 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -201,3 +201,11 @@ docker: custom_bind_mounts: [] extra_hosts: [] extra_env: [] + 'so-kafka': + final_octet: 88 + port_bindings: + - 0.0.0.0:9092:9092 + - 0.0.0.0:2181:2181 + custom_bind_mounts: [] + extra_hosts: [] + extra_env: [] \ No newline at end of file diff --git a/salt/docker/soc_docker.yaml b/salt/docker/soc_docker.yaml index d227a3e85b..87751010ea 100644 --- a/salt/docker/soc_docker.yaml +++ b/salt/docker/soc_docker.yaml @@ -68,3 +68,4 @@ docker: so-steno: *dockerOptions so-suricata: *dockerOptions so-zeek: *dockerOptions + so-kafka: *dockerOptions \ No newline at end of file diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 617b4a2168..b19f66355d 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -87,6 +87,11 @@ 'so-logstash', 'so-redis', ] %} +{% elif GLOBALS.role == 'so-kafkanode' %} +{% set NODE_CONTAINERS = [ + 'so-logstash', + 'so-kafka', +] %} {% elif GLOBALS.role == 'so-idh' %} {% set NODE_CONTAINERS = [ diff --git 
a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index ff127c419e..112e0eaaa4 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -19,6 +19,7 @@ firewall: manager: [] managersearch: [] receiver: [] + kafkanode: [] searchnode: [] self: [] sensor: [] @@ -90,6 +91,11 @@ firewall: tcp: - 8086 udp: [] + kafka: + tcp: + - 9092 + - 9093 + udp: [] kibana: tcp: - 5601 @@ -441,6 +447,15 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + kafkanode: + portgroups: + - yum + - docker_registry + - influxdb + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update + - sensoroni analyst: portgroups: - nginx @@ -513,6 +528,9 @@ firewall: receiver: portgroups: - salt_manager + kafkanode: + portgroups: + - salt_manager desktop: portgroups: - salt_manager @@ -629,6 +647,15 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + kafkanode: + portgroups: + - yum + - docker_registry + - influxdb + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update + - sensoroni analyst: portgroups: - nginx @@ -1339,6 +1366,73 @@ firewall: portgroups: [] customhostgroup9: portgroups: [] + kafkanode: + chain: + DOCKER-USER: + hostgroups: + searchnode: + portgroups: + - kafka + kafkanode: + portgroups: + - kafka + customhostgroup0: + portgroups: [] + customhostgroup1: + portgroups: [] + customhostgroup2: + portgroups: [] + customhostgroup3: + portgroups: [] + customhostgroup4: + portgroups: [] + customhostgroup5: + portgroups: [] + customhostgroup6: + portgroups: [] + customhostgroup7: + portgroups: [] + customhostgroup8: + portgroups: [] + customhostgroup9: + portgroups: [] + INPUT: + hostgroups: + anywhere: + portgroups: + - ssh + dockernet: + portgroups: + - all + localhost: + portgroups: + - all + self: + portgroups: + - syslog + syslog: + portgroups: + - syslog + customhostgroup0: + portgroups: [] + customhostgroup1: + portgroups: [] + customhostgroup2: + portgroups: [] + customhostgroup3: + portgroups: [] + customhostgroup4: + portgroups: [] + customhostgroup5: + portgroups: [] + customhostgroup6: + portgroups: [] + customhostgroup7: + portgroups: [] + customhostgroup8: + portgroups: [] + customhostgroup9: + portgroups: [] idh: chain: DOCKER-USER: diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 209484b6ef..7d250737a2 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -34,6 +34,7 @@ firewall: heavynode: *hostgroupsettings idh: *hostgroupsettings import: *hostgroupsettings + kafkanode: *hostgroupsettings localhost: *ROhostgroupsettingsadv manager: *hostgroupsettings managersearch: *hostgroupsettings @@ -115,6 +116,9 @@ firewall: influxdb: tcp: *tcpsettings udp: *udpsettings + kafka: + tcp: *tcpsettings + udp: *udpsettings kibana: tcp: *tcpsettings udp: *udpsettings @@ -363,6 +367,8 @@ firewall: portgroups: *portgroupsdocker endgame: portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -454,6 +460,8 @@ firewall: portgroups: *portgroupsdocker syslog: portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -938,6 +946,62 @@ firewall: portgroups: *portgroupshost customhostgroup9: portgroups: *portgroupshost + kafkanode: + chain: + DOCKER-USER: + hostgroups: + searchnode: + portgroups: *portgroupsdocker + kafkanode: + portgroups: *portgroupsdocker + customhostgroup0: + portgroups: *portgroupsdocker + 
customhostgroup1: + portgroups: *portgroupsdocker + customhostgroup2: + portgroups: *portgroupsdocker + customhostgroup3: + portgroups: *portgroupsdocker + customhostgroup4: + portgroups: *portgroupsdocker + customhostgroup5: + portgroups: *portgroupsdocker + customhostgroup6: + portgroups: *portgroupsdocker + customhostgroup7: + portgroups: *portgroupsdocker + customhostgroup8: + portgroups: *portgroupsdocker + customhostgroup9: + portgroups: *portgroupsdocker + INPUT: + hostgroups: + anywhere: + portgroups: *portgroupshost + dockernet: + portgroups: *portgroupshost + localhost: + portgroups: *portgroupshost + customhostgroup0: + portgroups: *portgroupshost + customhostgroup1: + portgroups: *portgroupshost + customhostgroup2: + portgroups: *portgroupshost + customhostgroup3: + portgroups: *portgroupshost + customhostgroup4: + portgroups: *portgroupshost + customhostgroup5: + portgroups: *portgroupshost + customhostgroup6: + portgroups: *portgroupshost + customhostgroup7: + portgroups: *portgroupshost + customhostgroup8: + portgroups: *portgroupshost + customhostgroup9: + portgroups: *portgroupshost idh: chain: diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls new file mode 100644 index 0000000000..8caaa01cd1 --- /dev/null +++ b/salt/kafka/config.sls @@ -0,0 +1,101 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set kafka_ips_logstash = [] %} +{% set kafka_ips_kraft = [] %} +{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set kafka_ip = GLOBALS.node_ip %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname) %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{# Create list for kafka <-> logstash/searchnode communcations #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set kafka_server_list = "','".join(kafka_ips_logstash) %} + +{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} +{% endfor %} +{% set kraft_server_list = "','".join(kafka_ips_kraft) %} + + +include: + - ssl + +kafka_group: + group.present: + - name: kafka + - gid: 960 + +kafka: + user.present: + - uid: 960 + - gid: 960 + +{# Future tools to query kafka directly / show consumer groups +kafka_sbin_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin + - user: 960 + - group: 960 + - file_mode: 755 #} + +kakfa_log_dir: + file.directory: + - name: /opt/so/log/kafka + - user: 960 + - group: 960 + - makedirs: True + +kafka_data_dir: + file.directory: + - name: /nsm/kafka/data + - user: 960 + - group: 960 + - makedirs: True + +{# When docker container is created an added to registry. 
Update so-kafka-generate-keystore script #} +kafka_keystore_script: + cmd.script: + - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore + - tempalte: jinja + - cwd: /opt/so + - defaults: + GLOBALS: {{ GLOBALS }} + +kafka_kraft_server_properties: + file.managed: + - source: salt://kafka/etc/server.properties.jinja + - name: /opt/so/conf/kafka/server.properties + - template: jinja + - defaults: + kafka_nodeid: {{ kafka_nodeid }} + kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} + kafka_ip: {{ kafka_ip }} + - user: 960 + - group: 960 + - makedirs: True + - show_changes: False + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls new file mode 100644 index 0000000000..1bf7dcf8bc --- /dev/null +++ b/salt/kafka/enabled.sls @@ -0,0 +1,46 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} + +include: + - kafka.sostatus + - kafka.config + - kafka.storage + +so-kafka: + docker_container.running: + - image: so-kafka + - hostname: so-kafka + - name: so-kafka + - networks: + - sobridge: + - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} + - user: kafka + - port_bindings: + {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} + - {{ BINDING }} + {% endfor %} + - binds: + - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /nsm/kafka/data/:/nsm/kafka/data/:rw + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + +delete_so-kafka_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja new file mode 100644 index 0000000000..ad5ac67a9f --- /dev/null +++ b/salt/kafka/etc/server.properties.jinja @@ -0,0 +1,123 @@ +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id={{ kafka_nodeid }} + +# The connect string for the controller quorum +controller.quorum.voters={{ kraft_controller_quorum_voters }} + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. 
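# NOTE (illustrative example, not part of the committed file): KRaft expects each entry in
# controller.quorum.voters to be of the form id@host:port, so with hypothetical hosts
# kafka1/kafka2 holding node ids 1/2 the rendered line would look like:
#   controller.quorum.voters=1@kafka1:9093,2@kafka2:9093
# The jinja in this first commit builds the voter string without the :9093 suffix; the later
# "Fix kafka keystore script" commit in this series appends ":9093" in salt/kafka/config.sls.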
+# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=BROKER + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=BROKER://{{ kafka_ip }}:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL + +#SSL configuration +ssl.keystore.location=/etc/pki/kafka.jks +ssl.keystore.pasword=changeit +ssl.keystore.type=JKS +ssl.truststore.location=/etc/pki/java/sos/cacerts +ssl.truststore.password=changeit + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/nsm/kafka/data + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. 
Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 \ No newline at end of file diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls new file mode 100644 index 0000000000..653cd4b88e --- /dev/null +++ b/salt/kafka/init.sls @@ -0,0 +1,9 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{# Create map.jinja to enable / disable kafka from UI #} +{# Temporarily just enable kafka #} +include: + - kafka.enabled diff --git a/salt/kafka/sostatus.sls b/salt/kafka/sostatus.sls new file mode 100644 index 0000000000..4c75199649 --- /dev/null +++ b/salt/kafka/sostatus.sls @@ -0,0 +1,21 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} + +append_so-kafka_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kafka + - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls new file mode 100644 index 0000000000..dc114ef4fa --- /dev/null +++ b/salt/kafka/storage.sls @@ -0,0 +1,31 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} + +{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} +{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} +{% else %} +kafka_storage_init: + cmd.run: + - name: | + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties +kafka_rm_kafkainit: + cmd.run: + - name: | + docker rm so-kafkainit +{% endif %} + + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + +{% endif %} \ No newline at end of file diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore new file mode 100644 index 0000000000..69bb6ad871 --- /dev/null +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -0,0 +1,16 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. /usr/sbin/so-common + +if [ ! 
-f /etc/pki/kafka.jks ]; then + docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srsstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt + docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks + docker rm so-kafka-keystore +else + exit 0 +fi \ No newline at end of file diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index e4c18cc647..b7382090e6 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -19,6 +19,8 @@ logstash: - search fleet: - fleet + kafkanode: + - kafkanode defined_pipelines: fleet: - so/0012_input_elastic_agent.conf.jinja @@ -37,6 +39,8 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja + kafkanode: + - so/0899_output_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index c76f81d214..96e29b25a1 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -75,10 +75,13 @@ so-logstash: {% else %} - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% endif %} - {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %} + {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro {% endif %} + {% if GLOBALS.role in ['so-kafkanode'] %} + - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro + {% endif %} {% if GLOBALS.role == 'so-eval' %} - /nsm/zeek:/nsm/zeek:ro - /nsm/suricata:/suricata:ro diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja new file mode 100644 index 0000000000..c1429319a5 --- /dev/null +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -0,0 +1,26 @@ +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{% set bootstrap_servers = "','".join(broker_ips) %} + + +#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes +input { + kafka { + codec => json + #Can ingest multiple topics. Set to a value from SOC UI? + topics => ['logstash-topic',] + group_id => 'searchnodes' + security_protocol => 'SSL' + bootstrap_servers => {{ bootstrap_servers }} + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => '' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + # Set password as a pillar to avoid bad optics? 
This is default truststore for grid + ssl_truststore_password => 'changeit' + } +} \ No newline at end of file diff --git a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja new file mode 100644 index 0000000000..ff9a6f6ee7 --- /dev/null +++ b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja @@ -0,0 +1,22 @@ +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{% set bootstrap_servers = "','".join(broker_ips) %} + +#Run on kafka broker logstash writes to topic 'logstash-topic' +output { + kafka { + codec => json + topic_id => 'logstash-topic' + bootstrap_servers => '{{ bootstrap_servers }}' + security_protocol => 'SSL' + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => '' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + ssl_truststore_password => 'changeit' + } +} \ No newline at end of file diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index bcb99bad55..144094eb1a 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -16,6 +16,7 @@ logstash: manager: *assigned_pipelines managersearch: *assigned_pipelines fleet: *assigned_pipelines + kafkanode: *assigned_pipelines defined_pipelines: receiver: &defined_pipelines description: List of pipeline configurations assign to this group. @@ -26,6 +27,7 @@ logstash: fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines + kafkanode: *defined_pipelines custom0: *defined_pipelines custom1: *defined_pipelines custom2: *defined_pipelines diff --git a/salt/manager/tools/sbin/so-firewall-minion b/salt/manager/tools/sbin/so-firewall-minion index 66a0afcea7..3357e5185a 100755 --- a/salt/manager/tools/sbin/so-firewall-minion +++ b/salt/manager/tools/sbin/so-firewall-minion @@ -79,6 +79,9 @@ fi 'RECEIVER') so-firewall includehost receiver "$IP" --apply ;; + 'KAFKANODE') + so-firewall includehost kafkanode "$IP" --apply + ;; 'DESKTOP') so-firewall includehost desktop "$IP" --apply ;; diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid new file mode 100644 index 0000000000..64833a0d24 --- /dev/null +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -0,0 +1,22 @@ +#!/bin/bash + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +local_salt_dir=/opt/so/saltstack/local + +if [[ -f /usr/sbin/so-common ]]; then + source /usr/sbin/so-common +else + source $(dirname $0)/../../../common/tools/sbin/so-common +fi + +if ! 
grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls +else + echo 'kafka_cluster_id exists' + salt-call pillar.get secrets +fi \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index edc0b14040..c610985897 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -556,6 +556,10 @@ function createRECEIVER() { add_telegraf_to_minion } +function createKAFKANODE() { + add_logstash_to_minion + # add_telegraf_to_minion +} function testConnection() { retry 15 3 "salt '$MINION_ID' test.ping" True diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index ef93a9072f..2a71cd8538 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -664,6 +664,128 @@ elastickeyperms: {%- endif %} +# Roles will need to be modified. Below is just for testing encrypted kafka pipelines +# Remove so-manager. Just inplace for testing +{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} +# Create a cert for Redis encryption +kafka_key: + x509.private_key_managed: + - name: /etc/pki/kafka.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + - prereq: + - x509: /etc/pki/kafka.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_crt: + x509.certificate_managed: + - name: /etc/pki/kafka.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: elasticfleet + - private_key: /etc/pki/kafka.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka.key + +# Kafka needs a keystore so just creating a new key / cert for that purpose +etc_kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kakfa-logstash.key') -%} + - prereq: + - x509: etc_kafka_logstash_crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +etc_kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - signing_policy: elasticfleet + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:" + - onchanges: + - x509: etc_kafka_logstash_key + +kafka_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_cert_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: 
/etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 939 + +kafka_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 + +{% endif %} {% else %} {{sls}}_state_not_allowed: diff --git a/salt/ssl/remove.sls b/salt/ssl/remove.sls index 43a2452888..bb4562300b 100644 --- a/salt/ssl/remove.sls +++ b/salt/ssl/remove.sls @@ -67,3 +67,20 @@ fleet_crt: fbcertdir: file.absent: - name: /opt/so/conf/filebeat/etc/pki + +kafka_crt: + file.absent: + - name: /etc/pki/kafka.crt +kafka_key: + file.absent: + - name: /etc/pki/kafka.key + +kafka_logstash_crt: + file.absent: + - name: /etc/pki/kafka-logstash.crt +kafka_logstash_key: + file.absent: + - name: /etc/pki/kafka-logstash.key +kafka_logstash_keystore: + file.absent: + - name: /etc/pki/kafka-logstash.p12 diff --git a/salt/top.sls b/salt/top.sls index 2323731a12..cd1b92e5c9 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -255,6 +255,16 @@ base: - elasticfleet.install_agent_grid - docker_clean + '*_kafkanode and G@saltversion:{{saltversion}}': + - match: compound + - kafka + - logstash + - ssl + - telegraf + - firewall + - docker_clean + - elasticfleet.install_agent_grid + '*_idh and G@saltversion:{{saltversion}}': - match: compound - ssl diff --git a/salt/vars/kafkanode.map.jinja b/salt/vars/kafkanode.map.jinja new file mode 100644 index 0000000000..396cefcc90 --- /dev/null +++ b/salt/vars/kafkanode.map.jinja @@ -0,0 +1 @@ +{% set ROLE_GLOBALS = {} %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index fc0876248a..76887c81c2 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1242,6 +1242,7 @@ generate_passwords(){ REDISPASS=$(get_random_value) SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) + KAFKACLUSTERID=$(get_random_value 22) } generate_interface_vars() { @@ -1269,7 +1270,7 @@ get_redirect() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'KAFKANODE') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac @@ -1663,6 +1664,8 @@ process_installtype() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true + elif [ "$install_type" = 'KAFKANODE' ]; then + is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then if [ "$setup_type" != 'desktop' ]; then exec bash so-setup desktop @@ -2105,7 +2108,8 @@ secrets_pillar(){ " playbook_automation: $PLAYBOOKAUTOMATIONPASS"\ " playbook_automation_api_key: "\ " import_pass: $IMPORTPASS"\ - " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls + " influx_pass: $INFLUXPASS"\ + " kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls fi } diff --git a/setup/so-setup b/setup/so-setup index 14d6b2304b..bc64cd9d19 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -574,6 +574,16 @@ if ! 
[[ -f $install_opt_file ]]; then check_manager_connection set_minion_info whiptail_end_settings + + elif [[ $is_kafka ]]; then + info "Setting up as node type Kafka broker" + #check_requirements "kafka" + networking_needful + collect_mngr_hostname + add_mngr_ip_to_hosts + check_manager_connection + set_minion_info + whiptail_end_settings fi if [[ $waitforstate ]]; then diff --git a/setup/so-whiptail b/setup/so-whiptail index c55e2db8f4..4553ebd335 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -640,13 +640,14 @@ whiptail_install_type_dist_existing() { Note: Heavy nodes (HEAVYNODE) are NOT recommended for most users. EOM - install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 6 \ + install_type=$(whiptail --title "$whiptail_title" --menu "$node_msg" 19 75 7 \ "SENSOR" "Create a forward only sensor " \ "SEARCHNODE" "Add a search node with parsing " \ "FLEET" "Dedicated Elastic Fleet Node " \ "HEAVYNODE" "Sensor + Search Node " \ "IDH" "Intrusion Detection Honeypot Node " \ "RECEIVER" "Receiver Node " \ + "KAFKANODE" "Kafka Broker + Kraft controller" \ 3>&1 1>&2 2>&3 # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO @@ -677,6 +678,8 @@ whiptail_install_type_dist_existing() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true + elif [ "$install_type" = 'KAFKANODE' ]; then + is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then if [ "$setup_type" != 'desktop' ]; then exec bash so-setup desktop From 26abe9067154676df31060bec3b3981ca5af0e05 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 12:19:46 -0400 Subject: [PATCH 002/222] Removed duplicate kafka setup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- setup/so-setup | 9 --------- 1 file changed, 9 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index a50fea19d7..191b25ef20 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -629,15 +629,6 @@ if ! 
[[ -f $install_opt_file ]]; then set_minion_info whiptail_end_settings - elif [[ $is_kafka ]]; then - info "Setting up as node type Kafka broker" - #check_requirements "kafka" - networking_needful - collect_mngr_hostname - add_mngr_ip_to_hosts - check_manager_connection - set_minion_info - whiptail_end_settings fi if [[ $waitforstate ]]; then From 780ad9eb10ebdd20258f0b3daa875167eb6b78b0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 15:50:25 -0400 Subject: [PATCH 003/222] add kafka to manager nodes --- salt/allowed_states.map.jinja | 3 ++- salt/top.sls | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 460e3e9eb3..6fa60c2ea2 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -101,7 +101,8 @@ 'utility', 'schedule', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-managersearch': [ 'salt.master', diff --git a/salt/top.sls b/salt/top.sls index b418768ad4..289dd462b0 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -106,6 +106,7 @@ base: - utility - elasticfleet - stig + - kafka '*_standalone and G@saltversion:{{saltversion}}': - match: compound From f7534a0ae3dbc8ec34556571afb0bdbc314d6172 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 16:01:12 -0400 Subject: [PATCH 004/222] make manager download so-kafka container --- salt/common/tools/sbin/so-image-common | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 752ec20e08..faf8d50a94 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -50,6 +50,7 @@ container_list() { "so-idh" "so-idstools" "so-influxdb" + "so-kafka" "so-kibana" "so-kratos" "so-logstash" From 1b49c8540e466f9b9c602b77245dac58cc5609e4 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:32:15 -0400 Subject: [PATCH 005/222] Fix kafka keystore script Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 208 +++++++++--------- .../sbin_jinja/so-kafka-generate-keystore | 2 +- 2 files changed, 109 insertions(+), 101 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 8caaa01cd1..ddf2777a18 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -1,101 +1,109 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
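{# NOTE (illustrative example, not part of the committed file): the rewritten config.sls below
   consumes the kafka:nodes pillar generated by pillar/kafka/nodes.sls earlier in this series.
   For two hypothetical brokers the pillar looks roughly like:
     kafka:
       nodes:
         kafka1: {"nodeid": 1, "ip": "10.0.0.11"}
         kafka2: {"nodeid": 2, "ip": "10.0.0.12"}
   which renders kraft_controller_quorum_voters as "1@kafka1:9093,2@kafka2:9093". #}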
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} - -{% set kafka_ips_logstash = [] %} -{% set kafka_ips_kraft = [] %} -{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} -{% set kafka_ip = GLOBALS.node_ip %} - -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set combined = [] %} -{% for hostname, data in nodes.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname) %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - -{# Create list for kafka <-> logstash/searchnode communcations #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set kafka_server_list = "','".join(kafka_ips_logstash) %} - -{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} -{% endfor %} -{% set kraft_server_list = "','".join(kafka_ips_kraft) %} - - -include: - - ssl - -kafka_group: - group.present: - - name: kafka - - gid: 960 - -kafka: - user.present: - - uid: 960 - - gid: 960 - -{# Future tools to query kafka directly / show consumer groups -kafka_sbin_tools: - file.recurse: - - name: /usr/sbin - - source: salt://kafka/tools/sbin - - user: 960 - - group: 960 - - file_mode: 755 #} - -kakfa_log_dir: - file.directory: - - name: /opt/so/log/kafka - - user: 960 - - group: 960 - - makedirs: True - -kafka_data_dir: - file.directory: - - name: /nsm/kafka/data - - user: 960 - - group: 960 - - makedirs: True - -{# When docker container is created an added to registry. Update so-kafka-generate-keystore script #} -kafka_keystore_script: - cmd.script: - - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore - - tempalte: jinja - - cwd: /opt/so - - defaults: - GLOBALS: {{ GLOBALS }} - -kafka_kraft_server_properties: - file.managed: - - source: salt://kafka/etc/server.properties.jinja - - name: /opt/so/conf/kafka/server.properties - - template: jinja - - defaults: - kafka_nodeid: {{ kafka_nodeid }} - kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} - kafka_ip: {{ kafka_ip }} - - user: 960 - - group: 960 - - makedirs: True - - show_changes: False - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set kafka_ips_logstash = [] %} +{% set kafka_ips_kraft = [] %} +{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set kafka_ip = GLOBALS.node_ip %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{# Create list for kafka <-> logstash/searchnode communcations #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set kafka_server_list = "','".join(kafka_ips_logstash) %} + +{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} +{% for node, node_data in kafkanodes.items() %} +{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} +{% endfor %} +{% set kraft_server_list = "','".join(kafka_ips_kraft) %} + + +include: + - ssl + +kafka_group: + group.present: + - name: kafka + - gid: 960 + +kafka: + user.present: + - uid: 960 + - gid: 960 + +{# Future tools to query kafka directly / show consumer groups +kafka_sbin_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin + - user: 960 + - group: 960 + - file_mode: 755 #} + +kafka_sbin_jinja_tools: + file.recurse: + - name: /usr/sbin + - source: salt://kafka/tools/sbin_jinja + - user: 960 + - group: 960 + - file_mode: 755 + - template: jinja + +kakfa_log_dir: + file.directory: + - name: /opt/so/log/kafka + - user: 960 + - group: 960 + - makedirs: True + +kafka_data_dir: + file.directory: + - name: /nsm/kafka/data + - user: 960 + - group: 960 + - makedirs: True + +kafka_keystore_script: + cmd.script: + - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore + - template: jinja + - cwd: /opt/so + - defaults: + GLOBALS: {{ GLOBALS }} + +kafka_kraft_server_properties: + file.managed: + - source: salt://kafka/etc/server.properties.jinja + - name: /opt/so/conf/kafka/server.properties + - template: jinja + - defaults: + kafka_nodeid: {{ kafka_nodeid }} + kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} + kafka_ip: {{ kafka_ip }} + - user: 960 + - group: 960 + - makedirs: True + - show_changes: False + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 69bb6ad871..1809c7a934 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -8,7 +8,7 @@ . /usr/sbin/so-common if [ ! 
-f /etc/pki/kafka.jks ]; then - docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool so-kafka -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srsstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -alias kafkastore -noprompt + docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks docker rm so-kafka-keystore else From b032eed22a94a80a62e83f4dfe79914523a7024c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:34:06 -0400 Subject: [PATCH 006/222] Update kafka to use manager docker registry Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/common/tools/sbin/so-image-common | 3 +- salt/kafka/enabled.sls | 91 +++++++++++++------------- 2 files changed, 48 insertions(+), 46 deletions(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 7900b3c529..d322c8e9b9 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -67,7 +67,8 @@ container_list() { "so-strelka-manager" "so-suricata" "so-telegraf" - "so-zeek" + "so-zeek" + "so-kafka" ) else TRUSTED_CONTAINERS=( diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 1bf7dcf8bc..31d375e235 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -1,46 +1,47 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'docker/docker.map.jinja' import DOCKER %} - -include: - - kafka.sostatus - - kafka.config - - kafka.storage - -so-kafka: - docker_container.running: - - image: so-kafka - - hostname: so-kafka - - name: so-kafka - - networks: - - sobridge: - - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - - user: kafka - - port_bindings: - {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - - {{ BINDING }} - {% endfor %} - - binds: - - /etc/pki/kafka.jks:/etc/pki/kafka.jks - - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties - -delete_so-kafka_so-status.disabled: - file.uncomment: - - name: /opt/so/conf/so-status/so-status.conf - - regex: ^so-kafka$ - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
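# NOTE (illustrative example, not part of the committed file): once enabled.sls has run, the
# broker can be spot-checked on the node with something like:
#   sudo so-status                        # so-kafka should now be listed
#   sudo docker logs so-kafka | tail      # expect a "Kafka Server started" style message
#   sudo docker exec so-kafka /kafka/bin/kafka-topics.sh --list \
#     --bootstrap-server <broker-ip>:9092 --command-config <ssl-client.properties>
# The last command assumes an SSL client properties file, which this series does not create,
# since the BROKER listener is SSL-only.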
+ +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'docker/docker.map.jinja' import DOCKER %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} + +include: + - kafka.sostatus + - kafka.config + - kafka.storage + +so-kafka: + docker_container.running: + - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} + - hostname: so-kafka + - name: so-kafka + - networks: + - sobridge: + - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} + - user: kafka + - port_bindings: + {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} + - {{ BINDING }} + {% endfor %} + - binds: + - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /nsm/kafka/data/:/nsm/kafka/data/:rw + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + +delete_so-kafka_so-status.disabled: + file.uncomment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 643d4831c10b09714021ae97e19e0ea679433b94 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:35:14 -0400 Subject: [PATCH 007/222] CRLF -> LF Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/init.sls | 18 +++++----- salt/kafka/sostatus.sls | 40 ++++++++++----------- salt/manager/tools/sbin/so-kafka-clusterid | 42 +++++++++++----------- 3 files changed, 50 insertions(+), 50 deletions(-) diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 653cd4b88e..903c66867f 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -1,9 +1,9 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{# Create map.jinja to enable / disable kafka from UI #} -{# Temporarily just enable kafka #} -include: - - kafka.enabled +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{# Create map.jinja to enable / disable kafka from UI #} +{# Temporarily just enable kafka #} +include: + - kafka.enabled diff --git a/salt/kafka/sostatus.sls b/salt/kafka/sostatus.sls index 4c75199649..37c868a461 100644 --- a/salt/kafka/sostatus.sls +++ b/salt/kafka/sostatus.sls @@ -1,21 +1,21 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} - -append_so-kafka_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-kafka - - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} + +append_so-kafka_so-status.conf: + file.append: + - name: /opt/so/conf/so-status/so-status.conf + - text: so-kafka + - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 64833a0d24..7199732473 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -1,22 +1,22 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -local_salt_dir=/opt/so/saltstack/local - -if [[ -f /usr/sbin/so-common ]]; then - source /usr/sbin/so-common -else - source $(dirname $0)/../../../common/tools/sbin/so-common -fi - -if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls -else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets +#!/bin/bash + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +local_salt_dir=/opt/so/saltstack/local + +if [[ -f /usr/sbin/so-common ]]; then + source /usr/sbin/so-common +else + source $(dirname $0)/../../../common/tools/sbin/so-common +fi + +if ! 
grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls +else + echo 'kafka_cluster_id exists' + salt-call pillar.get secrets fi \ No newline at end of file From 7f5741c43b9eac3f0409a1eaa7dbe44fc70140d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:36:22 -0400 Subject: [PATCH 008/222] Fix kafka storage setup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/storage.sls | 60 +++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index dc114ef4fa..778c054e27 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -1,31 +1,31 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} - -{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} -{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} -{% else %} -kafka_storage_init: - cmd.run: - - name: | - docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh so-kafka format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/server.properties -kafka_rm_kafkainit: - cmd.run: - - name: | - docker rm so-kafkainit -{% endif %} - - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} + +{# Initialize kafka storage if it doesn't already exist. 
Just looking for meta.properties in /nsm/kafka/data #} +{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} +{% else %} +kafka_storage_init: + cmd.run: + - name: | + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties +kafka_rm_kafkainit: + cmd.run: + - name: | + docker rm so-kafkainit +{% endif %} + + +{% else %} + +{{sls}}_state_not_allowed: + test.fail_without_changes: + - name: {{sls}}_state_not_allowed + {% endif %} \ No newline at end of file From 82830c81733a925bf9c6c4748946d53ba1039358 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 2 Apr 2024 16:37:39 -0400 Subject: [PATCH 009/222] Fix typos and fix error related to elasticsearch saltstate being called from logstash state. Logstash will be removed from kafkanodes in future Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/etc/server.properties.jinja | 244 ++++++++++++------------- salt/logstash/config.sls | 2 +- 2 files changed, 123 insertions(+), 123 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index ad5ac67a9f..eb60eda601 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,123 +1,123 @@ -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id={{ kafka_nodeid }} - -# The connect string for the controller quorum -controller.quorum.voters={{ kraft_controller_quorum_voters }} - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. -# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 - -# Name of listener used for communication between brokers. -inter.broker.listener.name=BROKER - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=BROKER://{{ kafka_ip }}:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. 
See the config documentation for more details -listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL - -#SSL configuration -ssl.keystore.location=/etc/pki/kafka.jks -ssl.keystore.pasword=changeit -ssl.keystore.type=JKS -ssl.truststore.location=/etc/pki/java/sos/cacerts -ssl.truststore.password=changeit - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/nsm/kafka/data - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. -num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. 
Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. -log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies +# This configuration file is intended for use in KRaft mode, where +# Apache ZooKeeper is not present. See config/kraft/README.md for details. +# + +############################# Server Basics ############################# + +# The role of this server. Setting this puts us in KRaft mode +process.roles=broker,controller + +# The node id associated with this instance's roles +node.id={{ kafka_nodeid }} + +# The connect string for the controller quorum +controller.quorum.voters={{ kraft_controller_quorum_voters }} + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. +# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. +# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), +# with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# Name of listener used for communication between brokers. +inter.broker.listener.name=BROKER + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". +advertised.listeners=BROKER://{{ kafka_ip }}:9092 + +# A comma-separated list of the names of the listeners used by the controller. +# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol +# This is required if running in KRaft mode. +controller.listener.names=CONTROLLER + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL + +#SSL configuration +ssl.keystore.location=/etc/pki/kafka.jks +ssl.keystore.password=changeit +ssl.keystore.type=JKS +ssl.truststore.location=/etc/pki/java/sos/cacerts +ssl.truststore.password=changeit + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/nsm/kafka/data + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. 
+num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. 
+log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies log.retention.check.interval.ms=300000 \ No newline at end of file diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 8a59c83b7e..402d1ef206 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -12,7 +12,7 @@ include: - ssl - {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} + {% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %} - elasticsearch {% endif %} From 4e142e0212ec15994c429cbd1b90c8936e5daca1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 2 Apr 2024 16:47:35 -0400 Subject: [PATCH 010/222] put alphabetical --- salt/common/tools/sbin/so-image-common | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index f1b8b86da7..03051cb5f7 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -66,7 +66,6 @@ container_list() { "so-suricata" "so-telegraf" "so-zeek" - "so-kafka" ) else TRUSTED_CONTAINERS=( From 639bf050812e98ce6155465314a6fa9614d615b8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 08:52:26 -0400 Subject: [PATCH 011/222] add so-manager to kafka.nodes pillar --- pillar/kafka/nodes.sls | 4 ++-- salt/kafka/config.sls | 11 +++++------ .../kafka/tools/sbin_jinja/so-kafka-generate-keystore | 2 +- 3 files changed, 8 insertions(+), 9 deletions(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index a7d97ac9ce..8fdd3ee079 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} @@ -27,4 +27,4 @@ {% endfor %} kafka: - nodes: {{ final_nodes|tojson }} \ No newline at end of file + nodes: {{ final_nodes|tojson }} diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index ddf2777a18..c8d6f66e01 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -63,6 +63,8 @@ kafka_sbin_jinja_tools: - group: 960 - file_mode: 755 - template: jinja + - defaults: + GLOBALS: {{ GLOBALS }} kakfa_log_dir: file.directory: @@ -79,12 +81,9 @@ kafka_data_dir: - makedirs: True kafka_keystore_script: - cmd.script: - - source: salt://kafka/tools/sbin_jinja/so-kafka-generate-keystore - - template: jinja + cmd.run: + - name: /usr/sbin/so-kafka-generate-keystore - cwd: /opt/so - - defaults: - GLOBALS: {{ GLOBALS }} kafka_kraft_server_properties: file.managed: @@ -106,4 +105,4 @@ kafka_kraft_server_properties: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 1809c7a934..26f1883774 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -13,4 +13,4 @@ if [ ! 
-f /etc/pki/kafka.jks ]; then docker rm so-kafka-keystore else exit 0 -fi \ No newline at end of file +fi From 8e47cc73a5429b5f6c4d106867c81276b19b13c0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 08:54:17 -0400 Subject: [PATCH 012/222] kafka.nodes pillar to lf --- pillar/kafka/nodes.sls | 60 +++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 30 deletions(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index 8fdd3ee079..b1842834c7 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,30 +1,30 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} -{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} - -{% set existing_ids = [] %} -{% for node in pillar_kafkanodes.values() %} - {% if node.get('id') %} - {% do existing_ids.append(node['nodeid']) %} - {% endif %} -{% endfor %} -{% set all_possible_ids = range(1, 256)|list %} - -{% set available_ids = [] %} -{% for id in all_possible_ids %} - {% if id not in existing_ids %} - {% do available_ids.append(id) %} - {% endif %} -{% endfor %} - -{% set final_nodes = pillar_kafkanodes.copy() %} - -{% for minionid, ip in current_kafkanodes.items() %} - {% set hostname = minionid.split('_')[0] %} - {% if hostname not in final_nodes %} - {% set new_id = available_ids.pop(0) %} - {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} - {% endif %} -{% endfor %} - -kafka: - nodes: {{ final_nodes|tojson }} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} + +{% set existing_ids = [] %} +{% for node in pillar_kafkanodes.values() %} + {% if node.get('id') %} + {% do existing_ids.append(node['nodeid']) %} + {% endif %} +{% endfor %} +{% set all_possible_ids = range(1, 256)|list %} + +{% set available_ids = [] %} +{% for id in all_possible_ids %} + {% if id not in existing_ids %} + {% do available_ids.append(id) %} + {% endif %} +{% endfor %} + +{% set final_nodes = pillar_kafkanodes.copy() %} + +{% for minionid, ip in current_kafkanodes.items() %} + {% set hostname = minionid.split('_')[0] %} + {% if hostname not in final_nodes %} + {% set new_id = available_ids.pop(0) %} + {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} + {% endif %} +{% endfor %} + +kafka: + nodes: {{ final_nodes|tojson }} From db106f8ca1ef7126b15e5945555fe3961796059c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:22:47 -0400 Subject: [PATCH 013/222] listen on 0.0.0.0 for CONTROLLER --- salt/kafka/etc/server.properties.jinja | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index eb60eda601..3ed878766d 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -23,7 +23,11 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} # listeners = listener_name://host_name:port # EXAMPLE: # listeners = PLAINTEXT://your.host.name:9092 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 + +# using 0.0.0.0 eliminates issues with binding to 9093 +listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://0.0.0.0:9093 +#listeners=BROKER://{{ kafka_ip 
}}:9092,CONTROLLER://{{ kafka_ip }}:9093 + # Name of listener used for communication between brokers. inter.broker.listener.name=BROKER @@ -120,4 +124,4 @@ log.segment.bytes=1073741824 # The interval at which log segments are checked to see if they can be deleted according # to the retention policies -log.retention.check.interval.ms=300000 \ No newline at end of file +log.retention.check.interval.ms=300000 From c3f02a698ebd7d7afb5af09e1caa4a7442a773dc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:23:36 -0400 Subject: [PATCH 014/222] add kafka nodes as extra hosts for the container --- salt/kafka/enabled.sls | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 31d375e235..5070719505 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -23,6 +23,15 @@ so-kafka: - sobridge: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka + - extra_hosts: + {% for node in KAFKANODES %} + - {{ node }}:{{ KAFKANODES[node].ip }} + {% endfor %} + {% if DOCKER.containers['so-kafka'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} + - {{ XTRAHOST }} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - {{ BINDING }} @@ -44,4 +53,4 @@ delete_so-kafka_so-status.disabled: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} From ed6137a76a7fb3de136a9fc9b75c558637b25d34 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:24:10 -0400 Subject: [PATCH 015/222] allow sensor and searchnode to connect to manager kafka ports --- salt/firewall/defaults.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index bc380a17d7..bf3a003f31 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -405,6 +405,7 @@ firewall: - docker_registry - influxdb - sensoroni + - kafka searchnode: portgroups: - redis @@ -418,6 +419,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka heavynode: portgroups: - redis From 18f95e867f798b57d70c8d8cfe329d7b53af6cf7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 10:24:53 -0400 Subject: [PATCH 016/222] port 9093 for kafka docker --- salt/docker/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index fea6c718f0..ff130853a2 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -189,7 +189,7 @@ docker: final_octet: 88 port_bindings: - 0.0.0.0:9092:9092 - - 0.0.0.0:2181:2181 + - 0.0.0.0:9093:9093 custom_bind_mounts: [] extra_hosts: [] - extra_env: [] \ No newline at end of file + extra_env: [] From b863060df12ac99d51a5a37ca4a544dc216c2393 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 11:05:24 -0400 Subject: [PATCH 017/222] kafka broker and listener on 0.0.0.0 --- salt/kafka/etc/server.properties.jinja | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 3ed878766d..0032e1b32c 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -24,8 +24,8 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} # EXAMPLE: # listeners = PLAINTEXT://your.host.name:9092 -# using 0.0.0.0 eliminates issues with binding to 
9093 -listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://0.0.0.0:9093 +# using 0.0.0.0 eliminates issues with binding to 9092 and 9093 in initial testing +listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 #listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 From dc27bbb01dd7b3e97980160572b7f217aa0bfa7a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:30:52 -0400 Subject: [PATCH 018/222] Set kafka heap size. To be later configured from SOC Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/enabled.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 5070719505..723f2be9ac 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -23,6 +23,8 @@ so-kafka: - sobridge: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka + - environment: + - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} From 13105c4ab31f045acbef2986c59bc4bf431aa2ec Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 3 Apr 2024 14:34:07 -0400 Subject: [PATCH 019/222] Generate certs for use with elasticfleet kafka output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 40 +++++++++------------------------------- 1 file changed, 9 insertions(+), 31 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 2a71cd8538..e7b01bcd21 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -702,27 +702,26 @@ kafka_crt: - onchanges: - x509: /etc/pki/kafka.key -# Kafka needs a keystore so just creating a new key / cert for that purpose -etc_kafka_logstash_key: +elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/kafka-logstash.key + - name: /etc/pki/elasticfleet-kafka.keyn - keysize: 4096 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/kakfa-logstash.key') -%} + {% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%} - prereq: - - x509: etc_kafka_logstash_crt + - x509: elasticfleet_kafka_crt {%- endif %} - retry: attempts: 5 interval: 30 -etc_kafka_logstash_crt: +elasticfleet_kafka_crt: x509.certificate_managed: - - name: /etc/pki/kafka-logstash.crt + - name: /etc/pki/elasticfleet-kafka.crt - ca_server: {{ ca_server }} - signing_policy: elasticfleet - - private_key: /etc/pki/kafka-logstash.key + - private_key: /etc/pki/elasticfleet-kafka.key - CN: {{ GLOBALS.hostname }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - days_remaining: 0 @@ -732,10 +731,6 @@ etc_kafka_logstash_crt: - retry: attempts: 5 interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:" - - onchanges: - - x509: etc_kafka_logstash_key kafka_key_perms: file.managed: @@ -756,7 +751,7 @@ kafka_crt_perms: kafka_logstash_cert_perms: file.managed: - replace: False - - name: /etc/pki/kafka-logstash.crt + - name: /etc/pki/elasticfleet-kafka.crt - mode: 640 - user: 960 - group: 939 @@ -764,27 +759,10 @@ kafka_logstash_cert_perms: kafka_logstash_key_perms: file.managed: - replace: False - - name: /etc/pki/kafka-logstash.key - - mode: 640 - - user: 960 - - group: 939 - -kafka_logstash_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.p12 + - name: 
/etc/pki/elasticfleet-kafka.key - mode: 640 - user: 960 - group: 939 - -kafka_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.p12 - - mode: 640 - - user: 960 - - group: 939 - {% endif %} {% else %} From 1b8584d4bbbce5432a3114c8096cd757b3c871d4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 3 Apr 2024 15:36:35 -0400 Subject: [PATCH 020/222] allow manager to manager on kafka ports --- salt/firewall/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index bf3a003f31..e51bf58256 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -370,6 +370,7 @@ firewall: - elastic_agent_update - localrules - sensoroni + - kafka fleet: portgroups: - elasticsearch_rest From 7a6b72ebac68f899af097eb5b575dc88e291977b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 4 Apr 2024 15:46:11 -0400 Subject: [PATCH 021/222] add so-kafka to manager for firewall --- salt/firewall/containers.map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 566f55a407..7efb9abab2 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -27,6 +27,7 @@ 'so-elastic-fleet', 'so-elastic-fleet-package-registry', 'so-influxdb', + 'so-kafka', 'so-kibana', 'so-kratos', 'so-logstash', From 4c5b42b898c062ff76dbbdd5eb24d81d3fb315e9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 4 Apr 2024 15:47:01 -0400 Subject: [PATCH 022/222] restart container on server config changes --- salt/kafka/enabled.sls | 2 ++ salt/ssl/init.sls | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 723f2be9ac..49a0a9bbd0 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -43,6 +43,8 @@ so-kafka: - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - watch: + - file: kafka_kraft_server_properties delete_so-kafka_so-status.disabled: file.uncomment: diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index e7b01bcd21..3b5ebb2b0a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -704,7 +704,7 @@ kafka_crt: elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/elasticfleet-kafka.keyn + - name: /etc/pki/elasticfleet-kafka.key - keysize: 4096 - backup: True - new: True From 40b08d737c9025c8c73ba580d1078b474342ee42 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:16:53 -0400 Subject: [PATCH 023/222] Generate kafka keystore on changes to kafka.key Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 15 ++++++++++++--- .../tools/sbin_jinja/so-kafka-generate-keystore | 11 ++++------- 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c8d6f66e01..dedc68fe8e 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -80,10 +80,19 @@ kafka_data_dir: - group: 960 - makedirs: True -kafka_keystore_script: +kafka_generate_keystore: cmd.run: - - name: /usr/sbin/so-kafka-generate-keystore - - cwd: /opt/so + - name: "/usr/sbin/so-kafka-generate-keystore" + - onchanges: + - x509: /etc/pki/kafka.key + +kafka_keystore_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.jks + - mode: 640 + - user: 960 + - group: 939 
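Because the keystore is now rebuilt whenever /etc/pki/kafka.key changes, the JKS and the PEM certificate should stay in lockstep after a rotation. A rough way to confirm that, assuming the default 'changeit' password used throughout this series and substituting the same registry, repo and version values the so-kafka-generate-keystore template resolves (shown as shell variables here):

    # Serial of the PEM certificate on disk...
    openssl x509 -in /etc/pki/kafka.crt -noout -serial
    # ...should match the serial reported for the entry inside the regenerated JKS.
    docker run --rm -v /etc/pki/kafka.jks:/etc/pki/kafka.jks --entrypoint keytool \
      "$REGISTRY_HOST:5000/$IMAGE_REPO/so-kafka:$SO_VERSION" \
      -list -v -keystore /etc/pki/kafka.jks -storepass changeit | grep -i 'serial number'
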
kafka_kraft_server_properties: file.managed: diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore index 26f1883774..8ae9d6db2f 100644 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore @@ -7,10 +7,7 @@ . /usr/sbin/so-common -if [ ! -f /etc/pki/kafka.jks ]; then - docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt - docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks - docker rm so-kafka-keystore -else - exit 0 -fi +# Generate a new keystore +docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt +docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks +docker rm so-kafka-keystore \ No newline at end of file From 436cbc1f0615228ee75bf674ca3e97e0106dcfe5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:21:29 -0400 Subject: [PATCH 024/222] Add kafka signing_policy for client/server auth. Add kafka-client cert on manager so manager can interact with kafka using its own cert Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ca/files/signing_policies.conf | 14 ++++++ salt/ssl/init.sls | 77 +++++++++++++++++++++++++++-- 2 files changed, 86 insertions(+), 5 deletions(-) diff --git a/salt/ca/files/signing_policies.conf b/salt/ca/files/signing_policies.conf index 6f1b1f1720..7f9c68750f 100644 --- a/salt/ca/files/signing_policies.conf +++ b/salt/ca/files/signing_policies.conf @@ -70,3 +70,17 @@ x509_signing_policies: - authorityKeyIdentifier: keyid,issuer:always - days_valid: 820 - copypath: /etc/pki/issued_certs/ + kafka: + - minions: '*' + - signing_private_key: /etc/pki/ca.key + - signing_cert: /etc/pki/ca.crt + - C: US + - ST: Utah + - L: Salt Lake City + - basicConstraints: "critical CA:false" + - keyUsage: "digitalSignature, keyEncipherment" + - subjectKeyIdentifier: hash + - authorityKeyIdentifier: keyid,issuer:always + - extendedKeyUsage: "serverAuth, clientAuth" + - days_valid: 820 + - copypath: /etc/pki/issued_certs/ diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index e7b01bcd21..a99b030ff0 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -687,7 +687,7 @@ kafka_crt: - name: /etc/pki/kafka.crt - ca_server: {{ ca_server }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: elasticfleet + - signing_policy: kafka - private_key: /etc/pki/kafka.key - CN: {{ GLOBALS.hostname }} - days_remaining: 0 @@ -704,7 +704,7 @@ kafka_crt: elasticfleet_kafka_key: x509.private_key_managed: - - name: /etc/pki/elasticfleet-kafka.keyn + - name: /etc/pki/elasticfleet-kafka.key - keysize: 4096 - backup: True - new: True @@ -720,7 +720,7 @@ elasticfleet_kafka_crt: x509.certificate_managed: - name: /etc/pki/elasticfleet-kafka.crt - ca_server: {{ ca_server }} - - 
signing_policy: elasticfleet + - signing_policy: kafka - private_key: /etc/pki/elasticfleet-kafka.key - CN: {{ GLOBALS.hostname }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} @@ -731,7 +731,58 @@ elasticfleet_kafka_crt: - retry: attempts: 5 interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt" + - onchanges: + - x509: elasticfleet_kafka_key +{% if grains['role'] in ['so-manager'] %} +kafka_client_key: + x509.private_key_managed: + - name: /etc/pki/kafka-client.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%} + - prereq: + - x509: /etc/pki/kafka-client.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_client_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-client.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-client.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + +kafka_client_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_client_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-client.crt + - mode: 640 + - user: 960 + - group: 939 +{% endif %} kafka_key_perms: file.managed: - replace: False @@ -748,7 +799,23 @@ kafka_crt_perms: - user: 960 - group: 939 -kafka_logstash_cert_perms: +kafka_pkcs8_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p8 + - mode: 640 + - user: 960 + - group: 939 + +kafka_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka.p12 + - mode: 640 + - user: 960 + - group: 939 + +elasticfleet_kafka_cert_perms: file.managed: - replace: False - name: /etc/pki/elasticfleet-kafka.crt @@ -756,7 +823,7 @@ kafka_logstash_cert_perms: - user: 960 - group: 939 -kafka_logstash_key_perms: +elasticfleet_kafka_key_perms: file.managed: - replace: False - name: /etc/pki/elasticfleet-kafka.key From 735cfb4c29d5ec3e96ab2a81edb024af00465b0b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 4 Apr 2024 16:45:58 -0400 Subject: [PATCH 025/222] Autogenerate kafka topics when a message it sent to non-existing topic Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/etc/server.properties.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 0032e1b32c..486feb2142 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -28,6 +28,7 @@ controller.quorum.voters={{ kraft_controller_quorum_voters }} listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 #listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 +auto.create.topics.enable=true # Name of listener used for communication between brokers. 
inter.broker.listener.name=BROKER From 433309ef1a901591b4b6b1f60009cd25db5d6272 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 09:35:12 -0400 Subject: [PATCH 026/222] Generate kafka cluster id if it doesn't exist Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/storage.sls | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index 778c054e27..e99455e3d8 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,7 +6,13 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id')%} +{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id', default=None) %} + +{% if kafka_cluster_id is none %} +generate_kafka_cluster_id: + cmd.run: + - name: /usr/sbin/so-kafka-clusterid +{% endif %} {# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} {% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} From 721e04f793701e635c957b5f0f23f189a2083cd8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 13:37:14 -0400 Subject: [PATCH 027/222] initial logstash input from kafka over ssl Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/enabled.sls | 2 - .../config/so/0800_input_kafka.conf.jinja | 55 +++++++++-------- salt/ssl/init.sls | 59 +++++++++++++++++++ 3 files changed, 89 insertions(+), 27 deletions(-) diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index 798b1984a4..fcc2ec190c 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -78,8 +78,6 @@ so-logstash: {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - {% endif %} - {% if GLOBALS.role in ['so-kafkanode'] %} - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro {% endif %} {% if GLOBALS.role == 'so-eval' %} diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index c1429319a5..957f7da191 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,26 +1,31 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} - -{% set bootstrap_servers = "','".join(broker_ips) %} - - -#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes -input { - kafka { - codec => json - #Can ingest multiple topics. Set to a value from SOC UI? 
- topics => ['logstash-topic',] - group_id => 'searchnodes' - security_protocol => 'SSL' - bootstrap_servers => {{ bootstrap_servers }} - ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' - ssl_keystore_password => '' - ssl_keystore_type => 'PKCS12' - ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - # Set password as a pillar to avoid bad optics? This is default truststore for grid - ssl_truststore_password => 'changeit' - } +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} +{% set broker_ips = [] %} +{% for node, node_data in kafka_brokers.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} + +{# For testing kafka stuff from manager not dedicated kafkanodes #} +{% for node, node_data in kafka_on_mngr.items() %} + {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% endfor %} +{% set bootstrap_servers = "','".join(broker_ips) %} + + +#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes +input { + kafka { + codec => json + #Can ingest multiple topics. Set to a value from SOC UI? + topics => ['ea-logs'] + group_id => 'searchnodes' + security_protocol => 'SSL' + bootstrap_servers => '{{ bootstrap_servers }}' + ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' + ssl_keystore_password => 'changeit' + ssl_keystore_type => 'PKCS12' + ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' + # Set password as a pillar to avoid bad optics? This is default truststore for grid + ssl_truststore_password => 'changeit' + } } \ No newline at end of file diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index a99b030ff0..90f9cc64f6 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -736,6 +736,40 @@ elasticfleet_kafka_crt: - onchanges: - x509: elasticfleet_kafka_key +kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: /etc/pki/kafka-logstash.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka-logstash.key + {% if grains['role'] in ['so-manager'] %} kafka_client_key: x509.private_key_managed: @@ -783,6 +817,7 @@ kafka_client_crt_perms: - user: 960 - group: 939 {% endif %} + kafka_key_perms: file.managed: - replace: False @@ -799,6 +834,30 @@ kafka_crt_perms: - user: 960 - group: 939 +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_pkcs12_perms: + file.managed: + - replace: False + - name: 
/etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 931 + kafka_pkcs8_perms: file.managed: - replace: False From 65274e89d7c8741fe63536e030483c2e2af21665 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 5 Apr 2024 15:38:00 -0400 Subject: [PATCH 028/222] Add client_id to logstash pipeline. To identify which searchnode is pulling messages Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 957f7da191..0260b774e1 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -19,6 +19,7 @@ input { #Can ingest multiple topics. Set to a value from SOC UI? topics => ['ea-logs'] group_id => 'searchnodes' + client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' bootstrap_servers => '{{ bootstrap_servers }}' ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' From d67ebabc951cfaf226b176f6eb458ec2b4c35127 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 8 Apr 2024 16:38:03 -0400 Subject: [PATCH 029/222] Remove logstash output to kafka pipeline. Add additional topics for searchnodes to ingest and add partition/offset info to event Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../config/so/0800_input_kafka.conf.jinja | 15 ++++++++----- .../config/so/0899_output_kafka.conf.jinja | 22 ------------------- 2 files changed, 10 insertions(+), 27 deletions(-) delete mode 100644 salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 0260b774e1..1391ce9834 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -11,13 +11,10 @@ {% endfor %} {% set bootstrap_servers = "','".join(broker_ips) %} - -#Run on searchnodes ingest kafka topic(s) group_id allows load balancing of event ingest to all searchnodes input { kafka { codec => json - #Can ingest multiple topics. Set to a value from SOC UI? - topics => ['ea-logs'] + topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs'] group_id => 'searchnodes' client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' @@ -26,7 +23,15 @@ input { ssl_keystore_password => 'changeit' ssl_keystore_type => 'PKCS12' ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - # Set password as a pillar to avoid bad optics? 
This is default truststore for grid ssl_truststore_password => 'changeit' + decorate_events => true + tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ] + } +} +filter { + if ![metadata] { + mutate { + rename => { "@metadata" => "metadata" } } + } } \ No newline at end of file diff --git a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja deleted file mode 100644 index ff9a6f6ee7..0000000000 --- a/salt/logstash/pipelines/config/so/0899_output_kafka.conf.jinja +++ /dev/null @@ -1,22 +0,0 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} - -{% set bootstrap_servers = "','".join(broker_ips) %} - -#Run on kafka broker logstash writes to topic 'logstash-topic' -output { - kafka { - codec => json - topic_id => 'logstash-topic' - bootstrap_servers => '{{ bootstrap_servers }}' - security_protocol => 'SSL' - ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12' - ssl_keystore_password => '' - ssl_keystore_type => 'PKCS12' - ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts' - ssl_truststore_password => 'changeit' - } -} \ No newline at end of file From 6217a7b9a94a972ecab3e76a436cadf76ce3f4ab Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 09:27:21 -0400 Subject: [PATCH 030/222] add defaults and jijafy kafka config --- salt/kafka/config.map.jinja | 8 ++ salt/kafka/config.sls | 12 +-- salt/kafka/defaults.yaml | 39 ++++++++ salt/kafka/enabled.sls | 17 ++-- salt/kafka/etc/client.properties.jinja | 2 + salt/kafka/etc/server.properties.jinja | 130 +------------------------ salt/kafka/map.jinja | 15 +++ 7 files changed, 81 insertions(+), 142 deletions(-) create mode 100644 salt/kafka/config.map.jinja create mode 100644 salt/kafka/defaults.yaml create mode 100644 salt/kafka/etc/client.properties.jinja create mode 100644 salt/kafka/map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja new file mode 100644 index 0000000000..ab43d84a9c --- /dev/null +++ b/salt/kafka/config.map.jinja @@ -0,0 +1,8 @@ +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} + +{% set KAFKACONFIG = {} %} +{% for k, v in KAFKAMERGED.config.keys() %} +{% do KAFKACONFIG.update({k | replace("_x_", "."): v}) %} +{% endfor %} + diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index dedc68fe8e..523681ba09 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -94,19 +94,17 @@ kafka_keystore_perms: - user: 960 - group: 939 -kafka_kraft_server_properties: +{% for sc in ['server', 'client'] %} +kafka_kraft_{{type}}_properties: file.managed: - - source: salt://kafka/etc/server.properties.jinja - - name: /opt/so/conf/kafka/server.properties + - source: salt://kafka/etc/{{sc}}.properties.jinja + - name: /opt/so/conf/kafka/{{sc}}.properties - template: jinja - - defaults: - kafka_nodeid: {{ kafka_nodeid }} - kraft_controller_quorum_voters: {{ kraft_controller_quorum_voters }} - kafka_ip: {{ kafka_ip }} - user: 960 - group: 960 - makedirs: True - show_changes: False +{% endfor %} {% else %} diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml new file mode 100644 index 0000000000..7828f0536b --- /dev/null +++ b/salt/kafka/defaults.yaml @@ -0,0 +1,39 @@ +kafka: + enabled: False + config: + server: + advertised_x_listeners: 
BROKER://10.66.166.231:9092 + auto_x_create_x_topics_x_enable: true + controller_x_listener_x_names: CONTROLLER + controller_x_quorum_x_voters: + inter_x_broker_x_listener_x_name: BROKER + listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 + listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL + log_x_dirs: /nsm/kafka/data + log_x_retention_x_check_x_interval_x_ms: 300000 + log_x_retention_x_hours: 168 + log_x_segment_x_bytes: 1073741824 + node_x_id: + num_x_io_x_threads: 8 + num_x_network_x_threads: 3 + num_x_partitions: 1 + num_x_recovery_x_threads_x_per_x_data_x_dir: 1 + offsets_x_topic_x_replication_x_factor: 1 + process_x_roles: broker,controller + socket_x_receive_x_buffer_x_bytes: 102400 + socket_x_request_x_max_x_bytes: 104857600 + socket_x_send_x_buffer_x_bytes: 102400 + ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_password: changeit + ssl_x_keystore_x_type: JKS + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit + transaction_x_state_x_log_x_min_x_isr: 1 + transaction_x_state_x_log_x_replication_x_factor: 1 + client: + security_x_protocol: SSL + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit + ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_type: JKS + ssl_x_keystore_x_password: changeit diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 49a0a9bbd0..c2fca70db4 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -26,14 +26,14 @@ so-kafka: - environment: - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G - extra_hosts: - {% for node in KAFKANODES %} + {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} - {% endfor %} - {% if DOCKER.containers['so-kafka'].extra_hosts %} - {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} + {% endfor %} + {% if DOCKER.containers['so-kafka'].extra_hosts %} + {% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %} - {{ XTRAHOST }} - {% endfor %} - {% endif %} + {% endfor %} + {% endif %} - port_bindings: {% for BINDING in DOCKER.containers['so-kafka'].port_bindings %} - {{ BINDING }} @@ -43,8 +43,11 @@ so-kafka: - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties - watch: - - file: kafka_kraft_server_properties + {% for sc in ['server', 'client'] %} + - file: kafka_kraft_{{sc}}_properties + {% endfor %} delete_so-kafka_so-status.disabled: file.uncomment: diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja new file mode 100644 index 0000000000..9f01904e42 --- /dev/null +++ b/salt/kafka/etc/client.properties.jinja @@ -0,0 +1,2 @@ +{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} +{{ KAFKACONFIG.client }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 486feb2142..a18262ac2f 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,128 +1,2 @@ -# This configuration file is intended for use in KRaft mode, where -# Apache ZooKeeper is not present. See config/kraft/README.md for details. -# - -############################# Server Basics ############################# - -# The role of this server. 
Setting this puts us in KRaft mode -process.roles=broker,controller - -# The node id associated with this instance's roles -node.id={{ kafka_nodeid }} - -# The connect string for the controller quorum -controller.quorum.voters={{ kraft_controller_quorum_voters }} - -############################# Socket Server Settings ############################# - -# The address the socket server listens on. -# Combined nodes (i.e. those with `process.roles=broker,controller`) must list the controller listener here at a minimum. -# If the broker listener is not defined, the default listener will use a host name that is equal to the value of java.net.InetAddress.getCanonicalHostName(), -# with PLAINTEXT listener name, and port 9092. -# FORMAT: -# listeners = listener_name://host_name:port -# EXAMPLE: -# listeners = PLAINTEXT://your.host.name:9092 - -# using 0.0.0.0 eliminates issues with binding to 9092 and 9093 in initial testing -listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 -#listeners=BROKER://{{ kafka_ip }}:9092,CONTROLLER://{{ kafka_ip }}:9093 - -auto.create.topics.enable=true - -# Name of listener used for communication between brokers. -inter.broker.listener.name=BROKER - -# Listener name, hostname and port the broker will advertise to clients. -# If not set, it uses the value for "listeners". -advertised.listeners=BROKER://{{ kafka_ip }}:9092 - -# A comma-separated list of the names of the listeners used by the controller. -# If no explicit mapping set in `listener.security.protocol.map`, default will be using PLAINTEXT protocol -# This is required if running in KRaft mode. -controller.listener.names=CONTROLLER - -# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details -listener.security.protocol.map=CONTROLLER:SSL,BROKER:SSL - -#SSL configuration -ssl.keystore.location=/etc/pki/kafka.jks -ssl.keystore.password=changeit -ssl.keystore.type=JKS -ssl.truststore.location=/etc/pki/java/sos/cacerts -ssl.truststore.password=changeit - -# The number of threads that the server uses for receiving requests from the network and sending responses to the network -num.network.threads=3 - -# The number of threads that the server uses for processing requests, which may include disk I/O -num.io.threads=8 - -# The send buffer (SO_SNDBUF) used by the socket server -socket.send.buffer.bytes=102400 - -# The receive buffer (SO_RCVBUF) used by the socket server -socket.receive.buffer.bytes=102400 - -# The maximum size of a request that the socket server will accept (protection against OOM) -socket.request.max.bytes=104857600 - - -############################# Log Basics ############################# - -# A comma separated list of directories under which to store log files -log.dirs=/nsm/kafka/data - -# The default number of log partitions per topic. More partitions allow greater -# parallelism for consumption, but this will also result in more files across -# the brokers. -num.partitions=1 - -# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased for installations with data dirs located in RAID array. 
-num.recovery.threads.per.data.dir=1 - -############################# Internal Topic Settings ############################# -# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" -# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. -offsets.topic.replication.factor=1 -transaction.state.log.replication.factor=1 -transaction.state.log.min.isr=1 - -############################# Log Flush Policy ############################# - -# Messages are immediately written to the filesystem but by default we only fsync() to sync -# the OS cache lazily. The following configurations control the flush of data to disk. -# There are a few important trade-offs here: -# 1. Durability: Unflushed data may be lost if you are not using replication. -# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. -# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. -# The settings below allow one to configure the flush policy to flush data after a period of time or -# every N messages (or both). This can be done globally and overridden on a per-topic basis. - -# The number of messages to accept before forcing a flush of data to disk -#log.flush.interval.messages=10000 - -# The maximum amount of time a message can sit in a log before we force a flush -#log.flush.interval.ms=1000 - -############################# Log Retention Policy ############################# - -# The following configurations control the disposal of log segments. The policy can -# be set to delete segments after a period of time, or after a given size has accumulated. -# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens -# from the end of the log. - -# The minimum age of a log file to be eligible for deletion due to age -log.retention.hours=168 - -# A size-based retention policy for logs. Segments are pruned from the log unless the remaining -# segments drop below log.retention.bytes. Functions independently of log.retention.hours. -#log.retention.bytes=1073741824 - -# The maximum size of a log segment file. When this size is reached a new log segment will be created. 
-log.segment.bytes=1073741824 - -# The interval at which log segments are checked to see if they can be deleted according -# to the retention policies -log.retention.check.interval.ms=300000 +{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} +{{ KAFKACONFIG.server }} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja new file mode 100644 index 0000000000..f0e389e4ae --- /dev/null +++ b/salt/kafka/map.jinja @@ -0,0 +1,15 @@ +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} +{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set KAFKAMERGED.config.server.node_x_id = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} +{% set KAFKAMERGED.config.server.advertised_x_listeners = 'BROKER://' ~ GLOBALS.node_ip ~ ':9092' %} + +{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} +{% set combined = [] %} +{% for hostname, data in nodes.items() %} + {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} +{% endfor %} +{% set kraft_controller_quorum_voters = ','.join(combined) %} + +{% set KAFKAMERGED.config.server.controller_x_quorum_x_voters = kraft_controller_quorum_voters %} From 7aa00faa6c37bbe87ab4c4ed36647a9c1c25f6be Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 09:31:54 -0400 Subject: [PATCH 031/222] fix var --- salt/kafka/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 523681ba09..1666e65ae0 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -95,7 +95,7 @@ kafka_keystore_perms: - group: 939 {% for sc in ['server', 'client'] %} -kafka_kraft_{{type}}_properties: +kafka_kraft_{{sc}}_properties: file.managed: - source: salt://kafka/etc/{{sc}}.properties.jinja - name: /opt/so/conf/kafka/{{sc}}.properties From c48436ccbf2f542954b2e46585c005a3203d27be Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:19:17 -0400 Subject: [PATCH 032/222] fix dict update --- salt/kafka/map.jinja | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index f0e389e4ae..f1da7ec95a 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -2,8 +2,8 @@ {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set KAFKAMERGED.config.server.node_x_id = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} -{% set KAFKAMERGED.config.server.advertised_x_listeners = 'BROKER://' ~ GLOBALS.node_ip ~ ':9092' %} +{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} +{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} {% set nodes = salt['pillar.get']('kafka:nodes', {}) %} {% set combined = [] %} @@ -12,4 +12,4 @@ {% endfor %} {% set kraft_controller_quorum_voters = ','.join(combined) %} -{% set KAFKAMERGED.config.server.controller_x_quorum_x_voters = kraft_controller_quorum_voters %} +{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %} From daa5342986b2bf5cf7b3106888d4a0cf89048008 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:22:05 -0400 Subject: [PATCH 033/222] items not keys in for loop --- salt/kafka/config.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index ab43d84a9c..33f9f83875 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -2,7 +2,7 @@ {% from 'kafka/map.jinja' import KAFKAMERGED %} {% set KAFKACONFIG = {} %} -{% for k, v in KAFKAMERGED.config.keys() %} +{% for k, v in KAFKAMERGED.config.items() %} {% do KAFKACONFIG.update({k | replace("_x_", "."): v}) %} {% endfor %} From d38051e806c505bc1023291f90197b7c5dab7500 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 10:36:37 -0400 Subject: [PATCH 034/222] fix client and server properties formatting --- salt/kafka/etc/client.properties.jinja | 4 ++-- salt/kafka/etc/server.properties.jinja | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 9f01904e42..1353ac4911 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,2 @@ -{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} -{{ KAFKACONFIG.client }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG.client | yaml(False) }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index a18262ac2f..b02730faa2 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,2 @@ -{%- from 'kafka/config.map.jinja' import KAFKACONFIG %} -{{ KAFKACONFIG.server }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG.server | yaml(False) }} From bd5fe4328561017b87d78493867d1f410e83ffc1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 9 Apr 2024 11:07:53 -0400 Subject: [PATCH 035/222] jinja config files --- salt/kafka/config.map.jinja | 8 -------- salt/kafka/config.sls | 9 --------- salt/kafka/etc/client.properties.jinja | 4 ++-- salt/kafka/etc/server.properties.jinja | 4 ++-- 4 files changed, 4 insertions(+), 21 deletions(-) delete mode 100644 salt/kafka/config.map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja deleted file mode 100644 index 33f9f83875..0000000000 --- a/salt/kafka/config.map.jinja +++ /dev/null @@ -1,8 +0,0 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'kafka/map.jinja' import KAFKAMERGED %} - -{% set KAFKACONFIG = {} %} -{% for k, v in KAFKAMERGED.config.items() %} -{% do KAFKACONFIG.update({k | replace("_x_", "."): v}) %} -{% endfor %} - diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 1666e65ae0..c856c4f809 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -10,16 +10,8 @@ {% set kafka_ips_logstash = [] %} {% set kafka_ips_kraft = [] %} {% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_nodeid = salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid') %} {% set kafka_ip = GLOBALS.node_ip %} -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set combined = [] %} -{% for hostname, data in nodes.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - {# Create list for kafka <-> logstash/searchnode communcations #} {% for node, node_data in kafkanodes.items() %} {% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} @@ -32,7 +24,6 @@ {% endfor %} {% set kraft_server_list = "','".join(kafka_ips_kraft) %} - include: - ssl diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 
1353ac4911..0245c3c420 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,2 @@ -{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} -{{ KAFKACONFIG.client | yaml(False) }} +{% from 'kafka/map.jinja' import KAFKAMERGED -%} +{{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index b02730faa2..90a80063fd 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,2 @@ -{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} -{{ KAFKACONFIG.server | yaml(False) }} +{% from 'kafka/map.jinja' import KAFKAMERGED -%} +{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} From 86b984001d4088eb24449f522ded6c6ebc616f37 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 10 Apr 2024 10:39:06 -0400 Subject: [PATCH 036/222] annotations and enable/disable from ui --- salt/kafka/disabled.sls | 16 +++ salt/kafka/etc/client.properties.jinja | 5 + salt/kafka/etc/server.properties.jinja | 5 + salt/kafka/init.sls | 8 +- salt/kafka/map.jinja | 5 + salt/kafka/soc_kafka.yaml | 164 +++++++++++++++++++++++++ 6 files changed, 201 insertions(+), 2 deletions(-) create mode 100644 salt/kafka/disabled.sls create mode 100644 salt/kafka/soc_kafka.yaml diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls new file mode 100644 index 0000000000..6658f0c5e6 --- /dev/null +++ b/salt/kafka/disabled.sls @@ -0,0 +1,16 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +include: + - kafka.sostatus + +so-kafka: + docker_container.absent: + - force: True + +so-kafka_so-status.disabled: + file.comment: + - name: /opt/so/conf/so-status/so-status.conf + - regex: ^so-kafka$ diff --git a/salt/kafka/etc/client.properties.jinja b/salt/kafka/etc/client.properties.jinja index 0245c3c420..91ff5f7c28 100644 --- a/salt/kafka/etc/client.properties.jinja +++ b/salt/kafka/etc/client.properties.jinja @@ -1,2 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + {% from 'kafka/map.jinja' import KAFKAMERGED -%} {{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index 90a80063fd..df5632ba95 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -1,2 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. 
#} + {% from 'kafka/map.jinja' import KAFKAMERGED -%} {{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 903c66867f..b4a6a28b0a 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -3,7 +3,11 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -{# Create map.jinja to enable / disable kafka from UI #} -{# Temporarily just enable kafka #} +{% from 'kafka/map.jinja' import KAFKAMERGED %} + include: +{% if KAFKAMERGED.enabled %} - kafka.enabled +{% else %} + - kafka.disabled +{% endif %} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index f1da7ec95a..771e6102bd 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -1,3 +1,8 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 'vars/globals.map.jinja' import GLOBALS %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml new file mode 100644 index 0000000000..c16ff212e3 --- /dev/null +++ b/salt/kafka/soc_kafka.yaml @@ -0,0 +1,164 @@ +kafka: + enabled: + description: Enable or disable Kafka. + helpLink: kafka.html + config: + server: + advertised_x_listeners: + description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. + title: advertised.listeners + helpLink: kafka.html + auto_x_create_x_topics_x_enable: + description: Enable the auto creation of topics. + title: auto.create.topics.enable + forcedType: bool + helpLink: kafka.html + controller_x_listener_x_names: + description: Set listeners used by the controller in a comma-separated list. + title: controller.listener.names + helpLink: kafka.html + controller_x_quorum_x_voters: + description: A comma-separated list of ID and endpoint information mapped for a set of voters. + title: controller.quorum.voters + helpLink: kafka.html + inter_x_broker_x_listener_x_name: + description: The name of the listener used for inter-broker communication. + title: inter.broker.listener.name + helpLink: kafka.html + listeners: + description: Set of URIs that are listened on and the listener names in a comma-separated list. + helpLink: kafka.html + listener_x_security_x_protocol_x_map: + description: Comma-separated mapping of listener names and security protocols. + title: listener.security.protocol.map + helpLink: kafka.html + log_x_dirs: + description: Where Kafka logs are stored within the Docker container. + title: log.dirs + helpLink: kafka.html + log_x_retention_x_check_x_interval_x_ms: + description: Frequency at which log files are checked to see if they qualify for deletion. + title: log.retention.check.interval.ms + helpLink: kafka.html + log_x_retention_x_hours: + description: How long, in hours, a log file is kept. + title: log.retention.hours + forcedType: int + helpLink: kafka.html + log_x_segment_x_bytes: + description: The maximum allowable size for a log segment file.
+ title: log.segment.bytes + forcedType: int + helpLink: kafka.html + node_x_id: + description: The node ID corresponds to the roles performed by this process whenever process.roles is populated. + title: node.id + forcedType: int + readonly: True + helpLink: kafka.html + num_x_io_x_threads: + description: The number of threads used by Kafka. + title: num.io.threads + forcedType: int + helpLink: kafka.html + num_x_network_x_threads: + description: The number of threads used for network communication. + title: num.network.threads + forcedType: int + helpLink: kafka.html + num_x_partitions: + description: The number of log partitions assigned per topic. + title: num.partitions + forcedType: int + helpLink: kafka.html + num_x_recovery_x_threads_x_per_x_data_x_dir: + description: The number of threads used for log recovery at startup and flushing at shutdown. This amount of threads is used per data directory. + title: num.recovery.threads.per.data.dir + forcedType: int + helpLink: kafka.html + offsets_x_topic_x_replication_x_factor: + description: The offsets topic replication factor. + title: offsets.topic.replication.factor + forcedType: int + helpLink: kafka.html + process_x_roles: + description: The roles the process performs. Use a comma-separated list if multiple. + title: process.roles + helpLink: kafka.html + socket_x_receive_x_buffer_x_bytes: + description: Size, in bytes, of the SO_RCVBUF buffer. A value of -1 will use the OS default. + title: socket.receive.buffer.bytes + #forcedType: int - soc needs to allow -1 as an int before we can use this + helpLink: kafka.html + socket_x_request_x_max_x_bytes: + description: The maximum bytes allowed for a request to the socket. + title: socket.request.max.bytes + forcedType: int + helpLink: kafka.html + socket_x_send_x_buffer_x_bytes: + description: Size, in bytes, of the SO_SNDBUF buffer. A value of -1 will use the OS default. + title: socket.send.buffer.bytes + #forcedType: int - soc needs to allow -1 as an int before we can use this + helpLink: kafka.html + ssl_x_keystore_x_location: + description: The key store file location within the Docker container. + title: ssl.keystore.location + helpLink: kafka.html + ssl_x_keystore_x_password: + description: The key store file password. Invalid for PEM format. + title: ssl.keystore.password + sensitive: True + helpLink: kafka.html + ssl_x_keystore_x_type: + description: The key store file format. + title: ssl.keystore.type + regex: ^(JKS|PKCS12|PEM)$ + helpLink: kafka.html + ssl_x_truststore_x_location: + description: The trust store file location within the Docker container. + title: ssl.truststore.location + helpLink: kafka.html + ssl_x_truststore_x_password: + description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format. + title: ssl.truststore.password + sensitive: True + helpLink: kafka.html + transaction_x_state_x_log_x_min_x_isr: + description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees.
For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write. + title: transaction.state.log.min.isr + forcedType: int + helpLink: kafka.html + transaction_x_state_x_log_x_replication_x_factor: + description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite. + title: transaction.state.log.replication.factor + forcedType: int + helpLink: kafka.html + client: + security_x_protocol: + description: Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT + title: security.protocol + regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT) + helpLink: kafka.html + ssl_x_keystore_x_location: + description: The key store file location within the Docker container. + title: ssl.keystore.location + helpLink: kafka.html + ssl_x_keystore_x_password: + description: The key store file password. Invalid for PEM format. + title: ssl.keystore.password + sensitive: True + helpLink: kafka.html + ssl_x_keystore_x_type: + description: The key store file format. + title: ssl.keystore.type + regex: ^(JKS|PKCS12|PEM)$ + helpLink: kafka.html + ssl_x_truststore_x_location: + description: The trust store file location within the Docker container. + title: ssl.truststore.location + helpLink: kafka.html + ssl_x_truststore_x_password: + description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format. + title: ssl.truststore.password + sensitive: True + helpLink: kafka.html From d3bd56b131dfe372a04e012bc337333c0faca2bd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 10 Apr 2024 14:13:27 -0400 Subject: [PATCH 037/222] disable logstash and redis if kafka enabled --- salt/kafka/soc_kafka.yaml | 2 +- salt/logstash/init.sls | 3 ++- salt/redis/init.sls | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index c16ff212e3..2fec8c3024 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -135,7 +135,7 @@ kafka: helpLink: kafka.html client: security_x_protocol: - description: Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT + description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT' title: security.protocol regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT) helpLink: kafka.html diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index 62b2a2ebba..f7adc13307 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0. {% from 'logstash/map.jinja' import LOGSTASH_MERGED %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if LOGSTASH_MERGED.enabled %} +{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} - logstash.enabled {% else %} - logstash.disabled diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 2f7f38dcce..4936c3254b 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0.
{% from 'redis/map.jinja' import REDISMERGED %} +{% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if REDISMERGED.enabled %} +{% if REDISMERGED.enabled and not KAFKAMERGED.enabled %} - redis.enabled {% else %} - redis.disabled From af53dcda1bc064c7dba0a4fb1927007e7280c2ab Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:32:00 -0400 Subject: [PATCH 038/222] Remove references to kafkanode Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../assigned_hostgroups.local.map.yaml | 3 +- pillar/kafka/nodes.sls | 2 +- pillar/logstash/nodes.sls | 2 +- pillar/top.sls | 9 +- salt/allowed_states.map.jinja | 14 +-- salt/firewall/containers.map.jinja | 6 +- salt/firewall/defaults.yaml | 92 +------------------ salt/firewall/soc_firewall.yaml | 62 ------------- salt/kafka/enabled.sls | 2 +- salt/logstash/config.sls | 2 +- salt/logstash/defaults.yaml | 4 - salt/logstash/enabled.sls | 2 +- .../config/so/0800_input_kafka.conf.jinja | 4 +- salt/logstash/soc_logstash.yaml | 2 - salt/manager/tools/sbin/so-firewall-minion | 3 - salt/manager/tools/sbin/so-minion | 5 - salt/ssl/init.sls | 5 +- salt/top.sls | 9 -- setup/so-functions | 4 +- setup/so-whiptail | 3 - 20 files changed, 17 insertions(+), 218 deletions(-) diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index fca293d3a7..025b32131a 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -19,5 +19,4 @@ role: receiver: standalone: searchnode: - sensor: - kafkanode: \ No newline at end of file + sensor: \ No newline at end of file diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index b1842834c7..6fe64685d3 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-receiver or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} diff --git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 99fbb857cd..a77978821b 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -2,7 +2,7 @@ {% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %} {% for minionid, ip in salt.saltutil.runner( 'mine.get', - tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet or G@role:so-kafkanode ', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ', fun='network.ip_addrs', tgt_type='compound') | dictsort() %} diff --git a/pillar/top.sls b/pillar/top.sls index 61f4f338fb..817767bf7f 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -233,15 +233,8 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - '*_kafkanode': - - logstash.nodes - - logstash.soc_logstash - - logstash.adv_logstash - - minions.{{ grains.id }} - - minions.adv_{{ grains.id }} - - secrets - kafka.nodes + - secrets '*_import': - secrets diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja 
index 6fa60c2ea2..0fa9686582 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -188,16 +188,8 @@ 'telegraf', 'firewall', 'schedule', - 'docker_clean' - ], - 'so-kafkanode': [ - 'kafka', - 'logstash', - 'ssl', - 'telegraf', - 'firewall', - 'schedule', - 'docker_clean' + 'docker_clean', + 'kafka' ], 'so-desktop': [ 'ssl', @@ -214,7 +206,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} {% do allowed_states.append('elasticsearch') %} {% endif %} diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 7efb9abab2..02a1b7cac8 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -81,11 +81,7 @@ {% set NODE_CONTAINERS = [ 'so-logstash', 'so-redis', -] %} -{% elif GLOBALS.role == 'so-kafkanode' %} -{% set NODE_CONTAINERS = [ - 'so-logstash', - 'so-kafka', + 'so-kafka' ] %} {% elif GLOBALS.role == 'so-idh' %} diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index e51bf58256..0b6d06eda3 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -19,7 +19,6 @@ firewall: manager: [] managersearch: [] receiver: [] - kafkanode: [] searchnode: [] self: [] sensor: [] @@ -443,15 +442,6 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni - kafkanode: - portgroups: - - yum - - docker_registry - - influxdb - - elastic_agent_control - - elastic_agent_data - - elastic_agent_update - - sensoroni analyst: portgroups: - nginx @@ -530,9 +520,6 @@ firewall: receiver: portgroups: - salt_manager - kafkanode: - portgroups: - - salt_manager desktop: portgroups: - salt_manager @@ -647,15 +634,6 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni - kafkanode: - portgroups: - - yum - - docker_registry - - influxdb - - elastic_agent_control - - elastic_agent_data - - elastic_agent_update - - sensoroni analyst: portgroups: - nginx @@ -1305,14 +1283,17 @@ firewall: - beats_5044 - beats_5644 - elastic_agent_data + - kafka searchnode: portgroups: - redis - beats_5644 + - kafka managersearch: portgroups: - redis - beats_5644 + - kafka self: portgroups: - redis @@ -1383,73 +1364,6 @@ firewall: portgroups: [] customhostgroup9: portgroups: [] - kafkanode: - chain: - DOCKER-USER: - hostgroups: - searchnode: - portgroups: - - kafka - kafkanode: - portgroups: - - kafka - customhostgroup0: - portgroups: [] - customhostgroup1: - portgroups: [] - customhostgroup2: - portgroups: [] - customhostgroup3: - portgroups: [] - customhostgroup4: - portgroups: [] - customhostgroup5: - portgroups: [] - customhostgroup6: - portgroups: [] - customhostgroup7: - portgroups: [] - customhostgroup8: - portgroups: [] - customhostgroup9: - portgroups: [] - INPUT: - hostgroups: - anywhere: - portgroups: - - ssh - dockernet: - portgroups: - - all - localhost: - portgroups: - - all - self: - portgroups: - - syslog - syslog: - portgroups: - - syslog - customhostgroup0: - portgroups: [] - customhostgroup1: - portgroups: [] - customhostgroup2: - portgroups: [] - customhostgroup3: - portgroups: [] - customhostgroup4: - portgroups: [] - customhostgroup5: - portgroups: [] - customhostgroup6: - portgroups: [] - customhostgroup7: - portgroups: [] - customhostgroup8: - portgroups: [] - customhostgroup9: 
- portgroups: [] idh: chain: DOCKER-USER: diff --git a/salt/firewall/soc_firewall.yaml b/salt/firewall/soc_firewall.yaml index 3e4c4355f9..28791a705c 100644 --- a/salt/firewall/soc_firewall.yaml +++ b/salt/firewall/soc_firewall.yaml @@ -34,7 +34,6 @@ firewall: heavynode: *hostgroupsettings idh: *hostgroupsettings import: *hostgroupsettings - kafkanode: *hostgroupsettings localhost: *ROhostgroupsettingsadv manager: *hostgroupsettings managersearch: *hostgroupsettings @@ -361,8 +360,6 @@ firewall: portgroups: *portgroupsdocker endgame: portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -454,8 +451,6 @@ firewall: portgroups: *portgroupsdocker syslog: portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker analyst: portgroups: *portgroupsdocker desktop: @@ -940,63 +935,6 @@ firewall: portgroups: *portgroupshost customhostgroup9: portgroups: *portgroupshost - kafkanode: - chain: - DOCKER-USER: - hostgroups: - searchnode: - portgroups: *portgroupsdocker - kafkanode: - portgroups: *portgroupsdocker - customhostgroup0: - portgroups: *portgroupsdocker - customhostgroup1: - portgroups: *portgroupsdocker - customhostgroup2: - portgroups: *portgroupsdocker - customhostgroup3: - portgroups: *portgroupsdocker - customhostgroup4: - portgroups: *portgroupsdocker - customhostgroup5: - portgroups: *portgroupsdocker - customhostgroup6: - portgroups: *portgroupsdocker - customhostgroup7: - portgroups: *portgroupsdocker - customhostgroup8: - portgroups: *portgroupsdocker - customhostgroup9: - portgroups: *portgroupsdocker - INPUT: - hostgroups: - anywhere: - portgroups: *portgroupshost - dockernet: - portgroups: *portgroupshost - localhost: - portgroups: *portgroupshost - customhostgroup0: - portgroups: *portgroupshost - customhostgroup1: - portgroups: *portgroupshost - customhostgroup2: - portgroups: *portgroupshost - customhostgroup3: - portgroups: *portgroupshost - customhostgroup4: - portgroups: *portgroupshost - customhostgroup5: - portgroups: *portgroupshost - customhostgroup6: - portgroups: *portgroupshost - customhostgroup7: - portgroups: *portgroupshost - customhostgroup8: - portgroups: *portgroupshost - customhostgroup9: - portgroups: *portgroupshost - idh: chain: DOCKER-USER: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index c2fca70db4..ed26297b3e 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -7,7 +7,7 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} include: - kafka.sostatus diff --git a/salt/logstash/config.sls b/salt/logstash/config.sls index 402d1ef206..8a59c83b7e 100644 --- a/salt/logstash/config.sls +++ b/salt/logstash/config.sls @@ -12,7 +12,7 @@ include: - ssl - {% if GLOBALS.role not in ['so-receiver','so-fleet', 'so-kafkanode'] %} + {% if GLOBALS.role not in ['so-receiver','so-fleet'] %} - elasticsearch {% endif %} diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 3ca4570fdb..348acb6226 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -19,8 +19,6 @@ logstash: - search fleet: - fleet - kafkanode: - - kafkanode defined_pipelines: fleet: - so/0012_input_elastic_agent.conf.jinja @@ -39,8 +37,6 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - 
so/9900_output_endgame.conf.jinja - kafkanode: - - so/0899_output_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/enabled.sls b/salt/logstash/enabled.sls index fcc2ec190c..3881ef1f4e 100644 --- a/salt/logstash/enabled.sls +++ b/salt/logstash/enabled.sls @@ -75,7 +75,7 @@ so-logstash: {% else %} - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro {% endif %} - {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode', 'so-kafkanode' ] %} + {% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %} - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 1391ce9834..85e6729e23 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,11 +1,9 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:kafkanode', {}) %} +{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %} {% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} {% set broker_ips = [] %} {% for node, node_data in kafka_brokers.items() %} {% do broker_ips.append(node_data['ip'] + ":9092") %} {% endfor %} - -{# For testing kafka stuff from manager not dedicated kafkanodes #} {% for node, node_data in kafka_on_mngr.items() %} {% do broker_ips.append(node_data['ip'] + ":9092") %} {% endfor %} diff --git a/salt/logstash/soc_logstash.yaml b/salt/logstash/soc_logstash.yaml index 82fb25becc..3172ff7c58 100644 --- a/salt/logstash/soc_logstash.yaml +++ b/salt/logstash/soc_logstash.yaml @@ -16,7 +16,6 @@ logstash: manager: *assigned_pipelines managersearch: *assigned_pipelines fleet: *assigned_pipelines - kafkanode: *assigned_pipelines defined_pipelines: receiver: &defined_pipelines description: List of pipeline configurations assign to this group. 
@@ -27,7 +26,6 @@ logstash: fleet: *defined_pipelines manager: *defined_pipelines search: *defined_pipelines - kafkanode: *defined_pipelines custom0: *defined_pipelines custom1: *defined_pipelines custom2: *defined_pipelines diff --git a/salt/manager/tools/sbin/so-firewall-minion b/salt/manager/tools/sbin/so-firewall-minion index 3357e5185a..66a0afcea7 100755 --- a/salt/manager/tools/sbin/so-firewall-minion +++ b/salt/manager/tools/sbin/so-firewall-minion @@ -79,9 +79,6 @@ fi 'RECEIVER') so-firewall includehost receiver "$IP" --apply ;; - 'KAFKANODE') - so-firewall includehost kafkanode "$IP" --apply - ;; 'DESKTOP') so-firewall includehost desktop "$IP" --apply ;; diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 7b3e6fd3ed..34e069ece8 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -565,11 +565,6 @@ function createRECEIVER() { add_telegraf_to_minion } -function createKAFKANODE() { - add_logstash_to_minion - # add_telegraf_to_minion -} - function createDESKTOP() { add_desktop_to_minion add_telegraf_to_minion diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 90f9cc64f6..f337d62cb0 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -664,10 +664,7 @@ elastickeyperms: {%- endif %} -# Roles will need to be modified. Below is just for testing encrypted kafka pipelines -# Remove so-manager. Just inplace for testing -{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} -# Create a cert for Redis encryption +{% if grains['role'] in ['so-manager', 'so-searchnode', 'so-receiver'] %} kafka_key: x509.private_key_managed: - name: /etc/pki/kafka.key diff --git a/salt/top.sls b/salt/top.sls index 289dd462b0..ec5e4d7386 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -235,16 +235,7 @@ base: - firewall - logstash - redis - - elasticfleet.install_agent_grid - - '*_kafkanode and G@saltversion:{{saltversion}}': - - match: compound - kafka - - logstash - - ssl - - telegraf - - firewall - - docker_clean - elasticfleet.install_agent_grid '*_idh and G@saltversion:{{saltversion}}': diff --git a/setup/so-functions b/setup/so-functions index 070711d637..a669c52fc4 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1143,7 +1143,7 @@ get_redirect() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP' | 'KAFKANODE') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac @@ -1505,8 +1505,6 @@ process_installtype() { is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true - elif [ "$install_type" = 'KAFKANODE' ]; then - is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then is_desktop=true fi diff --git a/setup/so-whiptail b/setup/so-whiptail index a732a9c975..fd9625ec43 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -681,7 +681,6 @@ whiptail_install_type_dist_existing() { "HEAVYNODE" "Sensor + Search Node " \ "IDH" "Intrusion Detection Honeypot Node " \ "RECEIVER" "Receiver Node " \ - "KAFKANODE" "Kafka Broker + Kraft controller" \ 3>&1 1>&2 2>&3 # "HOTNODE" "Add Hot Node (Uses Elastic Clustering)" \ # TODO # "WARMNODE" "Add Warm Node to existing Hot or Search node" \ # TODO @@ -712,8 +711,6 @@ whiptail_install_type_dist_existing() { 
is_import=true elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true - elif [ "$install_type" = 'KAFKANODE' ]; then - is_kafka=true elif [ "$install_type" = 'DESKTOP' ]; then is_desktop=true fi From ca7253a5896f2a028021339a04dccd9eabb3b863 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:38:03 -0400 Subject: [PATCH 039/222] Run kafka-clusterid script when pillar values are missing Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/so-kafka-clusterid | 8 +++++--- setup/so-functions | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 7199732473..fcbe3ba42f 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -16,7 +16,9 @@ fi if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then kafka_cluster_id=$(get_random_value 22) echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls -else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets +fi + +if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/secrets.sls; then + kafkapass=$(get_random_value) + echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/secrets.sls fi \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index a669c52fc4..176349edb1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1116,6 +1116,7 @@ generate_passwords(){ SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) KAFKACLUSTERID=$(get_random_value 22) + KAFKAPASS=$(get_random_value) } generate_interface_vars() { @@ -1947,7 +1948,8 @@ secrets_pillar(){ "secrets:"\ " import_pass: $IMPORTPASS"\ " influx_pass: $INFLUXPASS"\ - " kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls + " kafka_cluster_id: $KAFKACLUSTERID"\ + " kafka_pass: $KAFKAPASS" > $local_salt_dir/pillar/secrets.sls fi } From 6b28dc72e86ee51476b5e95dedd28361223435ce Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 15:38:33 -0400 Subject: [PATCH 040/222] Update annotation for global.pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/global/soc_global.yaml | 3 +-- salt/kafka/config.sls | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index daaf28b286..667bf78262 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -36,9 +36,8 @@ global: global: True advanced: True pipeline: - description: Sets which pipeline technology for events to use. Currently only Redis is supported. + description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license. global: True - readonly: True advanced: True repo_host: description: Specify the host where operating system packages will be served from. 
diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c856c4f809..c9e028ff59 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -26,7 +26,7 @@ include: - ssl - +g kafka_group: group.present: - name: kafka From 39555873729731bbf79ee3ebbe32d7c4c01e9ad9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 11 Apr 2024 16:20:09 -0400 Subject: [PATCH 041/222] Use global.pipeline for redis / kafka states Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/global/defaults.yaml | 3 ++- salt/global/soc_global.yaml | 2 ++ salt/kafka/init.sls | 3 ++- salt/manager/tools/sbin/soup | 3 ++- salt/redis/init.sls | 4 ++-- salt/vars/kafkanode.map.jinja | 1 - setup/so-functions | 1 - 7 files changed, 10 insertions(+), 7 deletions(-) delete mode 100644 salt/vars/kafkanode.map.jinja diff --git a/salt/global/defaults.yaml b/salt/global/defaults.yaml index bd7244a587..5daa942c81 100644 --- a/salt/global/defaults.yaml +++ b/salt/global/defaults.yaml @@ -1,2 +1,3 @@ global: - pcapengine: STENO \ No newline at end of file + pcapengine: STENO + pipeline: REDIS \ No newline at end of file diff --git a/salt/global/soc_global.yaml b/salt/global/soc_global.yaml index 667bf78262..5a349a3c34 100644 --- a/salt/global/soc_global.yaml +++ b/salt/global/soc_global.yaml @@ -37,6 +37,8 @@ global: advanced: True pipeline: description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license. + regex: ^(REDIS|KAFKA)$ + regexFailureMessage: You must enter either REDIS or KAFKA. global: True advanced: True repo_host: diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index b4a6a28b0a..acedba3c36 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -4,9 +4,10 @@ # Elastic License 2.0. {% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} include: -{% if KAFKAMERGED.enabled %} +{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled {% else %} - kafka.disabled diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa3c3b5eea..3ca353856c 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,7 +438,8 @@ post_to_2.4.60() { } post_to_2.4.70() { - echo "Nothing to apply" + echo "Removing global.pipeline pillar configuration" + sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls POSTVERSION=2.4.70 } diff --git a/salt/redis/init.sls b/salt/redis/init.sls index 4936c3254b..7142c92c38 100644 --- a/salt/redis/init.sls +++ b/salt/redis/init.sls @@ -4,10 +4,10 @@ # Elastic License 2.0. 
{% from 'redis/map.jinja' import REDISMERGED %} -{% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} include: -{% if REDISMERGED.enabled and not KAFKAMERGED.enabled %} +{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %} - redis.enabled {% else %} - redis.disabled diff --git a/salt/vars/kafkanode.map.jinja b/salt/vars/kafkanode.map.jinja deleted file mode 100644 index 396cefcc90..0000000000 --- a/salt/vars/kafkanode.map.jinja +++ /dev/null @@ -1 +0,0 @@ -{% set ROLE_GLOBALS = {} %} \ No newline at end of file diff --git a/setup/so-functions b/setup/so-functions index 176349edb1..038a4deb4a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1327,7 +1327,6 @@ create_global() { # Continue adding other details echo " imagerepo: '$IMAGEREPO'" >> $global_pillar_file - echo " pipeline: 'redis'" >> $global_pillar_file echo " repo_host: '$HOSTNAME'" >> $global_pillar_file echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file echo " registry_host: '$HOSTNAME'" >> $global_pillar_file From f514e5e9bba49f21b6e57ce41a40aceef8710c28 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 11 Apr 2024 16:23:05 -0400 Subject: [PATCH 042/222] add kafka to receiver --- salt/top.sls | 9 --------- 1 file changed, 9 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 289dd462b0..e4cd067c3d 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -236,16 +236,7 @@ base: - logstash - redis - elasticfleet.install_agent_grid - - '*_kafkanode and G@saltversion:{{saltversion}}': - - match: compound - kafka - - logstash - - ssl - - telegraf - - firewall - - docker_clean - - elasticfleet.install_agent_grid '*_idh and G@saltversion:{{saltversion}}': - match: compound From a54a72c2696f32b9d565efce7d9748b3bd148070 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:19:20 -0400 Subject: [PATCH 043/222] move kafka_cluster_id to kafka:cluster_id --- pillar/top.sls | 4 +++- salt/allowed_states.map.jinja | 20 +++++++------------- salt/kafka/soc_kafka.yaml | 6 ++++++ salt/kafka/storage.sls | 15 ++++++++------- salt/manager/tools/sbin/so-kafka-clusterid | 11 ++++++----- salt/ssl/init.sls | 4 ++-- 6 files changed, 32 insertions(+), 28 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 61f4f338fb..170b3f7591 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -61,7 +61,7 @@ base: - backup.adv_backup - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.nodes + - kafka.* - stig.soc_stig '*_sensor': @@ -177,6 +177,7 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig + - kafka.* '*_heavynode': - elasticsearch.auth @@ -233,6 +234,7 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} + - kafka.* '*_kafkanode': - logstash.nodes diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 6fa60c2ea2..091cb37866 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -123,7 +123,8 @@ 'utility', 'schedule', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-searchnode': [ 'ssl', @@ -157,7 +158,8 @@ 'schedule', 'tcpreplay', 'docker_clean', - 'stig' + 'stig', + 'kafka' ], 'so-sensor': [ 'ssl', @@ -188,16 +190,8 @@ 'telegraf', 'firewall', 'schedule', - 'docker_clean' - ], - 'so-kafkanode': [ - 'kafka', - 'logstash', - 'ssl', - 'telegraf', - 'firewall', - 'schedule', - 'docker_clean' + 'docker_clean', + 'kafka' ], 'so-desktop': [ 'ssl', @@ -214,7 +208,7 @@ {% do allowed_states.append('strelka') %} {% endif %} - {% if grains.role 
in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import', 'so-kafkanode'] %} + {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %} {% do allowed_states.append('elasticsearch') %} {% endif %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 2fec8c3024..8a6c516a9b 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -2,6 +2,12 @@ kafka: enabled: description: Enable or disable Kafka. helpLink: kafka.html + cluster_id: + description: The ID of the Kafka cluster. + readonly: True + advanced: True + sensitive: True + helpLink: kafka.html config: server: advertised_x_listeners: diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index e99455e3d8..fbb7c73280 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,17 +6,18 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('secrets:kafka_cluster_id', default=None) %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} -{% if kafka_cluster_id is none %} +{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} +{% if kafka_cluster_id is none %} generate_kafka_cluster_id: cmd.run: - name: /usr/sbin/so-kafka-clusterid -{% endif %} +{% endif %} +{% endif %} {# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} -{% if salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} -{% else %} +{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} kafka_storage_init: cmd.run: - name: | @@ -25,7 +26,7 @@ kafka_rm_kafkainit: cmd.run: - name: | docker rm so-kafkainit -{% endif %} +{% endif %} {% else %} @@ -34,4 +35,4 @@ kafka_rm_kafkainit: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 7199732473..def454372c 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -13,10 +13,11 @@ else source $(dirname $0)/../../../common/tools/sbin/so-common fi -if ! grep -q "^ kafka_cluster_id:" $local_salt_dir/pillar/secrets.sls; then +if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo ' kafka_cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/secrets.sls + echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls else - echo 'kafka_cluster_id exists' - salt-call pillar.get secrets -fi \ No newline at end of file + echo 'kafka:cluster_id pillar exists' + salt-call pillar.get kafka:cluster_id +fi diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 90f9cc64f6..0aa06bc8eb 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -666,7 +666,7 @@ elastickeyperms: # Roles will need to be modified. Below is just for testing encrypted kafka pipelines # Remove so-manager. 
Just inplace for testing -{% if grains['role'] in ['so-manager', 'so-kafkanode', 'so-searchnode'] %} +{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %} # Create a cert for Redis encryption kafka_key: x509.private_key_managed: @@ -770,7 +770,7 @@ kafka_logstash_crt: - onchanges: - x509: /etc/pki/kafka-logstash.key -{% if grains['role'] in ['so-manager'] %} +{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %} kafka_client_key: x509.private_key_managed: - name: /etc/pki/kafka-client.key From 0ed9894b7e921924193df51d11c2572689f7376e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:19:46 -0400 Subject: [PATCH 044/222] create kratos local pillar dirs during setup --- setup/so-functions | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 070711d637..153a3371ce 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1115,7 +1115,6 @@ generate_passwords(){ REDISPASS=$(get_random_value) SOCSRVKEY=$(get_random_value 64) IMPORTPASS=$(get_random_value) - KAFKACLUSTERID=$(get_random_value 22) } generate_interface_vars() { @@ -1392,7 +1391,7 @@ make_some_dirs() { mkdir -p $local_salt_dir/salt/firewall/portgroups mkdir -p $local_salt_dir/salt/firewall/ports - for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do + for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global kafka;do mkdir -p $local_salt_dir/pillar/$THEDIR touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls @@ -1948,8 +1947,7 @@ secrets_pillar(){ printf '%s\n'\ "secrets:"\ " import_pass: $IMPORTPASS"\ - " influx_pass: $INFLUXPASS"\ - " kafka_cluster_id: $KAFKACLUSTERID" > $local_salt_dir/pillar/secrets.sls + " influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls fi } From fbd3cff90d6665dce961ed0b56ea6352526e128b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 11:21:19 -0400 Subject: [PATCH 045/222] Make global.pipeline use GLOBALMERGED value Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/vars/globals.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/vars/globals.map.jinja b/salt/vars/globals.map.jinja index ed7129678a..0a4995c0cb 100644 --- a/salt/vars/globals.map.jinja +++ b/salt/vars/globals.map.jinja @@ -23,7 +23,7 @@ 'manager_ip': INIT.PILLAR.global.managerip, 'md_engine': INIT.PILLAR.global.mdengine, 'pcap_engine': GLOBALMERGED.pcapengine, - 'pipeline': INIT.PILLAR.global.pipeline, + 'pipeline': GLOBALMERGED.pipeline, 'so_version': INIT.PILLAR.global.soversion, 'so_docker_gateway': DOCKER.gateway, 'so_docker_range': DOCKER.range, From 04ddcd5c9328d090d4fe000eefef8af111abd7cb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:52:57 -0400 Subject: [PATCH 046/222] add receiver managersearch and standalone to kafka.nodes pillar --- pillar/kafka/nodes.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index b1842834c7..447e7a35db 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,4 +1,4 @@ -{% set current_kafkanodes = 
salt.saltutil.runner('mine.get', tgt='G@role:so-kafkanode or G@role:so-manager', fun='network.ip_addrs', tgt_type='compound') %} +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} {% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} {% set existing_ids = [] %} From d73ba7dd3e31a3ea1c3bb27788d78178f8c6cd86 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 11:55:26 -0400 Subject: [PATCH 047/222] order kafka pillar assignment --- pillar/top.sls | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 170b3f7591..fbb1604dad 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -61,7 +61,9 @@ base: - backup.adv_backup - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.* + - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka - stig.soc_stig '*_sensor': @@ -177,7 +179,9 @@ base: - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - stig.soc_stig - - kafka.* + - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka '*_heavynode': - elasticsearch.auth @@ -234,16 +238,9 @@ base: - redis.adv_redis - minions.{{ grains.id }} - minions.adv_{{ grains.id }} - - kafka.* - - '*_kafkanode': - - logstash.nodes - - logstash.soc_logstash - - logstash.adv_logstash - - minions.{{ grains.id }} - - minions.adv_{{ grains.id }} - - secrets - kafka.nodes + - kafka.soc_kafka + - kafka.adv_kafka '*_import': - secrets From a6ff92b099faa1c1a43def5e85331d084e18edc8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:11:18 -0400 Subject: [PATCH 048/222] Note to remove so-kafka-clusterid. Update soup and setup to generate needed kafka pillar values Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/so-kafka-clusterid | 5 +++++ salt/manager/tools/sbin/soup | 17 +++++++++++++++++ setup/so-functions | 13 +++++++++++++ setup/so-variables | 6 ++++++ 4 files changed, 41 insertions(+) diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index adddfe3cec..7ac0559974 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -5,6 +5,11 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. + + +### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL GENERATED IN SETUP AND SOUP + + local_salt_dir=/opt/so/saltstack/local if [[ -f /usr/sbin/so-common ]]; then diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 3ca353856c..a6f9032a5b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,8 +438,25 @@ post_to_2.4.60() { } post_to_2.4.70() { + # Global pipeline changes to REDIS or KAFKA echo "Removing global.pipeline pillar configuration" sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls + + # Kafka configuration + mkdir -p /opt/so/saltstack/local/pillar/kafka + touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + + if ! 
grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + + if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafkapass=$(get_random_value) + echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi + POSTVERSION=2.4.70 } diff --git a/setup/so-functions b/setup/so-functions index 2332ab94cd..30e8fbfd6a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -803,6 +803,7 @@ create_manager_pillars() { patch_pillar nginx_pillar kibana_pillar + kafka_pillar } create_repo() { @@ -1191,6 +1192,18 @@ kibana_pillar() { logCmd "touch $kibana_pillar_file" } +kafka_pillar() { + KAFKACLUSTERID=$(get_random_value 22) + KAFKAPASS=$(get_random_value) + logCmd "mkdir -p $local_salt_dir/pillar/kakfa" + logCmd "touch $adv_kafka_pillar_file" + logCmd "touch $kafka_pillar_file" + printf '%s\n'\ + "kafka:"\ + " cluster_id: $KAFKACLUSTERID"\ + " certpass: $KAFKAPASS" > $kafka_pillar_file +} + logrotate_pillar() { logCmd "mkdir -p $local_salt_dir/pillar/logrotate" logCmd "touch $adv_logrotate_pillar_file" diff --git a/setup/so-variables b/setup/so-variables index 42ed8fc5c2..4a2f29c58f 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -178,6 +178,12 @@ export redis_pillar_file adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls" export adv_redis_pillar_file +kafka_pillar_file="local_salt_dir/pillar/kafka/soc_kafka.sls" +export kafka_pillar_file + +adv_kafka_pillar_file="$local_salt_dir/pillar/kafka/adv_kafka.sls" +export kafka_pillar_file + idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls" export idh_pillar_file From 911ee579a9e8a92b751695d12b7f449358df8260 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 12:16:20 -0400 Subject: [PATCH 049/222] Typo Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c9e028ff59..c856c4f809 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -26,7 +26,7 @@ include: - ssl -g + kafka_group: group.present: - name: kafka From c014508519d024ce6425955bdc1512f22991a22d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 13:50:25 -0400 Subject: [PATCH 050/222] need /opt/so/conf/ca/cacerts on receiver for kafka to run --- salt/allowed_states.map.jinja | 3 ++- salt/elasticsearch/ca.sls | 2 +- salt/kafka/enabled.sls | 1 + 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 091cb37866..57cff5b4fa 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -191,7 +191,8 @@ 'firewall', 'schedule', 'docker_clean', - 'kafka' + 'kafka', + 'elasticsearch.ca' ], 'so-desktop': [ 'ssl', diff --git a/salt/elasticsearch/ca.sls b/salt/elasticsearch/ca.sls index 5485bb6764..188450311d 100644 --- a/salt/elasticsearch/ca.sls +++ b/salt/elasticsearch/ca.sls @@ -4,7 +4,7 @@ # Elastic License 2.0. 
{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} +{% if sls.split('.')[0] in allowed_states or sls in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} # Move our new CA over so Elastic and Logstash can use SSL with the internal CA diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index ed26297b3e..a42b6f18ba 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -10,6 +10,7 @@ {% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} include: + - elasticsearch.ca - kafka.sostatus - kafka.config - kafka.storage From bb983d4ba2570c1764f4131743c82b98700d595a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 12 Apr 2024 16:16:03 -0400 Subject: [PATCH 051/222] just broker as default process --- salt/kafka/defaults.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 7828f0536b..91f55a07dc 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -2,7 +2,7 @@ kafka: enabled: False config: server: - advertised_x_listeners: BROKER://10.66.166.231:9092 + advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_listener_x_names: CONTROLLER controller_x_quorum_x_voters: @@ -13,13 +13,13 @@ kafka: log_x_retention_x_check_x_interval_x_ms: 300000 log_x_retention_x_hours: 168 log_x_segment_x_bytes: 1073741824 - node_x_id: + node_x_id: num_x_io_x_threads: 8 num_x_network_x_threads: 3 num_x_partitions: 1 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 - process_x_roles: broker,controller + process_x_roles: broker socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 From de6ea29e3b155de07278dbdee1ff1f63e758f19f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 12 Apr 2024 16:18:53 -0400 Subject: [PATCH 052/222] update default process.role to broker only Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 7828f0536b..3da14acb4f 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -19,7 +19,7 @@ kafka: num_x_partitions: 1 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 - process_x_roles: broker,controller + process_x_roles: broker socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 From c4994a208b9f0746cc7b3867f8623faba41d718a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 15 Apr 2024 11:37:21 -0400 Subject: [PATCH 053/222] restart salt minion if a manager and signing policies change --- salt/salt/minion.sls | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 2cc0327451..8c6f7f0198 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -13,6 +13,9 @@ include: - systemd.reload - repo.client - salt.mine_functions +{% if GLOBALS.role in GLOBALS.manager_roles %} + - ca +{% endif %} {% if INSTALLEDSALTVERSION|string != SALTVERSION|string %} @@ -98,5 +101,8 @@ salt_minion_service: - file: mine_functions {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} - file: set_log_levels +{% endif %} +{% if GLOBALS.role in GLOBALS.manager_roles %} + - file: 
/etc/salt/minion.d/signing_policies.conf {% endif %} - order: last From 4b79623ce31edfbf59763d41747ddb7a79fe7220 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 16 Apr 2024 16:51:35 -0400 Subject: [PATCH 054/222] watch pillar files for changes and do something --- salt/salt/engines/master/pillarWatch.py | 72 +++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 salt/salt/engines/master/pillarWatch.py diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py new file mode 100644 index 0000000000..c45e2383d2 --- /dev/null +++ b/salt/salt/engines/master/pillarWatch.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- + +import logging +from time import sleep +import os +import salt.client +import re + +log = logging.getLogger(__name__) +local = salt.client.LocalClient() + +def start(fpa, interval=10): + log.info("##### PILLARWATCH STARTED #####") + + try: + # maybe change this location + dataFile = open("/opt/so/state/pillarWatch.txt", "r+") + df = dataFile.read() + log.info("df: %s" % str(df)) + except FileNotFoundError: + log.info("No previous pillarWatch data saved") + + currentValues = [] + + log.info("FPA: %s" % str(fpa)) + for i in fpa: + log.trace("files: %s" % i['files']) + log.trace("pillar: %s" % i['pillar']) + log.trace("action: %s" % i['action']) + pillarFiles = i['files'] + pillar = i['pillar'] + action = i['action'] + + patterns = pillar.split(".") + log.trace("pillar: %s" % pillar) + log.trace("patterns: %s" % patterns) + log.trace("patterns length: %i" % len(patterns)) + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 + for pillarFile in pillarFiles: + with open(pillarFile, "r") as file: + log.info("checking file: %s" % pillarFile) + for line in file: + log.trace("line: %s" % line) + log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) + # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar + # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look + # for pipeline. 
once pipeline is found, it will record the value + if re.search(patterns[patternFound], line): + log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) + patternFound += 1 + # we have found the final key in the pillar that we are looking for, get the value + if patternFound == len(patterns): + for l in df.splitlines(): + if pillar in l: + previousPillarValue = l.split(":")[1] + log.info("%s previousPillarValue:%s" % (pillar, str(previousPillarValue))) + currentPillarValue = str(line.split(":")[1]).strip() + log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) + if pillar in df: + df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) + #df = df.replace(pillar, pillar + ': ' + currentPillarValue) + else: + df = pillar + ': ' + currentPillarValue + log.info("df: %s" % df) + #currentValues.append(pillar + ":" + currentPillarValue) + # we have found the pillar so we dont need to loop throught the file anymore + break + dataFile.seek(0) + dataFile.write(df) + dataFile.truncate() + dataFile.close() From 665b7197a6a1820d4ced3bd2da4be7e2000c057a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 17 Apr 2024 17:08:41 -0400 Subject: [PATCH 055/222] Update Kafka nodeid Update so-minion to include running kafka.nodes state to ensure nodeid is generated for new brokers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/kafka/nodes.sls | 30 +------------ salt/kafka/enabled.sls | 7 ++- salt/kafka/files/managed_node_pillar.jinja | 7 +++ salt/kafka/nodes.map.jinja | 50 ++++++++++++++++++++++ salt/kafka/nodes.sls | 19 ++++++++ salt/manager/tools/sbin/so-minion | 4 ++ 6 files changed, 86 insertions(+), 31 deletions(-) create mode 100644 salt/kafka/files/managed_node_pillar.jinja create mode 100644 salt/kafka/nodes.map.jinja create mode 100644 salt/kafka/nodes.sls diff --git a/pillar/kafka/nodes.sls b/pillar/kafka/nodes.sls index 447e7a35db..ba14c219ee 100644 --- a/pillar/kafka/nodes.sls +++ b/pillar/kafka/nodes.sls @@ -1,30 +1,2 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %} - -{% set existing_ids = [] %} -{% for node in pillar_kafkanodes.values() %} - {% if node.get('id') %} - {% do existing_ids.append(node['nodeid']) %} - {% endif %} -{% endfor %} -{% set all_possible_ids = range(1, 256)|list %} - -{% set available_ids = [] %} -{% for id in all_possible_ids %} - {% if id not in existing_ids %} - {% do available_ids.append(id) %} - {% endif %} -{% endfor %} - -{% set final_nodes = pillar_kafkanodes.copy() %} - -{% for minionid, ip in current_kafkanodes.items() %} - {% set hostname = minionid.split('_')[0] %} - {% if hostname not in final_nodes %} - {% set new_id = available_ids.pop(0) %} - {% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} - {% endif %} -{% endfor %} - kafka: - nodes: {{ final_nodes|tojson }} + nodes: \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index a42b6f18ba..3c4f548f1f 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -1,5 +1,5 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. @@ -7,9 +7,12 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES as KAFKANODES %} include: + {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} + - kafka.nodes + {% endif %} - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/kafka/files/managed_node_pillar.jinja b/salt/kafka/files/managed_node_pillar.jinja new file mode 100644 index 0000000000..fb2ef410ec --- /dev/null +++ b/salt/kafka/files/managed_node_pillar.jinja @@ -0,0 +1,7 @@ +kafka: + nodes: +{% for node, values in COMBINED_KAFKANODES.items() %} + {{ node }}: + ip: {{ values['ip'] }} + nodeid: {{ values['nodeid'] }} +{% endfor %} \ No newline at end of file diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja new file mode 100644 index 0000000000..a6e36d36a6 --- /dev/null +++ b/salt/kafka/nodes.map.jinja @@ -0,0 +1,50 @@ +{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} +{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} + +{% set existing_ids = [] %} + +{# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{% for node, values in STORED_KAFKANODES.nodes.items() %} +{% if values.get('nodeid') %} +{% do existing_ids.append(values['nodeid']) %} +{% endif %} +{% endfor %} +{% endif %} + +{# Create list of possible node ids #} +{% set all_possible_ids = range(1, 65536)|list %} + +{# Don't like the below loop because the higher the range for all_possible_ids the more time spent on loop #} +{# Create list of available node ids by looping through all_possible_ids and ensuring it isn't in existing_ids #} +{% set available_ids = [] %} +{% for id in all_possible_ids %} +{% if id not in existing_ids %} +{% do available_ids.append(id) %} +{% endif %} +{% endfor %} + +{# Collect kafka eligible nodes and check if they're already in STORED_KAFKANODES to avoid potentially reassigning a nodeid #} +{% set NEW_KAFKANODES = {} %} +{% for minionid, ip in current_kafkanodes.items() %} +{% set hostname = minionid.split('_')[0] %} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} +{% set new_id = available_ids.pop(0) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% endif %} +{% if hostname not in NEW_KAFKANODES %} +{% set new_id = available_ids.pop(0) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% endif %} +{% endfor %} + +{# Combine STORED_KAFKANODES and NEW_KAFKANODES for writing to the pillar/kafka/nodes.sls #} +{% set COMBINED_KAFKANODES = {} %} +{% for node, details in NEW_KAFKANODES.items() %} +{% do COMBINED_KAFKANODES.update({node: details}) %} +{% endfor %} +{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{% for node, details in STORED_KAFKANODES.nodes.items() %} +{% do 
COMBINED_KAFKANODES.update({node: details}) %} +{% endfor %} +{% endif %} diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls new file mode 100644 index 0000000000..5085c6ccab --- /dev/null +++ b/salt/kafka/nodes.sls @@ -0,0 +1,19 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. +{% from 'vars/globals.map.jinja' import GLOBALS %} +{% if GLOBALS.pipeline == "KAFKA" %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} + +{# Store kafka pillar in a file rather than memory where values could be lost. Kafka does not support nodeid's changing #} +write_kafka_pillar_yaml: + file.managed: + - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls + - mode: 644 + - user: socore + - source: salt://kafka/files/managed_node_pillar.jinja + - template: jinja + - context: + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} +{% endif %} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 34e069ece8..72ae552099 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -616,6 +616,10 @@ function updateMineAndApplyStates() { salt $MINIONID state.apply elasticsearch queue=True --async salt $MINIONID state.apply soc queue=True --async fi + if [[ "$NODETYPE" == "RECEIVER" ]]; then + # Setup nodeid for Kafka + salt-call state.apply kafka.nodes queue=True + fi # run this async so the cli doesn't wait for a return salt "$MINION_ID" state.highstate --async queue=True } From 4caa6a10b586d733f02df376e715e9f4338fbe8e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 17 Apr 2024 18:09:04 -0400 Subject: [PATCH 056/222] watch a pillar in files and take action --- salt/salt/engines/master/pillarWatch.py | 47 +++++++++++++++++++------ salt/salt/files/engines.conf | 30 ++++++++++++++++ 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index c45e2383d2..c6f343dc80 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -5,6 +5,7 @@ import os import salt.client import re +from ast import literal_eval log = logging.getLogger(__name__) local = salt.client.LocalClient() @@ -12,24 +13,26 @@ def start(fpa, interval=10): log.info("##### PILLARWATCH STARTED #####") + # try to open the file that stores the previous runs data + # if the file doesn't exist, create a blank one try: # maybe change this location dataFile = open("/opt/so/state/pillarWatch.txt", "r+") - df = dataFile.read() - log.info("df: %s" % str(df)) except FileNotFoundError: log.info("No previous pillarWatch data saved") + dataFile = open("/opt/so/state/pillarWatch.txt", "w+") - currentValues = [] + df = dataFile.read() + log.info("df: %s" % str(df)) log.info("FPA: %s" % str(fpa)) for i in fpa: log.trace("files: %s" % i['files']) log.trace("pillar: %s" % i['pillar']) - log.trace("action: %s" % i['action']) + log.trace("action: %s" % i['actions']) pillarFiles = i['files'] pillar = i['pillar'] - action = i['action'] + actions = i['actions'] patterns = pillar.split(".") log.trace("pillar: %s" % pillar) @@ -49,23 +52,45 @@ def start(fpa, interval=10): if re.search(patterns[patternFound], line): log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) 
patternFound += 1 - # we have found the final key in the pillar that we are looking for, get the value + # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): + # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars + previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' for l in df.splitlines(): if pillar in l: - previousPillarValue = l.split(":")[1] - log.info("%s previousPillarValue:%s" % (pillar, str(previousPillarValue))) + previousPillarValue = l.split(":")[1].strip() + log.info("%s previousPillarValue: %s" % (pillar, str(previousPillarValue))) currentPillarValue = str(line.split(":")[1]).strip() log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) if pillar in df: df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) - #df = df.replace(pillar, pillar + ': ' + currentPillarValue) else: - df = pillar + ': ' + currentPillarValue + df += pillar + ': ' + currentPillarValue + '\n' log.info("df: %s" % df) - #currentValues.append(pillar + ":" + currentPillarValue) # we have found the pillar so we dont need to loop throught the file anymore break + if currentPillarValue != previousPillarValue: + log.info("cPV != pPV: %s != %s" % (currentPillarValue,previousPillarValue)) + if previousPillarValue in actions['from']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + elif '*' in actions['from']: + # need more logic here for to and from + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS='FROM TO NOT DEFINED' + #for f in actions: + log.info("actions: %s" % actions['from']) + log.info("ACTIONS: %s" % ACTIONS) + for action in ACTIONS: + log.info(action) + for saltModule, args in action.items(): + log.info(saltModule) + log.info(args) + # args=list(action.values())[0] + # log.info(args) + whatHappened = __salt__[saltModule](**args) + log.info("whatHappened: %s" % whatHappened) + dataFile.seek(0) dataFile.write(df) dataFile.truncate() diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 7c43e99e10..26be7bf370 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -4,3 +4,33 @@ engines_dirs: engines: - checkmine: interval: 60 + - pillarWatch: + fpa: + - files: + - /opt/so/saltstack/local/pillar/global/soc_global.sls + - /opt/so/saltstack/local/pillar/global/adv_global.sls + pillar: global.pipeline + actions: + from: + REDIS: + to: + KAFKA: + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True + KAFKA: + to: + REDIS: + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False + - files: + - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls + - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls + pillar: idstools.config.ruleset + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: /usr/sbin/so-rule-update + interval: 10 From 506bbd314df0eff9fbd6a24f60135991e5270818 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 10:26:10 -0400 Subject: [PATCH 057/222] more comments, better logging --- salt/salt/engines/master/pillarWatch.py | 96 +++++++++++++++---------- salt/salt/files/engines.conf | 4 +- 2 files changed, 60 insertions(+), 40 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index 
c6f343dc80..b19a6d709e 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -1,17 +1,19 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + # -*- coding: utf-8 -*- import logging -from time import sleep -import os -import salt.client import re -from ast import literal_eval +import salt.client log = logging.getLogger(__name__) local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("##### PILLARWATCH STARTED #####") + log.info("pillarWatch engine: started") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -19,77 +21,95 @@ def start(fpa, interval=10): # maybe change this location dataFile = open("/opt/so/state/pillarWatch.txt", "r+") except FileNotFoundError: - log.info("No previous pillarWatch data saved") + log.warn("pillarWatch engine: No previous pillarWatch data saved") dataFile = open("/opt/so/state/pillarWatch.txt", "w+") df = dataFile.read() - log.info("df: %s" % str(df)) - - log.info("FPA: %s" % str(fpa)) for i in fpa: - log.trace("files: %s" % i['files']) - log.trace("pillar: %s" % i['pillar']) - log.trace("action: %s" % i['actions']) + log.trace("pillarWatch engine: files: %s" % i['files']) + log.trace("pillarWatch engine: pillar: %s" % i['pillar']) + log.trace("pillarWatch engine: actions: %s" % i['actions']) pillarFiles = i['files'] pillar = i['pillar'] actions = i['actions'] - + # these are the keys that we are going to look for as we traverse the pillarFiles patterns = pillar.split(".") - log.trace("pillar: %s" % pillar) - log.trace("patterns: %s" % patterns) - log.trace("patterns length: %i" % len(patterns)) # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later patternFound = 0 for pillarFile in pillarFiles: with open(pillarFile, "r") as file: - log.info("checking file: %s" % pillarFile) + log.info("pillarWatch engine: checking file: %s" % pillarFile) for line in file: - log.trace("line: %s" % line) + log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look # for pipeline. once pipeline is found, it will record the value - if re.search(patterns[patternFound], line): - log.trace("PILLARWATCH FOUND: %s" % patterns[patternFound]) + if re.search(patterns[patternFound] + ':', line): + # strip the newline because it makes the logs u-g-l-y + log.info("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): # at this point, df is equal to the contents of the pillarWatch file that is used to tract the previous values of the pillars previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE' + # check the contents of the dataFile that stores the previousPillarValue(s). 
+ # find if the pillar we are checking for changes has previously been saved. if so, grab it's prior value for l in df.splitlines(): if pillar in l: - previousPillarValue = l.split(":")[1].strip() - log.info("%s previousPillarValue: %s" % (pillar, str(previousPillarValue))) + previousPillarValue = str(l.split(":")[1].strip()) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) currentPillarValue = str(line.split(":")[1]).strip() - log.info("%s currentPillarValue: %s" % (pillar,currentPillarValue)) + log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + # if the pillar we are checking for changes has been defined in the dataFile, + # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. if pillar in df: df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df) else: df += pillar + ': ' + currentPillarValue + '\n' - log.info("df: %s" % df) + log.trace("pillarWatch engine: df: %s" % df) # we have found the pillar so we dont need to loop throught the file anymore break + # if the pillar value changed, then we find what actions we should take + log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") + log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) if currentPillarValue != previousPillarValue: - log.info("cPV != pPV: %s != %s" % (currentPillarValue,previousPillarValue)) + log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) + # check if the previous pillar value is defined in the pillar from -> to actions if previousPillarValue in actions['from']: - ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # check if the new / current pillar value is defined under to + if currentPillarValue in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to'][currentPillarValue] + # if the new / current pillar value isn't defined under to, is there a wildcard defined + elif '*' in actions['from'][previousPillarValue]['to']: + ACTIONS=actions['from'][previousPillarValue]['to']['*'] + # no action was defined for us to take when we see the pillar change + else: + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from elif '*' in actions['from']: + # is the new pillar value defined for the wildcard match + if currentPillarValue in actions['from']['*']['to']: + ACTIONS=actions['from']['*']['to'][currentPillarValue] + # if the new pillar doesn't have an action, was a wildcard defined + elif '*' in actions['from']['*']['to']: # need more logic here for to and from - ACTIONS=actions['from']['*']['to']['*'] + ACTIONS=actions['from']['*']['to']['*'] + else: + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: - ACTIONS='FROM TO NOT DEFINED' - #for f in actions: - log.info("actions: %s" % actions['from']) - log.info("ACTIONS: %s" % ACTIONS) + ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + log.info("pillarWatch engine: actions: %s" % actions['from']) + log.info("pillarWatch engine: ACTIONS: %s" % ACTIONS) for action in ACTIONS: - log.info(action) + 
log.info("pillarWatch engine: action: %s" % action) for saltModule, args in action.items(): - log.info(saltModule) - log.info(args) - # args=list(action.values())[0] - # log.info(args) - whatHappened = __salt__[saltModule](**args) - log.info("whatHappened: %s" % whatHappened) + log.info("pillarWatch engine: saltModule: %s" % saltModule) + log.info("pillarWatch engine: args: %s" % args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 26be7bf370..de5c1e43a7 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -12,14 +12,14 @@ engines: pillar: global.pipeline actions: from: - REDIS: + '*': to: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True KAFKA: to: - REDIS: + '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - files: From 610dd2c08d3907eac18b2a1841f7fc4d5d6665d1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 11:11:14 -0400 Subject: [PATCH 058/222] improve it --- salt/salt/engines/master/pillarWatch.py | 27 ++++++++++++++----------- salt/salt/files/engines.conf | 8 ++++++++ 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index b19a6d709e..abbb632f8d 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -13,7 +13,7 @@ local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("pillarWatch engine: started") + log.info("pillarWatch engine: checking watched pillars for changes") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -26,6 +26,8 @@ def start(fpa, interval=10): df = dataFile.read() for i in fpa: + currentPillarValue = '' + previousPillarValue = '' log.trace("pillarWatch engine: files: %s" % i['files']) log.trace("pillarWatch engine: pillar: %s" % i['pillar']) log.trace("pillarWatch engine: actions: %s" % i['actions']) @@ -58,9 +60,9 @@ def start(fpa, interval=10): for l in df.splitlines(): if pillar in l: previousPillarValue = str(l.split(":")[1].strip()) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) currentPillarValue = str(line.split(":")[1]).strip() log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) # if the pillar we are checking for changes has been defined in the dataFile, # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
if pillar in df: @@ -86,7 +88,7 @@ def start(fpa, interval=10): ACTIONS=actions['from'][previousPillarValue]['to']['*'] # no action was defined for us to take when we see the pillar change else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from elif '*' in actions['from']: # is the new pillar value defined for the wildcard match @@ -97,19 +99,20 @@ def start(fpa, interval=10): # need more logic here for to and from ACTIONS=actions['from']['*']['to']['*'] else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: - ACTIONS='NO DEFINED ACTION FOR US TO TAKE' - log.info("pillarWatch engine: actions: %s" % actions['from']) - log.info("pillarWatch engine: ACTIONS: %s" % ACTIONS) + ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] + log.info("pillarWatch engine: all defined actions: %s" % actions['from']) + log.info("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) for action in ACTIONS: log.info("pillarWatch engine: action: %s" % action) - for saltModule, args in action.items(): - log.info("pillarWatch engine: saltModule: %s" % saltModule) - log.info("pillarWatch engine: args: %s" % args) - actionReturn = __salt__[saltModule](**args) - log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + if action != 'NO DEFINED ACTION FOR US TO TAKE': + for saltModule, args in action.items(): + log.info("pillarWatch engine: saltModule: %s" % saltModule) + log.info("pillarWatch engine: args: %s" % args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index de5c1e43a7..c15194eaf0 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -17,11 +17,19 @@ engines: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True + - cmd.run: + cmd: salt-call saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.highstate & KAFKA: to: '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False + - cmd.run: + cmd: salt-call saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.highstate & - files: - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls From 5cc358de4ef20965b708436c6fba707684227760 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 11:58:25 -0400 Subject: [PATCH 059/222] Update map files to handle empty kafka:nodes pillar Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/map.jinja | 4 ++-- salt/kafka/nodes.map.jinja | 9 ++++++--- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index 771e6102bd..56f85144a5 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -6,13 +6,13 @@ {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} {% from 
'vars/globals.map.jinja' import GLOBALS %} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} {% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} {% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} -{% set nodes = salt['pillar.get']('kafka:nodes', {}) %} {% set combined = [] %} -{% for hostname, data in nodes.items() %} +{% for hostname, data in COMBINED_KAFKANODES.items() %} {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} {% endfor %} {% set kraft_controller_quorum_voters = ','.join(combined) %} diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index a6e36d36a6..5d74e9e1c0 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -4,7 +4,8 @@ {% set existing_ids = [] %} {# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} +{% if STORED_KAFKANODES != none %} {% for node, values in STORED_KAFKANODES.nodes.items() %} {% if values.get('nodeid') %} {% do existing_ids.append(values['nodeid']) %} @@ -28,7 +29,8 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} #} +{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.nodes %} {% set new_id = available_ids.pop(0) %} {% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} {% endif %} @@ -43,7 +45,8 @@ {% for node, details in NEW_KAFKANODES.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} -{% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} +{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} +{% if STORED_KAFKANODES != none %} {% for node, details in STORED_KAFKANODES.nodes.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} From 1f6eb9cdc34e496979c25223c32721fb9ad838b6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 18 Apr 2024 13:50:37 -0400 Subject: [PATCH 060/222] match keys better. 
go through files reverse first found is prio --- salt/salt/engines/master/pillarWatch.py | 24 +++++++++++++++--------- salt/salt/files/engines.conf | 1 + 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index abbb632f8d..9f85a07c47 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -26,8 +26,6 @@ def start(fpa, interval=10): df = dataFile.read() for i in fpa: - currentPillarValue = '' - previousPillarValue = '' log.trace("pillarWatch engine: files: %s" % i['files']) log.trace("pillarWatch engine: pillar: %s" % i['pillar']) log.trace("pillarWatch engine: actions: %s" % i['actions']) @@ -36,9 +34,12 @@ def start(fpa, interval=10): actions = i['actions'] # these are the keys that we are going to look for as we traverse the pillarFiles patterns = pillar.split(".") - # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later - patternFound = 0 - for pillarFile in pillarFiles: + # check the pillar files in reveresed order to replicate the same hierarchy as the pillar top file + for pillarFile in reversed(pillarFiles): + currentPillarValue = '' + previousPillarValue = '' + # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later + patternFound = 0 with open(pillarFile, "r") as file: log.info("pillarWatch engine: checking file: %s" % pillarFile) for line in file: @@ -47,7 +48,7 @@ def start(fpa, interval=10): # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look # for pipeline. once pipeline is found, it will record the value - if re.search(patterns[patternFound] + ':', line): + if re.search('^' + patterns[patternFound] + ':', line.strip()): # strip the newline because it makes the logs u-g-l-y log.info("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 @@ -70,8 +71,12 @@ def start(fpa, interval=10): else: df += pillar + ': ' + currentPillarValue + '\n' log.trace("pillarWatch engine: df: %s" % df) - # we have found the pillar so we dont need to loop throught the file anymore + # we have found the pillar so we dont need to loop through the file anymore break + # if key and value was found in the first file, then we don't want to look in + # any more files since we use the first file as the source of truth. 
+ if patternFound == len(patterns): + break # if the pillar value changed, then we find what actions we should take log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) @@ -111,8 +116,9 @@ def start(fpa, interval=10): for saltModule, args in action.items(): log.info("pillarWatch engine: saltModule: %s" % saltModule) log.info("pillarWatch engine: args: %s" % args) - actionReturn = __salt__[saltModule](**args) - log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + __salt__[saltModule](**args) + #actionReturn = __salt__[saltModule](**args) + #log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index c15194eaf0..3066f588cc 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -6,6 +6,7 @@ engines: interval: 60 - pillarWatch: fpa: + # these files will be checked in reversed order to replicate the same hierarchy as the pillar top file - files: - /opt/so/saltstack/local/pillar/global/soc_global.sls - /opt/so/saltstack/local/pillar/global/adv_global.sls From fe81ffaf78929c0d83dc3d465d6153b4c5d66070 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:11:22 -0400 Subject: [PATCH 061/222] Variables no longer used. Replaced by map file Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index c856c4f809..b1a31d23f5 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -7,23 +7,6 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_ips_logstash = [] %} -{% set kafka_ips_kraft = [] %} -{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %} -{% set kafka_ip = GLOBALS.node_ip %} - -{# Create list for kafka <-> logstash/searchnode communcations #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% set kafka_server_list = "','".join(kafka_ips_logstash) %} - -{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #} -{% for node, node_data in kafkanodes.items() %} -{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %} -{% endfor %} -{% set kraft_server_list = "','".join(kafka_ips_kraft) %} - include: - ssl From 746128e37b214251716d85702239464e3e3d8a4c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 15:13:29 -0400 Subject: [PATCH 062/222] update so-kafka-clusterid This is a temporary script used to setup kafka secret and clusterid needed for kafka to start. 
This scripts functionality will be replaced by soup/setup scripts Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.map.jinja | 2 +- salt/manager/tools/sbin/so-kafka-clusterid | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 5d74e9e1c0..36f789259f 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -1,5 +1,5 @@ {% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %} +{% set STORED_KAFKANODES = salt['pillar.get']('kafka', {}) %} {% set existing_ids = [] %} diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 7ac0559974..829e4fc87f 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -20,10 +20,10 @@ fi if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls +fi if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi +fi \ No newline at end of file From 4ac04a1a4671d0d2ff3e486801ab4b4f50cebcbb Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 18 Apr 2024 16:46:36 -0400 Subject: [PATCH 063/222] add kafkapass soc annotation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/soc_kafka.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 8a6c516a9b..500ad59c3b 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -9,6 +9,10 @@ kafka: sensitive: True helpLink: kafka.html config: + kafkapass: + description: The password to use for the Kafka certificates. + sensitive: True + helpLink: kafka.html server: advertised_x_listeners: description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. From 6c5e0579cf01a106ff16347e7817b40407e46744 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 19 Apr 2024 09:32:32 -0400 Subject: [PATCH 064/222] logging changes. 
ensure salt master has pillarWatch engine --- salt/salt/engines/master/pillarWatch.py | 35 ++++++++++++------------- salt/salt/files/engines.conf | 16 +++++------ salt/salt/master.sls | 5 ++++ 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/salt/salt/engines/master/pillarWatch.py b/salt/salt/engines/master/pillarWatch.py index 9f85a07c47..0f6f0ba6a1 100644 --- a/salt/salt/engines/master/pillarWatch.py +++ b/salt/salt/engines/master/pillarWatch.py @@ -7,13 +7,14 @@ import logging import re -import salt.client - log = logging.getLogger(__name__) -local = salt.client.LocalClient() + +# will need this in future versions of this engine +#import salt.client +#local = salt.client.LocalClient() def start(fpa, interval=10): - log.info("pillarWatch engine: checking watched pillars for changes") + log.info("pillarWatch engine: ##### checking watched pillars for changes #####") # try to open the file that stores the previous runs data # if the file doesn't exist, create a blank one @@ -41,7 +42,7 @@ def start(fpa, interval=10): # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later patternFound = 0 with open(pillarFile, "r") as file: - log.info("pillarWatch engine: checking file: %s" % pillarFile) + log.debug("pillarWatch engine: checking file: %s" % pillarFile) for line in file: log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file)) log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound]) @@ -50,7 +51,7 @@ def start(fpa, interval=10): # for pipeline. once pipeline is found, it will record the value if re.search('^' + patterns[patternFound] + ':', line.strip()): # strip the newline because it makes the logs u-g-l-y - log.info("pillarWatch engine: found: %s" % line.strip('\n')) + log.debug("pillarWatch engine: found: %s" % line.strip('\n')) patternFound += 1 # we have found the final key in the pillar that we are looking for, get the previous value then the current value if patternFound == len(patterns): @@ -62,8 +63,8 @@ def start(fpa, interval=10): if pillar in l: previousPillarValue = str(l.split(":")[1].strip()) currentPillarValue = str(line.split(":")[1]).strip() - log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) + log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) # if the pillar we are checking for changes has been defined in the dataFile, # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it. 
if pillar in df: @@ -78,9 +79,7 @@ def start(fpa, interval=10): if patternFound == len(patterns): break # if the pillar value changed, then we find what actions we should take - log.info("pillarWatch engine: checking if currentPillarValue != previousPillarValue") - log.info("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue)) - log.info("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue)) + log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue") if currentPillarValue != previousPillarValue: log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue)) # check if the previous pillar value is defined in the pillar from -> to actions @@ -108,17 +107,17 @@ def start(fpa, interval=10): # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard else: ACTIONS=['NO DEFINED ACTION FOR US TO TAKE'] - log.info("pillarWatch engine: all defined actions: %s" % actions['from']) - log.info("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) + log.debug("pillarWatch engine: all defined actions: %s" % actions['from']) + log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue)) for action in ACTIONS: log.info("pillarWatch engine: action: %s" % action) if action != 'NO DEFINED ACTION FOR US TO TAKE': for saltModule, args in action.items(): - log.info("pillarWatch engine: saltModule: %s" % saltModule) - log.info("pillarWatch engine: args: %s" % args) - __salt__[saltModule](**args) - #actionReturn = __salt__[saltModule](**args) - #log.info("pillarWatch engine: actionReturn: %s" % actionReturn) + log.debug("pillarWatch engine: saltModule: %s" % saltModule) + log.debug("pillarWatch engine: args: %s" % args) + #__salt__[saltModule](**args) + actionReturn = __salt__[saltModule](**args) + log.info("pillarWatch engine: actionReturn: %s" % actionReturn) dataFile.seek(0) dataFile.write(df) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 3066f588cc..bee9493eef 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -18,19 +18,19 @@ engines: KAFKA: - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - - cmd.run: - cmd: salt-call saltutil.kill_all_jobs - - cmd.run: - cmd: salt-call state.highstate & +# - cmd.run: +# cmd: salt-call saltutil.kill_all_jobs +# - cmd.run: +# cmd: salt-call state.highstate & KAFKA: to: '*': - cmd.run: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - - cmd.run: - cmd: salt-call saltutil.kill_all_jobs - - cmd.run: - cmd: salt-call state.highstate & +# - cmd.run: +# cmd: salt-call saltutil.kill_all_jobs +# - cmd.run: +# cmd: salt-call state.highstate & - files: - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e014..51acca61d7 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -27,6 +27,11 @@ checkmine_engine: - source: salt://salt/engines/master/checkmine.py - makedirs: True +pillarWatch_engine: + file.managed: + - name: 
/etc/salt/engines/pillarWatch.py + - source: salt://salt/engines/master/pillarWatch.py + engines_config: file.managed: - name: /etc/salt/master.d/engines.conf From 25d63f751676b9fc708d04b4e4a5f60dc0e392e2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 22 Apr 2024 16:42:59 -0400 Subject: [PATCH 065/222] Setup kafka reactor for managing kafka controllers globally Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/controllers.sls | 20 ++++++++++++++++++++ salt/kafka/enabled.sls | 1 + salt/reactor/kafka.sls | 16 ++++++++++++++++ salt/salt/files/reactor.conf | 3 +++ salt/salt/master.sls | 5 +++++ 5 files changed, 45 insertions(+) create mode 100644 salt/kafka/controllers.sls create mode 100644 salt/reactor/kafka.sls create mode 100644 salt/salt/files/reactor.conf diff --git a/salt/kafka/controllers.sls b/salt/kafka/controllers.sls new file mode 100644 index 0000000000..c6df07b0c6 --- /dev/null +++ b/salt/kafka/controllers.sls @@ -0,0 +1,20 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'allowed_states.map.jinja' import allowed_states %} +{% if sls.split('.')[0] in allowed_states %} +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} + +{% set process_x_roles = salt['pillar.get']('kafka:config:server:process_x_roles', KAFKADEFAULTS.kafka.config.server.process_x_roles, merge=true) %} + +{# Send an event to the salt master at every highstate. Containing the minions process_x_roles. + if no value is set for this minion then the default in kafka/defaults.yaml is used #} +push_event_to_master: + event.send: + - name: kafka/controllers_update + - data: + id: {{ grains['id'] }} + process_x_roles: {{ process_x_roles }} +{% endif %} diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 3c4f548f1f..d05a49a0ec 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -13,6 +13,7 @@ include: {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} - kafka.nodes {% endif %} + - kafka.controllers - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/reactor/kafka.sls b/salt/reactor/kafka.sls new file mode 100644 index 0000000000..879fb54310 --- /dev/null +++ b/salt/reactor/kafka.sls @@ -0,0 +1,16 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +https://securityonion.net/license; you may not use this file except in compliance with the +Elastic License 2.0. 
#} + +{% set minionid = data['id'].split('_')[0] %} +{% set role = data['data']['process_x_roles'] %} + +{# Run so-yaml to replace kafka.node..role with the value from kafka/controllers.sls #} + +update_global_kafka_pillar: + local.cmd.run: + - tgt: 'G@role:so-manager or G@role:so-managersearch or G@role:so-standalone' + - tgt_type: compound + - arg: + - '/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/nodes.sls kafka.nodes.{{ minionid }}.role {{ role }}' \ No newline at end of file diff --git a/salt/salt/files/reactor.conf b/salt/salt/files/reactor.conf new file mode 100644 index 0000000000..1293055720 --- /dev/null +++ b/salt/salt/files/reactor.conf @@ -0,0 +1,3 @@ +reactor: + - 'kafka/controllers_update': + - salt://reactor/kafka.sls \ No newline at end of file diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 0a65f3e014..76340fb3d4 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -32,6 +32,11 @@ engines_config: - name: /etc/salt/master.d/engines.conf - source: salt://salt/files/engines.conf +reactor_config: + file.managed: + - name: /etc/salt/master.d/reactor.conf + - source: salt://salt/files/reactor.conf + salt_master_service: service.running: - name: salt-master From 5a401af1fddb967a52d1d66d089b6f48746639f7 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 22 Apr 2024 16:44:35 -0400 Subject: [PATCH 066/222] Update kafka process_x_roles annotation Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/soc_kafka.yaml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 500ad59c3b..47ff057191 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -92,8 +92,10 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The roles the process performs. Use a comma-seperated list is multiple. + description: The roles performed by Kafka node. Default is to act as 'broker' only. title: process.roles + regex: ^(broker|controller|broker,controller|controller,broker)$ + regexFailureMessage: Valid values include 'broker' 'controller' or 'broker,controller' helpLink: kafka.html socket_x_receive_x_buffer_x_bytes: description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default. 
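
Taken together, the reactor above (which records each minion's process_x_roles into the kafka nodes pillar) and the nodeid assignment from kafka/nodes.map.jinja leave /opt/so/saltstack/local/pillar/kafka/nodes.sls shaped roughly like the sketch below. This is a minimal illustration only: the hostnames (mgr01, rcv01), IPs, and node IDs are hypothetical examples, not values produced by these patches.

# hypothetical contents of /opt/so/saltstack/local/pillar/kafka/nodes.sls
kafka:
  nodes:
    mgr01:                    # example hostname only
      ip: 192.0.2.10          # example IP only
      nodeid: 1
      role: broker,controller
    rcv01:                    # example hostname only
      ip: 192.0.2.20          # example IP only
      nodeid: 2
      role: broker

Given a pillar like this, the loop in salt/kafka/map.jinja joins every entry as nodeid@hostname:9093, so controller.quorum.voters would come out as "1@mgr01:9093,2@rcv01:9093", while each broker continues to advertise BROKER://<node_ip>:9092 to Logstash and other clients.
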
From aa0c589361f3719173d264d5aa7856f4ac66fcbd Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 23 Apr 2024 13:51:12 -0400 Subject: [PATCH 067/222] Update kafka managed node pillar template to include its process.role Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/files/managed_node_pillar.jinja | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/kafka/files/managed_node_pillar.jinja b/salt/kafka/files/managed_node_pillar.jinja index fb2ef410ec..aa4f7e5023 100644 --- a/salt/kafka/files/managed_node_pillar.jinja +++ b/salt/kafka/files/managed_node_pillar.jinja @@ -4,4 +4,7 @@ kafka: {{ node }}: ip: {{ values['ip'] }} nodeid: {{ values['nodeid'] }} +{%- if values['role'] != none %} + role: {{ values['role'] }} +{%- endif %} {% endfor %} \ No newline at end of file From 36573d6005a40ccd7b83fe32087a453f0abb5a48 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 23 Apr 2024 16:45:36 -0400 Subject: [PATCH 068/222] Update kafka cert permissions Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 853afb2b3d..854628949a 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -856,10 +856,10 @@ kafka_logstash_pkcs12_perms: - user: 960 - group: 931 -kafka_pkcs8_perms: +elasticfleet_kafka_pkcs8_perms: file.managed: - replace: False - - name: /etc/pki/kafka.p8 + - name: /etc/pki/elasticfleet-kafka.p8 - mode: 640 - user: 960 - group: 939 From 29c964cca12cbaa1e2cb5e2e0a5b066c9953ccab Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 09:04:52 -0400 Subject: [PATCH 069/222] Set kafka.nodes state to run first to populate kafka.nodes pillar Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/init.sls | 4 ++++ salt/kafka/nodes.sls | 10 ++++------ 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index acedba3c36..c4351ebfc0 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -7,6 +7,10 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} include: +{# Run kafka/nodes.sls before Kafka is enabled, so kafka nodes pillar is setup #} +{% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %} + - kafka.nodes +{% endif %} {% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled {% else %} diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls index 5085c6ccab..edc5f07018 100644 --- a/salt/kafka/nodes.sls +++ b/salt/kafka/nodes.sls @@ -2,11 +2,10 @@ # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% if GLOBALS.pipeline == "KAFKA" %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} -{# Store kafka pillar in a file rather than memory where values could be lost. 
Kafka does not support nodeid's changing #} +{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} + +{# Write Kafka pillar, so all grid members have access to nodeid of other kafka nodes and their roles #} write_kafka_pillar_yaml: file.managed: - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls @@ -15,5 +14,4 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} -{% endif %} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file From 086ebe1a7c0d3b274e5824d4a352e50904a69b1e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 09:08:14 -0400 Subject: [PATCH 070/222] Split kafka defaults between broker / controller Setup config.map.jinja to update broker / controller / combined node types Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 66 +++++++++++++++++++++++++ salt/kafka/defaults.yaml | 21 ++++++-- salt/kafka/enabled.sls | 5 +- salt/kafka/etc/server.properties.jinja | 4 +- salt/kafka/map.jinja | 16 ++---- salt/kafka/soc_kafka.yaml | 67 +++++++++++++++++--------- 6 files changed, 134 insertions(+), 45 deletions(-) create mode 100644 salt/kafka/config.map.jinja diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja new file mode 100644 index 0000000000..8f116c02e2 --- /dev/null +++ b/salt/kafka/config.map.jinja @@ -0,0 +1,66 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} +{% from 'kafka/map.jinja' import KAFKAMERGED %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} + +{# Create list of KRaft controllers #} +{% set controllers = [] %} +{% for node, values in KAFKA_NODES_PILLAR.items() %} +{% if 'controller' in values['role'] %} +{% do controllers.append(values.nodeid ~ "@" ~ node ~ ":9093") %} +{% endif %} +{% endfor %} + +{% set kafka_controller_quorum_voters = ','.join(controllers) %} + +{# By default all Kafka eligible nodes are given the role of broker, except for + grid MANAGER (broker,controller) until overridden through SOC UI #} +{% set node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %} + +{# Generate server.properties for 'broker' , 'controller', 'broker,controller' node types + anything above this line is a configuration needed for ALL Kafka nodes #} +{% if node_type == 'broker' %} +{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} + +{% endif %} + +{% if node_type == 'controller' %} +{% do KAFKAMERGED.config.controller.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.controller.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} + +{% endif %} + +{# Kafka nodes of this type are not recommended for use outside of development / testing. 
#} +{% if node_type == 'broker,controller' %} +{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} +{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} +{% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} + +{% do KAFKAMERGED.config.broker.update({ + 'listeners': + KAFKAMERGED.config.broker.listeners + + ',' + + KAFKAMERGED.config.controller.listeners }) + %} + +{% do KAFKAMERGED.config.broker.update({ + 'listener_x_security_x_protocol_x_map': + KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + %} + +{% endif %} + +{% if 'broker' in node_type %} +{% set KAFKACONFIG = KAFKAMERGED.config.broker %} +{% else %} +{% set KAFKACONFIG = KAFKAMERGED.config.controller %} +{% endif %} \ No newline at end of file diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 91f55a07dc..8dcd70b981 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -1,14 +1,16 @@ kafka: enabled: False + cluster_id: + kafka_pass: + kafka_controllers: [] config: - server: + broker: advertised_x_listeners: auto_x_create_x_topics_x_enable: true - controller_x_listener_x_names: CONTROLLER controller_x_quorum_x_voters: inter_x_broker_x_listener_x_name: BROKER - listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 - listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL + listeners: BROKER://0.0.0.0:9092 + listener_x_security_x_protocol_x_map: BROKER:SSL log_x_dirs: /nsm/kafka/data log_x_retention_x_check_x_interval_x_ms: 300000 log_x_retention_x_hours: 168 @@ -37,3 +39,14 @@ kafka: ssl_x_keystore_x_location: /etc/pki/kafka.jks ssl_x_keystore_x_type: JKS ssl_x_keystore_x_password: changeit + controller: + controller_x_listener_x_names: CONTROLLER + controller_x_quorum_x_voters: + listeners: CONTROLLER://0.0.0.0:9093 + listener_x_security_x_protocol_x_map: CONTROLLER:SSL + log_x_dirs: /nsm/kafka/data + log_x_retention_x_check_x_interval_x_ms: 300000 + log_x_retention_x_hours: 168 + log_x_segment_x_bytes: 1073741824 + node_x_id: + process_x_roles: controller \ No newline at end of file diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index d05a49a0ec..ec2dc8e465 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -7,12 +7,9 @@ {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES as KAFKANODES %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} include: - {% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} - - kafka.nodes - {% endif %} - kafka.controllers - elasticsearch.ca - kafka.sostatus diff --git a/salt/kafka/etc/server.properties.jinja b/salt/kafka/etc/server.properties.jinja index df5632ba95..fb0c785cf4 100644 --- a/salt/kafka/etc/server.properties.jinja +++ b/salt/kafka/etc/server.properties.jinja @@ -3,5 +3,5 @@ https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. 
#} -{% from 'kafka/map.jinja' import KAFKAMERGED -%} -{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }} +{% from 'kafka/config.map.jinja' import KAFKACONFIG -%} +{{ KAFKACONFIG | yaml(False) | replace("_x_", ".") }} diff --git a/salt/kafka/map.jinja b/salt/kafka/map.jinja index 56f85144a5..996e5dedfc 100644 --- a/salt/kafka/map.jinja +++ b/salt/kafka/map.jinja @@ -3,18 +3,8 @@ https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} +{# This is only used to determine if Kafka is enabled / disabled. Configuration is found in kafka/config.map.jinja #} +{# kafka/config.map.jinja depends on there being a kafka nodes pillar being populated #} + {% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} {% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %} -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} - -{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %} -{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %} - -{% set combined = [] %} -{% for hostname, data in COMBINED_KAFKANODES.items() %} - {% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %} -{% endfor %} -{% set kraft_controller_quorum_voters = ','.join(combined) %} - -{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 47ff057191..2216aaaa7e 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -8,12 +8,15 @@ kafka: advanced: True sensitive: True helpLink: kafka.html + kafkapass: + description: The password to use for the Kafka certificates. + sensitive: True + helpLink: kafka.html + kafka_controllers: + description: A list of Security Onion grid members that should act as KRaft controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. All other nodes will default to broker roles. + helpLink: kafka.html config: - kafkapass: - description: The password to use for the Kafka certificates. - sensitive: True - helpLink: kafka.html - server: + broker: advertised_x_listeners: description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication. title: advertised.listeners @@ -23,14 +26,6 @@ kafka: title: auto.create.topics.enable forcedType: bool helpLink: kafka.html - controller_x_listener_x_names: - description: Set listeners used by the controller in a comma-seperated list. - title: controller.listener.names - helpLink: kafka.html - controller_x_quorum_x_voters: - description: A comma-seperated list of ID and endpoint information mapped for a set of voters. - title: controller.quorum.voters - helpLink: kafka.html inter_x_broker_x_listener_x_name: description: The name of the listener used for inter-broker communication. title: inter.broker.listener.name @@ -60,12 +55,6 @@ kafka: title: log.segment.bytes forcedType: int helpLink: kafka.html - node_x_id: - description: The node ID corresponds to the roles performed by this process whenever process.roles is populated. - title: node.id - forcedType: int - readonly: True - helpLink: kafka.html num_x_io_x_threads: description: The number of threads used by Kafka. 
title: num.io.threads @@ -92,10 +81,9 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The roles performed by Kafka node. Default is to act as 'broker' only. + description: The role performed by Kafka brokers. title: process.roles - regex: ^(broker|controller|broker,controller|controller,broker)$ - regexFailureMessage: Valid values include 'broker' 'controller' or 'broker,controller' + readonly: True helpLink: kafka.html socket_x_receive_x_buffer_x_bytes: description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default. @@ -174,3 +162,38 @@ kafka: title: ssl.truststore.password sensitive: True helpLink: kafka.html + controller: + controller_x_listener_x_names: + description: Set listeners used by the controller in a comma-seperated list. + title: controller.listener.names + helpLink: kafka.html + listeners: + description: Set of URIs that is listened on and the listener names in a comma-seperated list. + helpLink: kafka.html + listener_x_security_x_protocol_x_map: + description: Comma-seperated mapping of listener name and security protocols. + title: listener.security.protocol.map + helpLink: kafka.html + log_x_dirs: + description: Where Kafka logs are stored within the Docker container. + title: log.dirs + helpLink: kafka.html + log_x_retention_x_check_x_interval_x_ms: + description: Frequency at which log files are checked if they are qualified for deletion. + title: log.retention.check.interval.ms + helpLink: kafka.html + log_x_retention_x_hours: + description: How long, in hours, a log file is kept. + title: log.retention.hours + forcedType: int + helpLink: kafka.html + log_x_segment_x_bytes: + description: The maximum allowable size for a log file. + title: log.segment.bytes + forcedType: int + helpLink: kafka.html + process_x_roles: + description: The role performed by KRaft controller node. + title: process.roles + readonly: True + helpLink: kafka.html \ No newline at end of file From 529c8d7cf21bae87fd6d81d12327baed0ad600af Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:35:46 -0400 Subject: [PATCH 071/222] Remove salt reactor for Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/controllers.sls | 20 -------------------- salt/salt/files/reactor.conf | 3 --- salt/salt/master.sls | 5 ----- 3 files changed, 28 deletions(-) delete mode 100644 salt/kafka/controllers.sls delete mode 100644 salt/salt/files/reactor.conf diff --git a/salt/kafka/controllers.sls b/salt/kafka/controllers.sls deleted file mode 100644 index c6df07b0c6..0000000000 --- a/salt/kafka/controllers.sls +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls.split('.')[0] in allowed_states %} -{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} - -{% set process_x_roles = salt['pillar.get']('kafka:config:server:process_x_roles', KAFKADEFAULTS.kafka.config.server.process_x_roles, merge=true) %} - -{# Send an event to the salt master at every highstate. Containing the minions process_x_roles. 
- if no value is set for this minion then the default in kafka/defaults.yaml is used #} -push_event_to_master: - event.send: - - name: kafka/controllers_update - - data: - id: {{ grains['id'] }} - process_x_roles: {{ process_x_roles }} -{% endif %} diff --git a/salt/salt/files/reactor.conf b/salt/salt/files/reactor.conf deleted file mode 100644 index 1293055720..0000000000 --- a/salt/salt/files/reactor.conf +++ /dev/null @@ -1,3 +0,0 @@ -reactor: - - 'kafka/controllers_update': - - salt://reactor/kafka.sls \ No newline at end of file diff --git a/salt/salt/master.sls b/salt/salt/master.sls index d28f0b1bdb..51acca61d7 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -37,11 +37,6 @@ engines_config: - name: /etc/salt/master.d/engines.conf - source: salt://salt/files/engines.conf -reactor_config: - file.managed: - - name: /etc/salt/master.d/reactor.conf - - source: salt://salt/files/reactor.conf - salt_master_service: service.running: - name: salt-master From fd9a91420db18527e82940b6fbceb821a94d09a2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 11:37:24 -0400 Subject: [PATCH 072/222] Use SOC UI to configure list of KRaft (Kafka) controllers for cluster Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 32 +++++++++++++++++------------ salt/kafka/enabled.sls | 1 - salt/kafka/nodes.map.jinja | 40 ++++++++++++++++++++++++++----------- 3 files changed, 47 insertions(+), 26 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index 8f116c02e2..c9f3e79e23 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -7,13 +7,24 @@ {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} +{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} + {# Create list of KRaft controllers #} {% set controllers = [] %} -{% for node, values in KAFKA_NODES_PILLAR.items() %} -{% if 'controller' in values['role'] %} -{% do controllers.append(values.nodeid ~ "@" ~ node ~ ":9093") %} -{% endif %} -{% endfor %} + +{% if KAFKA_CONTROLLERS_PILLAR != none %} +{% for node in KAFKA_CONTROLLERS_PILLAR %} +{# Check that the user input for kafka_controllers pillar exists as a kafka:node value #} +{% if node in KAFKA_NODES_PILLAR %} +{% do controllers.append(KAFKA_NODES_PILLAR[node]['nodeid'] ~ '@' ~ node ~ ':9093') %} +{% endif %} +{% endfor %} +{% endif %} +{# Ensure in the event that the SOC controllers pillar has a single hostname and that hostname doesn't exist in kafka:nodes + that a controller is still set for the Kafka cluster. 
Defaulting to the grid manager #} +{% if controllers | length < 1 %} +{% do controllers.append(KAFKA_NODES_PILLAR[GLOBALS.manager]['nodeid'] ~ "@" ~ GLOBALS.manager ~ ":9093") %} +{% endif %} {% set kafka_controller_quorum_voters = ','.join(controllers) %} @@ -44,17 +55,12 @@ {% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} {% do KAFKAMERGED.config.broker.update({ - 'listeners': - KAFKAMERGED.config.broker.listeners - + ',' - + KAFKAMERGED.config.controller.listeners }) + 'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.controller.listeners }) %} {% do KAFKAMERGED.config.broker.update({ - 'listener_x_security_x_protocol_x_map': - KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map - + ',' - + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + 'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) %} {% endif %} diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index ec2dc8e465..78e0d87d9d 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -10,7 +10,6 @@ {% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} include: - - kafka.controllers - elasticsearch.ca - kafka.sostatus - kafka.config diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 36f789259f..9b4979e92d 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -1,12 +1,27 @@ -{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %} -{% set STORED_KAFKANODES = salt['pillar.get']('kafka', {}) %} +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{# USED TO GENERATE PILLAR/KAFKA/NODES.SLS. 
#} +{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %} +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} + +{% set current_kafkanodes = salt.saltutil.runner( + 'mine.get', + tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', + fun='network.ip_addrs', + tgt_type='compound') %} + +{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %} {% set existing_ids = [] %} {# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #} -{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} {% if STORED_KAFKANODES != none %} -{% for node, values in STORED_KAFKANODES.nodes.items() %} +{% for node, values in STORED_KAFKANODES.items() %} {% if values.get('nodeid') %} {% do existing_ids.append(values['nodeid']) %} {% endif %} @@ -16,7 +31,6 @@ {# Create list of possible node ids #} {% set all_possible_ids = range(1, 65536)|list %} -{# Don't like the below loop because the higher the range for all_possible_ids the more time spent on loop #} {# Create list of available node ids by looping through all_possible_ids and ensuring it isn't in existing_ids #} {% set available_ids = [] %} {% for id in all_possible_ids %} @@ -29,14 +43,17 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 and hostname not in STORED_KAFKANODES.nodes %} #} -{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.nodes %} +{# Override the default process_x_roles for manager and set to 'broker,controller'. Changes from SOC UI will overwrite this #} +{% if hostname == GLOBALS.manager %} +{% set process_x_roles = 'broker,controller' %} +{% endif %} +{% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.items() %} {% set new_id = available_ids.pop(0) %} -{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %} {% endif %} -{% if hostname not in NEW_KAFKANODES %} +{% if hostname not in NEW_KAFKANODES.items() %} {% set new_id = available_ids.pop(0) %} -{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %} +{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %} {% endif %} {% endfor %} @@ -45,9 +62,8 @@ {% for node, details in NEW_KAFKANODES.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} -{# {% if STORED_KAFKANODES.get('nodes', {}).items() | length > 0 %} #} {% if STORED_KAFKANODES != none %} -{% for node, details in STORED_KAFKANODES.nodes.items() %} +{% for node, details in STORED_KAFKANODES.items() %} {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} {% endif %} From 11055b1d32d0b5a5b7f569d00f1c33eed6cf0f23 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:09:09 -0400 Subject: [PATCH 073/222] Rename kafkapass -> kafka_pass Run so-kafka-clusterid within nodes.sls state so switchover is consistent Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.sls | 10 +++++++++- salt/kafka/soc_kafka.yaml | 2 +- salt/kafka/storage.sls | 10 +--------- salt/manager/tools/sbin/so-kafka-clusterid | 10 +++++----- 4 files changed, 16 
insertions(+), 16 deletions(-) diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls index edc5f07018..7cafb10bc4 100644 --- a/salt/kafka/nodes.sls +++ b/salt/kafka/nodes.sls @@ -4,6 +4,7 @@ # Elastic License 2.0. {% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} {# Write Kafka pillar, so all grid members have access to nodeid of other kafka nodes and their roles #} write_kafka_pillar_yaml: @@ -14,4 +15,11 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} + + +{% if kafka_cluster_id is none %} +generate_kafka_cluster_id: + cmd.run: + - name: /usr/sbin/so-kafka-clusterid +{% endif %} \ No newline at end of file diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 2216aaaa7e..505469d6b4 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -8,7 +8,7 @@ kafka: advanced: True sensitive: True helpLink: kafka.html - kafkapass: + kafka_pass: description: The password to use for the Kafka certificates. sensitive: True helpLink: kafka.html diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index fbb7c73280..507c199c60 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -6,15 +6,7 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} -{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %} - -{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %} -{% if kafka_cluster_id is none %} -generate_kafka_cluster_id: - cmd.run: - - name: /usr/sbin/so-kafka-clusterid -{% endif %} -{% endif %} +{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id') %} {# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #} {% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %} diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid index 829e4fc87f..eb0701b8bc 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -18,12 +18,12 @@ else source $(dirname $0)/../../../common/tools/sbin/so-common fi -if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then +if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls fi -if ! grep -q "^ kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then +if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) - echo ' kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi \ No newline at end of file + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls +fit \ No newline at end of file From 529bc01d6934acc31594779db68fe0b56913024f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 14:53:52 -0400 Subject: [PATCH 074/222] Add missing configuration for nodes running Kafka broker role only Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index c9f3e79e23..4e82eac422 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -39,6 +39,12 @@ {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} +{# Nodes with only the 'broker' role need to have the below settings for communicating with controller nodes #} +{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %} +{% do KAFKAMERGED.config.broker.update({ + 'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map + + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map }) + %} {% endif %} {% if node_type == 'controller' %} @@ -50,6 +56,7 @@ {# Kafka nodes of this type are not recommended for use outside of development / testing. #} {% if node_type == 'broker,controller' %} {% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %} +{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %} {% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %} {% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %} {% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %} From a6e8b25969c01439339f16a3f4f2d88ba7d4cb55 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 29 Apr 2024 15:48:57 -0400 Subject: [PATCH 075/222] Add Kafka connectivity between manager - > receiver nodes. Add connectivity to Kafka between other node types that may need to publish to Kafka. 
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/firewall/defaults.yaml | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 0b6d06eda3..6dd3fead3a 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -405,7 +405,6 @@ firewall: - docker_registry - influxdb - sensoroni - - kafka searchnode: portgroups: - redis @@ -433,6 +432,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka receiver: portgroups: - yum @@ -442,6 +442,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka analyst: portgroups: - nginx @@ -566,6 +567,7 @@ firewall: - elastic_agent_update - localrules - sensoroni + - kafka fleet: portgroups: - elasticsearch_rest @@ -613,6 +615,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka heavynode: portgroups: - redis @@ -625,6 +628,7 @@ firewall: - elastic_agent_data - elastic_agent_update - sensoroni + - kafka receiver: portgroups: - yum @@ -761,7 +765,7 @@ firewall: - beats_5044 - beats_5644 - beats_5056 - - redis + - kafka - elasticsearch_node - elastic_agent_control - elastic_agent_data @@ -813,6 +817,7 @@ firewall: - redis - elasticsearch_rest - elasticsearch_node + - kafka heavynode: portgroups: - docker_registry @@ -822,6 +827,7 @@ firewall: - redis - elasticsearch_rest - elasticsearch_node + - kafka receiver: portgroups: - yum @@ -1289,6 +1295,9 @@ firewall: - redis - beats_5644 - kafka + manager: + portgroups: + - kafka managersearch: portgroups: - redis From 9c83a52c6d22db4a3508e6fafe19e0e61b635da7 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:01:31 -0400 Subject: [PATCH 076/222] Add Kafka output to elastic-fleet setup. 
Includes separating topics by event.module with fallback to default-logs if no event.module is specified or doesn't match processors Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../tools/sbin_jinja/so-elastic-fleet-setup | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 361469b26e..e01360687a 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -77,6 +77,23 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl printf "\n\n" {%- endif %} +printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" +{% if grains.role not in ['so-import', 'so-eval'] %} +KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) +KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) +{# KAFKACA=$(openssl x509 -in $INTCA) #} +KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) +KAFKA_OUTPUT_VERSION="2.6.0" +JSON_STRING=$( jq -n \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACA "$KAFKACA" \ + --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ + '{ "name": "grid_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + ) +curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +{% endif %} + # Add Manager Hostname & URL Base to Fleet Host URLs printf "\nAdd SO-Manager Fleet URL\n" if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then From fcc4050f86da9c09a76d74bf40981a3f86abc632 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 12:59:53 -0400 Subject: [PATCH 077/222] Add id to grid-kafka fleet output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup 
b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index e01360687a..14ab58b45d 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -89,7 +89,7 @@ JSON_STRING=$( jq -n \ --arg KAFKAKEY "$KAFKAKEY" \ --arg KAFKACA "$KAFKACA" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' ) curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" {% endif %} From bb49944b9613402cffdfa9ab0ebe4a88a5f6dc5d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 30 Apr 2024 16:47:40 -0400 Subject: 
[PATCH 078/222] Setup elastic fleet rollover from logstash -> kafka output policy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 152 +++++++++++------- .../tools/sbin_jinja/so-elastic-fleet-setup | 3 +- 2 files changed, 99 insertions(+), 56 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index eb5ccc1ed4..4d2867fc7c 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -21,64 +21,104 @@ function update_logstash_outputs() { # Update Logstash Outputs curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq } +function update_kafka_outputs() { + # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup + SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl') + + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + # Update Kafka outputs + curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq +} -# Get current list of Logstash Outputs -RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') - -# Check to make sure that the server responded with good data - else, bail from script -CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") -if [ "$CHECKSUM" != "so-manager_logstash" ]; then - printf "Failed to query for current Logstash Outputs..." - exit 1 -fi - -# Get the current list of Logstash outputs & hash them -CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") -CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') - -declare -a NEW_LIST=() +{% if GLOBALS.pipeline == "KAFKA" %} + # Get current list of Kafka Outputs + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka') + + # Check to make sure that the server responded with good data - else, bail from script + CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") + if [ "$CHECKSUM" != "so-manager_kafka" ]; then + printf "Failed to query for current Kafka Outputs..." 
+ exit 1 + fi + + # Get the current list of kafka outputs & hash them + CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") + CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + + declare -a NEW_LIST=() + + # Query for the current Grid Nodes that are running kafka + KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local') + + # Query for Kafka nodes with Broker role and add hostname to list + while IFS= read -r line; do + NEW_LIST+=("$line") + done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES) + + {# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #} +{% else %} + # Get current list of Logstash Outputs + RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash') + + # Check to make sure that the server responded with good data - else, bail from script + CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON") + if [ "$CHECKSUM" != "so-manager_logstash" ]; then + printf "Failed to query for current Logstash Outputs..." + exit 1 + fi + + # Get the current list of Logstash outputs & hash them + CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") + CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + + declare -a NEW_LIST=() + + {# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #} + {% if ELASTICFLEETMERGED.enable_manager_output %} + # Create array & add initial elements + if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then + NEW_LIST+=("{{ GLOBALS.url_base }}:5055") + else + NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055") + fi + {% endif %} + + # Query for FQDN entries & add them to the list + {% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %} + CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}') + readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST") + for CUSTOMNAME in "${CUSTOMFQDN[@]}" + do + NEW_LIST+=("$CUSTOMNAME:5055") + done + {% endif %} + + # Query for the current Grid Nodes that are running Logstash + LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') + + # Query for Receiver Nodes & add them to the list + if grep -q "receiver" <<< $LOGSTASHNODES; then + readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES) + for NODE in "${RECEIVERNODES[@]}" + do + NEW_LIST+=("$NODE:5055") + done + fi + + # Query for Fleet Nodes & add them to the list + if grep -q "fleet" <<< $LOGSTASHNODES; then + readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) + for NODE in "${FLEETNODES[@]}" + do + NEW_LIST+=("$NODE:5055") + done + fi -{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #} -{% if ELASTICFLEETMERGED.enable_manager_output %} -# Create array & add initial elements -if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then - NEW_LIST+=("{{ GLOBALS.url_base }}:5055") -else - NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055") -fi {% endif %} -# Query for FQDN entries & add them to the list -{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %} -CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}') -readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST") -for CUSTOMNAME in "${CUSTOMFQDN[@]}" -do - NEW_LIST+=("$CUSTOMNAME:5055") -done -{% endif %} - -# Query for 
the current Grid Nodes that are running Logstash -LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local') - -# Query for Receiver Nodes & add them to the list -if grep -q "receiver" <<< $LOGSTASHNODES; then - readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES) - for NODE in "${RECEIVERNODES[@]}" - do - NEW_LIST+=("$NODE:5055") - done -fi - -# Query for Fleet Nodes & add them to the list -if grep -q "fleet" <<< $LOGSTASHNODES; then - readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES) - for NODE in "${FLEETNODES[@]}" - do - NEW_LIST+=("$NODE:5055") - done -fi - # Sort & hash the new list of Logstash Outputs NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') @@ -91,5 +131,9 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" +{% if GLOBALS.pipeline == "KAFKA" %} + update_kafka_outputs +{% else %} update_logstash_outputs +{% endif %} fi diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 14ab58b45d..acaec360b5 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -81,8 +81,7 @@ printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" {% if grains.role not in ['so-import', 'so-eval'] %} KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) -{# KAFKACA=$(openssl x509 -in $INTCA) #} -KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) +KAFKACA=$(openssl x509 -in $INTCA) KAFKA_OUTPUT_VERSION="2.6.0" JSON_STRING=$( jq -n \ --arg KAFKACRT "$KAFKACRT" \ From cef9bb148725e980933325ee86f294512438b487 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:16:13 -0400 Subject: [PATCH 079/222] Dynamically create Kafka topics based on event.module from elastic agent logs eg. zeek-topic. 
Depends on Kafka brokers having auto.create.topics.enable set to true Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index acaec360b5..aacc3ebc8d 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -88,7 +88,7 @@ JSON_STRING=$( jq -n \ --arg KAFKAKEY "$KAFKAKEY" \ --arg KAFKACA "$KAFKACA" \ --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics": [ { "topic": "zeek-logs", "when": { "type": "equals", "condition": "event.module:zeek" } }, { "topic": "suricata-logs", "when": { "type": "equals", "condition": "event.module:suricata" } }, { "topic": "strelka-logs", "when": { "type": "equals", "condition": "event.module:strelka" } }, { "topic": "opencanary-logs", "when": { "type": "equals", "condition": "event.module:opencanary" } }, { "topic": "system-logs", "when": { "type": "equals", "condition": "event.module:system" } }, { "topic": "kratos-logs", "when": { "type": "equals", "condition": "event.module:kratos" } }, { "topic": "soc-logs", "when": { "type": "equals", "condition": "event.module:soc" } }, { "topic": "rita-logs", "when": { "type": "equals", "condition": "event.module:rita" } }, { "topic": "default-logs" } ], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' ) curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" {% endif %} From eb1249618b995a83c02b2b56692703019fe3532f Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:27:01 -0400 Subject: [PATCH 080/222] Update soup for Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/so-kafka-clusterid | 2 +- salt/manager/tools/sbin/soup | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/so-kafka-clusterid 
b/salt/manager/tools/sbin/so-kafka-clusterid index eb0701b8bc..c4e449448c 100644 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ b/salt/manager/tools/sbin/so-kafka-clusterid @@ -26,4 +26,4 @@ fi if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fit \ No newline at end of file +fi \ No newline at end of file diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index a6f9032a5b..abde1ed0a3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -448,13 +448,13 @@ post_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - if ! grep -q "^ certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) - echo ' certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls fi POSTVERSION=2.4.70 From 63f3e23e2b00af48a3e3789657ed1c3feee488be Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 09:54:19 -0400 Subject: [PATCH 081/222] soup typo Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index abde1ed0a3..16f4d90761 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -451,6 +451,7 @@ post_to_2.4.70() { if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafka_cluster_id=$(get_random_value 22) echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then kafkapass=$(get_random_value) From 6b60e85a337f11ac8559740ff90bae3a43d0507a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:15:26 -0400 Subject: [PATCH 082/222] Make kafka configuration changes prior to 2.4.70 upgrade Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 41 ++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 16f4d90761..6e48207171 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -438,26 +438,7 @@ post_to_2.4.60() { } post_to_2.4.70() { - # Global pipeline changes to REDIS or KAFKA - echo "Removing global.pipeline pillar configuration" - sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls - - # Kafka configuration - mkdir -p /opt/so/saltstack/local/pillar/kafka - touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls - echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - - if ! 
grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - - if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - + echo "Nothing to apply" POSTVERSION=2.4.70 } @@ -603,6 +584,26 @@ up_to_2.4.60() { up_to_2.4.70() { playbook_migration toggle_telemetry + + # Kafka configuration changes + + # Global pipeline changes to REDIS or KAFKA + echo "Removing global.pipeline pillar configuration" + sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls + # Kafka pillars + mkdir -p /opt/so/saltstack/local/pillar/kafka + touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafka_cluster_id=$(get_random_value 22) + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi + if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then + kafkapass=$(get_random_value) + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + fi + INSTALLEDVERSION=2.4.70 } From 84abfa688181c0eb878e26c7e87cdce79861825c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:45:05 -0400 Subject: [PATCH 083/222] Remove check for existing value since Kafka pillar is made empty on upgrade Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6e48207171..4c328b373b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -595,14 +595,10 @@ up_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi - if ! 
grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls - fi + kafka_cluster_id=$(get_random_value 22) + echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + kafkapass=$(get_random_value) + echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.70 } From de0af58cf8328ccc1c58886a9b0166b3cf67286d Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 10:45:46 -0400 Subject: [PATCH 084/222] Write out Kafka pillar path Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 4c328b373b..436765d302 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -596,9 +596,9 @@ up_to_2.4.70() { touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls + echo ' kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls INSTALLEDVERSION=2.4.70 } From 3efdb4e5328713ab1633bf85de200a46f8d66a03 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:01:29 -0400 Subject: [PATCH 085/222] Reconfigure logstash Kafka input - TODO: Configure what topics are pulled to searchnodes via the SOC UI Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/top.sls | 1 + salt/logstash/defaults.yaml | 1 + .../config/so/0800_input_kafka.conf.jinja | 19 +++++++++---------- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index fbb1604dad..61b812cc84 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -226,6 +226,7 @@ base: - minions.adv_{{ grains.id }} - stig.soc_stig - soc.license + - kafka.nodes '*_receiver': - logstash.nodes diff --git a/salt/logstash/defaults.yaml b/salt/logstash/defaults.yaml index 348acb6226..d82cba1ffd 100644 --- a/salt/logstash/defaults.yaml +++ b/salt/logstash/defaults.yaml @@ -37,6 +37,7 @@ logstash: - so/0900_input_redis.conf.jinja - so/9805_output_elastic_agent.conf.jinja - so/9900_output_endgame.conf.jinja + - so/0800_input_kafka.conf.jinja custom0: [] custom1: [] custom2: [] diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 85e6729e23..087ed7755b 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -1,18 +1,17 @@ -{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %} -{% set kafka_on_mngr = salt ['pillar.get']('logstash:nodes:manager', {}) %} -{% set broker_ips = [] %} -{% for node, node_data in kafka_brokers.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} -{% endfor %} -{% for node, node_data in kafka_on_mngr.items() %} - {% do broker_ips.append(node_data['ip'] + ":9092") %} +{% set kafka_brokers 
= salt['pillar.get']('kafka:nodes', {}) %} +{% set brokers = [] %} + +{% for key, values in kafka_brokers.items() %} +{% if 'broker' in values['role'] %} +{% do brokers.append(key ~ ':9092') %} +{% endif %} {% endfor %} -{% set bootstrap_servers = "','".join(broker_ips) %} +{% set bootstrap_servers = ','.join(brokers) %} input { kafka { codec => json - topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs'] + topics => ['default-topic', 'kratos-topic', 'soc-topic', 'strelka-topic', 'suricata-topic', 'zeek-topic', 'rita-topic', 'opencanary-topic', 'syslog-topic'] group_id => 'searchnodes' client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' From e164d15ec64c6238312f2fdf1a9c25f70238c22e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:02:47 -0400 Subject: [PATCH 086/222] Generate different Kafka certs for different SO nodetypes Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/ssl/init.sls | 219 +++++++++++++++++++++++++++++----------------- 1 file changed, 141 insertions(+), 78 deletions(-) diff --git a/salt/ssl/init.sls b/salt/ssl/init.sls index 854628949a..72fc6c9a49 100644 --- a/salt/ssl/init.sls +++ b/salt/ssl/init.sls @@ -662,31 +662,27 @@ elastickeyperms: - mode: 640 - group: 930 -{%- endif %} - -{% if grains['role'] in ['so-manager', 'so-receiver', 'so-searchnode'] %} - -kafka_key: +kafka_logstash_key: x509.private_key_managed: - - name: /etc/pki/kafka.key + - name: /etc/pki/kafka-logstash.key - keysize: 4096 - backup: True - new: True - {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} - prereq: - - x509: /etc/pki/kafka.crt + - x509: /etc/pki/kafka-logstash.crt {%- endif %} - retry: attempts: 5 interval: 30 -kafka_crt: +kafka_logstash_crt: x509.certificate_managed: - - name: /etc/pki/kafka.crt + - name: /etc/pki/kafka-logstash.crt - ca_server: {{ ca_server }} - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - signing_policy: kafka - - private_key: /etc/pki/kafka.key + - private_key: /etc/pki/kafka-logstash.key - CN: {{ GLOBALS.hostname }} - days_remaining: 0 - days_valid: 820 @@ -696,9 +692,37 @@ kafka_crt: attempts: 5 interval: 30 cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" - onchanges: - - x509: /etc/pki/kafka.key + - x509: /etc/pki/kafka-logstash.key + +kafka_logstash_key_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.key + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_crt_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.crt + - mode: 640 + - user: 960 + - group: 939 + +kafka_logstash_pkcs12_perms: + file.managed: + - replace: False + - name: /etc/pki/kafka-logstash.p12 + - mode: 640 + - user: 960 + - group: 931 + +{%- endif %} + +{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %} elasticfleet_kafka_key: x509.private_key_managed: @@ -734,41 +758,30 @@ elasticfleet_kafka_crt: - onchanges: - x509: elasticfleet_kafka_key -kafka_logstash_key: - x509.private_key_managed: - - name: /etc/pki/kafka-logstash.key - - keysize: 4096 - - backup: True - - new: True - {% if 
salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} - - prereq: - - x509: /etc/pki/kafka-logstash.crt - {%- endif %} - - retry: - attempts: 5 - interval: 30 +elasticfleet_kafka_cert_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.crt + - mode: 640 + - user: 960 + - group: 939 -kafka_logstash_crt: - x509.certificate_managed: - - name: /etc/pki/kafka-logstash.crt - - ca_server: {{ ca_server }} - - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} - - signing_policy: kafka - - private_key: /etc/pki/kafka-logstash.key - - CN: {{ GLOBALS.hostname }} - - days_remaining: 0 - - days_valid: 820 - - backup: True - - timeout: 30 - - retry: - attempts: 5 - interval: 30 - cmd.run: - - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" - - onchanges: - - x509: /etc/pki/kafka-logstash.key +elasticfleet_kafka_key_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.key + - mode: 640 + - user: 960 + - group: 939 + +elasticfleet_kafka_pkcs8_perms: + file.managed: + - replace: False + - name: /etc/pki/elasticfleet-kafka.p8 + - mode: 640 + - user: 960 + - group: 939 -{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %} kafka_client_key: x509.private_key_managed: - name: /etc/pki/kafka-client.key @@ -814,8 +827,44 @@ kafka_client_crt_perms: - mode: 640 - user: 960 - group: 939 -{% endif %} +{% endif %} + +{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %} + +kafka_key: + x509.private_key_managed: + - name: /etc/pki/kafka.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka.key') -%} + - prereq: + - x509: /etc/pki/kafka.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 + +kafka_crt: + x509.certificate_managed: + - name: /etc/pki/kafka.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka.key kafka_key_perms: file.managed: - replace: False @@ -832,62 +881,76 @@ kafka_crt_perms: - user: 960 - group: 939 -kafka_logstash_key_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.key - - mode: 640 - - user: 960 - - group: 939 - -kafka_logstash_crt_perms: +kafka_pkcs12_perms: file.managed: - replace: False - - name: /etc/pki/kafka-logstash.crt + - name: /etc/pki/kafka.p12 - mode: 640 - user: 960 - group: 939 -kafka_logstash_pkcs12_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka-logstash.p12 - - mode: 640 - - user: 960 - - group: 931 +{% endif %} +{# For automated testing standalone will need kafka-logstash key to pull logs from Kafka #} +{% if grains['role'] == 'so-standalone' %} +kafka_logstash_key: + x509.private_key_managed: + - name: /etc/pki/kafka-logstash.key + - keysize: 4096 + - backup: True + - new: True + {% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%} + - prereq: + - x509: /etc/pki/kafka-logstash.crt + {%- endif %} + - retry: + attempts: 5 + interval: 30 
-elasticfleet_kafka_pkcs8_perms: - file.managed: - - replace: False - - name: /etc/pki/elasticfleet-kafka.p8 - - mode: 640 - - user: 960 - - group: 939 +kafka_logstash_crt: + x509.certificate_managed: + - name: /etc/pki/kafka-logstash.crt + - ca_server: {{ ca_server }} + - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }} + - signing_policy: kafka + - private_key: /etc/pki/kafka-logstash.key + - CN: {{ GLOBALS.hostname }} + - days_remaining: 0 + - days_valid: 820 + - backup: True + - timeout: 30 + - retry: + attempts: 5 + interval: 30 + cmd.run: + - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:changeit" + - onchanges: + - x509: /etc/pki/kafka-logstash.key -kafka_pkcs12_perms: +kafka_logstash_key_perms: file.managed: - replace: False - - name: /etc/pki/kafka.p12 + - name: /etc/pki/kafka-logstash.key - mode: 640 - user: 960 - group: 939 -elasticfleet_kafka_cert_perms: +kafka_logstash_crt_perms: file.managed: - replace: False - - name: /etc/pki/elasticfleet-kafka.crt + - name: /etc/pki/kafka-logstash.crt - mode: 640 - user: 960 - group: 939 -elasticfleet_kafka_key_perms: +kafka_logstash_pkcs12_perms: file.managed: - replace: False - - name: /etc/pki/elasticfleet-kafka.key + - name: /etc/pki/kafka-logstash.p12 - mode: 640 - user: 960 - - group: 939 + - group: 931 {% endif %} + {% else %} {{sls}}_state_not_allowed: From 58ebbfba206a314b217fa2cf9133609d1aae6c9c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 13:03:14 -0400 Subject: [PATCH 087/222] Add kafka state to standalone highstate Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/top.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/top.sls b/salt/top.sls index e4cd067c3d..464f23ea51 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -141,6 +141,7 @@ base: - utility - elasticfleet - stig + - kafka '*_searchnode and G@saltversion:{{saltversion}}': - match: compound From 47ced60243d86337cf7e0e6d3133b10fb985bbd8 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 1 May 2024 14:49:51 -0400 Subject: [PATCH 088/222] Create new Kafka output policy using salt Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../tools/sbin_jinja/so-elastic-fleet-setup | 13 +---------- salt/kafka/elasticfleet.sls | 22 ++++++++++++++++++ salt/kafka/init.sls | 1 + .../sbin_jinja/so-kafka-fleet-output-policy | 23 +++++++++++++++++++ 4 files changed, 47 insertions(+), 12 deletions(-) create mode 100644 salt/kafka/elasticfleet.sls create mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index aacc3ebc8d..02624d8132 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -79,18 +79,7 @@ printf "\n\n" printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n" {% if grains.role not in ['so-import', 'so-eval'] %} -KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) -KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) -KAFKACA=$(openssl x509 -in $INTCA) -KAFKA_OUTPUT_VERSION="2.6.0" -JSON_STRING=$( jq -n \ - --arg KAFKACRT "$KAFKACRT" \ - --arg KAFKAKEY "$KAFKAKEY" \ - --arg KAFKACA "$KAFKACA" \ - --arg KAFKA_OUTPUT_VERSION 
"$KAFKA_OUTPUT_VERSION" \ - '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ "{{ GLOBALS.manager }}:9092", "{{ GLOBALS.manager_ip }}:9092" ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' - ) -curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +salt-call state.apply kafka.elasticfleet queue=True {% endif %} # Add Manager Hostname & URL Base to Fleet Host URLs diff --git a/salt/kafka/elasticfleet.sls b/salt/kafka/elasticfleet.sls new file mode 100644 index 0000000000..a91df765b3 --- /dev/null +++ b/salt/kafka/elasticfleet.sls @@ -0,0 +1,22 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +{% from 'vars/globals.map.jinja' import GLOBALS %} + +{# Create Kafka output policy if it doesn't exist #} +update_kafka_output_policy_script: + file.managed: + - name: /usr/sbin/so-kafka-fleet-output-policy + - source: salt://kafka/tools/sbin_jinja/so-kafka-fleet-output-policy + - user: root + - mode: 755 + - template: jinja + - defaults: + GLOBALS: {{ GLOBALS }} + +create_kafka_output_policy: + cmd.run: + - name: 'so-kafka-fleet-output-policy > /dev/null 2>&1' + - show_changes: false \ No newline at end of file diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index c4351ebfc0..67b66c45dd 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -10,6 +10,7 @@ include: {# Run kafka/nodes.sls before Kafka is enabled, so kafka nodes pillar is setup #} {% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %} - kafka.nodes + - kafka.elasticfleet {% endif %} {% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %} - kafka.enabled diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy b/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy new file mode 100644 index 0000000000..13f158bdd7 --- /dev/null +++ b/salt/kafka/tools/sbin_jinja/so-kafka-fleet-output-policy @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +output=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id) + +if ! 
echo "$output" | grep -q "so-manager_kafka"; then + KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) + KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) + KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) + KAFKA_OUTPUT_VERSION="2.6.0" + JSON_STRING=$( jq -n \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACA "$KAFKACA" \ + --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \ + --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \ + '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-topic","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-topic"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }' + ) + curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" 2&1> /dev/null +fi \ No newline at end of file From de9f6425f9901cc82697ba216484e420e2864578 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 12:13:46 -0400 Subject: [PATCH 089/222] Automatically switch between Kafka output policy and logstash output policy when globals.pipeline changes Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 4d2867fc7c..064d49d23c 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -127,13 +127,27 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then printf "\nHashes match - no update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" + + # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed + printf "Checking if the correct output policy is set as default\n" + OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON) + if [ "$OUTPUT_DEFAULT" = "false" ]; then + printf "Default output policy needs to be updated.\n" + {%- if GLOBALS.pipeline == "KAFKA" %} + update_kafka_outputs + {%- else %} + update_logstash_outputs + {%- endif %} + else + printf "Default output policy is set - no update needed.\n" + fi exit 0 else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" -{% if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" %} update_kafka_outputs -{% else %} + {%- else %} update_logstash_outputs -{% endif %} + {%- endif %} fi From f663ef8c168c880d4c6b06483e1ab5e0a406769c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 14:53:28 -0400 Subject: [PATCH 090/222] Setup Kafka to use 
PKCS12 and remove need for converting to JKS Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.sls | 14 -------------- salt/kafka/defaults.yaml | 10 +++++----- salt/kafka/enabled.sls | 4 +++- .../tools/sbin_jinja/so-kafka-generate-keystore | 13 ------------- 4 files changed, 8 insertions(+), 33 deletions(-) delete mode 100644 salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index b1a31d23f5..5cf6f8201c 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -54,20 +54,6 @@ kafka_data_dir: - group: 960 - makedirs: True -kafka_generate_keystore: - cmd.run: - - name: "/usr/sbin/so-kafka-generate-keystore" - - onchanges: - - x509: /etc/pki/kafka.key - -kafka_keystore_perms: - file.managed: - - replace: False - - name: /etc/pki/kafka.jks - - mode: 640 - - user: 960 - - group: 939 - {% for sc in ['server', 'client'] %} kafka_kraft_{{sc}}_properties: file.managed: diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 8dcd70b981..1cf7b73f1e 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -25,19 +25,19 @@ kafka: socket_x_receive_x_buffer_x_bytes: 102400 socket_x_request_x_max_x_bytes: 104857600 socket_x_send_x_buffer_x_bytes: 102400 - ssl_x_keystore_x_location: /etc/pki/kafka.jks + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: changeit - ssl_x_keystore_x_type: JKS ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - transaction_x_state_x_log_x_min_x_isr: 1 + transaction_x_state_x_log_x_min_x_isr: 1n transaction_x_state_x_log_x_replication_x_factor: 1 client: security_x_protocol: SSL ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - ssl_x_keystore_x_location: /etc/pki/kafka.jks - ssl_x_keystore_x_type: JKS + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 ssl_x_keystore_x_password: changeit controller: controller_x_listener_x_names: CONTROLLER diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 78e0d87d9d..9275eca914 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -40,11 +40,13 @@ so-kafka: - {{ BINDING }} {% endfor %} - binds: - - /etc/pki/kafka.jks:/etc/pki/kafka.jks + - /etc/pki/kafka.p12:/etc/pki/kafka.p12 - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties + {% endif %} - watch: {% for sc in ['server', 'client'] %} - file: kafka_kraft_{{sc}}_properties diff --git a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore b/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore deleted file mode 100644 index 8ae9d6db2f..0000000000 --- a/salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - -. 
/usr/sbin/so-common - -# Generate a new keystore -docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt -docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks -docker rm so-kafka-keystore \ No newline at end of file From 093cbc5ebcfc59f70f9e6860006e802ce61fa7fc Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 2 May 2024 15:10:13 -0400 Subject: [PATCH 091/222] Reconfigure Kafka defaults - Set default number of partitions per topic -> 3. Helps ensure that out of the box we can take advantage of multi-node Kafka clusters via load balancing across atleast 3 brokers. Also multiple searchnodes will be able to pull from each topic. In this case 3 searchnodes (consumers) would be able to pull from all topics concurrently. - Set default replication factor -> 2. This is the minimum value required for redundancy. Every partition will have 1 replica. In this case if we have 2 brokers each topic will have 3 partitions (load balanced across brokers) and each partition will have a replica on separate broker for redundancy Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 5 +++-- salt/kafka/soc_kafka.yaml | 5 +++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 1cf7b73f1e..86d2f6e946 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -8,6 +8,7 @@ kafka: advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_quorum_x_voters: + default_x_replication_x_factor: 2 inter_x_broker_x_listener_x_name: BROKER listeners: BROKER://0.0.0.0:9092 listener_x_security_x_protocol_x_map: BROKER:SSL @@ -18,7 +19,7 @@ kafka: node_x_id: num_x_io_x_threads: 8 num_x_network_x_threads: 3 - num_x_partitions: 1 + num_x_partitions: 3 num_x_recovery_x_threads_x_per_x_data_x_dir: 1 offsets_x_topic_x_replication_x_factor: 1 process_x_roles: broker @@ -30,7 +31,7 @@ kafka: ssl_x_keystore_x_password: changeit ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts ssl_x_truststore_x_password: changeit - transaction_x_state_x_log_x_min_x_isr: 1n + transaction_x_state_x_log_x_min_x_isr: 1 transaction_x_state_x_log_x_replication_x_factor: 1 client: security_x_protocol: SSL diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index 505469d6b4..ba673fa685 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -26,6 +26,11 @@ kafka: title: auto.create.topics.enable forcedType: bool helpLink: kafka.html + default_x_replication_x_factor: + description: The default replication factor for automatically created topics. + title: default.replication.factor + forcedType: int + helpLink: kafka.html inter_x_broker_x_listener_x_name: description: The name of the listener used for inter-broker communication. 
title: inter.broker.listener.name From dff609d829a3f0311e1d56d345185b87553b7ce9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 8 May 2024 16:13:09 -0400 Subject: [PATCH 092/222] Add basic read-only metric collection from Kafka Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/docker/defaults.yaml | 1 + salt/kafka/enabled.sls | 9 +++++---- salt/telegraf/etc/telegraf.conf | 17 +++++++++++++++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index 82ad3b6ea9..161dde485d 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -192,6 +192,7 @@ docker: port_bindings: - 0.0.0.0:9092:9092 - 0.0.0.0:9093:9093 + - 0.0.0.0:8778:8778 custom_bind_mounts: [] extra_hosts: [] extra_env: [] diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 9275eca914..b01e6f2a82 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -25,7 +25,8 @@ so-kafka: - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }} - user: kafka - environment: - - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G + KAFKA_HEAP_OPTS: -Xmx2G -Xms1G + KAFKA_OPTS: -javaagent:/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/jolokia/jolokia.xml - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} @@ -40,10 +41,10 @@ so-kafka: - {{ BINDING }} {% endfor %} - binds: - - /etc/pki/kafka.p12:/etc/pki/kafka.p12 - - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts + - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro + - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts:ro - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties + - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties:ro {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties {% endif %} diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 1c58016451..42a8d43bf5 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -243,6 +243,23 @@ password = "{{ salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass') }}" {%- endif %} +{% if grains.role in ['so-manager','so-managersearch','so-standalone','so-receiver'] -%} +[[inputs.jolokia2_agent]] + name_prefix= "kafka_" + urls = ["http://localhost:8778/jolokia"] + +[[inputs.jolokia2_agent.metric]] + name = "topics" + mbean = "kafka.server:name=*,type=BrokerTopicMetrics" + field_prefix = "$1." + +[[inputs.jolokia2_agent.metric]] + name = "topic" + mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics" + field_prefix = "$1." + tag_keys = ["topic"] + +{%- endif %} # # Read metrics from one or more commands that can output to stdout {%- if 'sostatus.sh' in TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]] %} {%- do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('sostatus.sh') %} From eca2a4a9c8227d5065955b03ed299eaa3b7cfdbc Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 8 May 2024 16:17:09 -0400 Subject: [PATCH 093/222] Logstash consumer threads should match topic partition count - Default is set to 3. 
If there are too many consumer threads it may lead to idle logstash worker threads and could require decreasing this value to saturate workers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja index 087ed7755b..3d0d03020a 100644 --- a/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja +++ b/salt/logstash/pipelines/config/so/0800_input_kafka.conf.jinja @@ -13,6 +13,7 @@ input { codec => json topics => ['default-topic', 'kratos-topic', 'soc-topic', 'strelka-topic', 'suricata-topic', 'zeek-topic', 'rita-topic', 'opencanary-topic', 'syslog-topic'] group_id => 'searchnodes' + consumer_threads => 3 client_id => '{{ GLOBALS.hostname }}' security_protocol => 'SSL' bootstrap_servers => '{{ bootstrap_servers }}' From 91f8b1fef7d0bfc8059abd29e3a77103cb08076c Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:35:09 -0400 Subject: [PATCH 094/222] Set default replication factor back to Kafka default If replication factor is > 1 Kafka will fail to start until another broker is added - For internal automated testing purposes a Standalone will be utilized Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 86d2f6e946..9a8c05c432 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -8,7 +8,7 @@ kafka: advertised_x_listeners: auto_x_create_x_topics_x_enable: true controller_x_quorum_x_voters: - default_x_replication_x_factor: 2 + default_x_replication_x_factor: 1 inter_x_broker_x_listener_x_name: BROKER listeners: BROKER://0.0.0.0:9092 listener_x_security_x_protocol_x_map: BROKER:SSL From b1beb617b3202fed18a7ee4a8a6ea489c53fdd6a Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:38:09 -0400 Subject: [PATCH 095/222] Logstash should be disabled when Kafka is enabled except when a minion override exists OR node is a standalone - Standalone subscribes to Kafka topics via logstash for ingest Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/logstash/init.sls | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/logstash/init.sls b/salt/logstash/init.sls index f7adc13307..3bc539b358 100644 --- a/salt/logstash/init.sls +++ b/salt/logstash/init.sls @@ -7,7 +7,10 @@ {% from 'kafka/map.jinja' import KAFKAMERGED %} include: -{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} +{# Disable logstash when Kafka is enabled except when the role is standalone #} +{% if LOGSTASH_MERGED.enabled and grains.role == 'so-standalone' %} + - logstash.enabled +{% elif LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %} - logstash.enabled {% else %} - logstash.disabled From 382cd24a57b3394688dbe93f1f9b5d1c6e3998ed Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 22 May 2024 13:39:21 -0400 Subject: [PATCH 096/222] Small changes needed for using new Kafka docker image + added Kafka logging output to /opt/so/log/kafka/ Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/enabled.sls | 7 ++++--- salt/kafka/storage.sls | 2 +- 2 files changed, 5 
insertions(+), 4 deletions(-) diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index b01e6f2a82..833cc7f3c9 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -26,7 +26,7 @@ so-kafka: - user: kafka - environment: KAFKA_HEAP_OPTS: -Xmx2G -Xms1G - KAFKA_OPTS: -javaagent:/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/jolokia/jolokia.xml + KAFKA_OPTS: -javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml - extra_hosts: {% for node in KAFKANODES %} - {{ node }}:{{ KAFKANODES[node].ip }} @@ -44,9 +44,10 @@ so-kafka: - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts:ro - /nsm/kafka/data/:/nsm/kafka/data/:rw - - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties:ro + - /opt/so/log/kafka:/opt/kafka/logs/:rw + - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro {% if GLOBALS.is_manager %} - - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties + - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties {% endif %} - watch: {% for sc in ['server', 'client'] %} diff --git a/salt/kafka/storage.sls b/salt/kafka/storage.sls index 507c199c60..efc36acf61 100644 --- a/salt/kafka/storage.sls +++ b/salt/kafka/storage.sls @@ -13,7 +13,7 @@ kafka_storage_init: cmd.run: - name: | - docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties + docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /opt/kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /opt/kafka/config/kraft/newserver.properties kafka_rm_kafkainit: cmd.run: - name: | From b5f656ae58d2ea2580238211f278262d82e65a3f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 23 May 2024 13:22:22 -0400 Subject: [PATCH 097/222] dont render pillar each time so-tcpreplay runs --- salt/common/tools/{sbin => sbin_jinja}/so-tcpreplay | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) rename salt/common/tools/{sbin => sbin_jinja}/so-tcpreplay (92%) diff --git a/salt/common/tools/sbin/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay similarity index 92% rename from salt/common/tools/sbin/so-tcpreplay rename to salt/common/tools/sbin_jinja/so-tcpreplay index 99314c2895..6f3f02983c 100755 --- a/salt/common/tools/sbin/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -10,7 +10,7 @@ . /usr/sbin/so-common . /usr/sbin/so-image-common -REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)} +REPLAYIFACE=${REPLAYIFACE:-"{{pillar.sensor.interface}}"} REPLAYSPEED=${REPLAYSPEED:-10} mkdir -p /opt/so/samples @@ -57,8 +57,8 @@ if ! docker ps | grep -q so-tcpreplay; then fi if is_sensor_node; then - echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..." 
- docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@" + echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface $REPLAYIFACE..." + docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i $REPLAYIFACE -M${REPLAYSPEED} $@" echo "Replay completed. Warnings shown above are typically expected." elif is_manager_node; then From fcb6a47e8c39a4d7a0bd3d3f75ffd4357154c6cf Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Sun, 26 May 2024 21:10:41 -0400 Subject: [PATCH 098/222] Remove redis.sh telegraf script when Kafka is global pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/map.jinja | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/telegraf/map.jinja b/salt/telegraf/map.jinja index b56c8a64db..1d92b8b5b5 100644 --- a/salt/telegraf/map.jinja +++ b/salt/telegraf/map.jinja @@ -22,3 +22,10 @@ {% endif %} {% endif %} + +{% if GLOBALS.pipeline != 'REDIS' %} +{# When global pipeline is not REDIS remove redis.sh script. KAFKA metrics are collected via jolokia agent. Config in telegraf.conf #} +{% if GLOBALS.role in ['so-standalone', 'so-manager', 'so-managersearch', 'so-receiver', 'so-heavynode'] %} +{% do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('redis.sh') %} +{% endif %} +{% endif %} From 15a0b959aae7a9a2381e9b2aa66b5240230b3499 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 10:51:39 -0400 Subject: [PATCH 099/222] Add jolokia metrics for influxdb dashboard Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/etc/telegraf.conf | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 42a8d43bf5..1e8d9d3fee 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -259,6 +259,23 @@ field_prefix = "$1." tag_keys = ["topic"] +[[inputs.jolokia2_agent.metric]] + name = "controller" + mbean = "kafka.controller:name=*,type=*" + field_prefix = "$1." 
+ +[[inputs.jolokia2_agent.metric]] + name = "partition" + mbean = "kafka.log:name=*,partition=*,topic=*,type=Log" + field_name = "$1" + tag_keys = ["topic", "partition"] + +[[inputs.jolokia2_agent.metric]] + name = "partition" + mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition" + field_name = "UnderReplicatedPartitions" + tag_keys = ["topic", "partition"] + {%- endif %} # # Read metrics from one or more commands that can output to stdout {%- if 'sostatus.sh' in TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]] %} From 47efcfd6e2ac4bf828755893b6f42343cd8297a6 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 10:55:11 -0400 Subject: [PATCH 100/222] Add basic Kafka metrics to 'Security Onion Performance' influxdb dashboard Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index e4f2a6d38c..05eb9de99f 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ -[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm 
Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => 
r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == 
r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> 
hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> 
filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: 
false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == 
v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> 
hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> 
filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: 
v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: 
false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> 
group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM 
Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","
visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":38},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) 
=> r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r 
with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka 
EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\r\n |> derivative(unit: 1s, nonNegative: true)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> 
group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, 
stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r 
with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] 
== \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty 
Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == 
v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) 
=> r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n 
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> 
filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org
.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: 
[\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file From 1c1a1a1d3fc0138b3df81c19d61e80a9f4f62ec3 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:14:19 -0400 Subject: [PATCH 101/222] Remove unneeded jolokia aggregate metrics to reduce data ingested to influx Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/telegraf/etc/telegraf.conf | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index 1e8d9d3fee..ecfb0730a0 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -247,6 +247,25 @@ [[inputs.jolokia2_agent]] name_prefix= "kafka_" urls = ["http://localhost:8778/jolokia"] + fieldexclude = [ + "*.EventType", + "*.FifteenMinuteRate", + "*.FiveMinuteRate", + "*.MeanRate", + "*.OneMinuteRate", + "*.RateUnit", + "*.LatencyUnit", + "*.50thPercentile", + 
"*.75thPercentile", + "*.95thPercentile", + "*.98thPercentile", + "*.99thPercentile", + "*.999thPercentile", + "*.Min", + "*.Mean", + "*.Max", + "*.StdDev" + ] [[inputs.jolokia2_agent.metric]] name = "topics" From 0d7c331ff0e99e5148a7e6c589eaa386ce0e05d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:29:38 -0400 Subject: [PATCH 102/222] only show specific fields when hovering over Kafka influxdb panels Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index 05eb9de99f..831f8eb169 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ -[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else 
if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: 
\"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\r\n |> derivative(unit: 1s, nonNegative: true)\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> 
group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => 
r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, 
stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r 
with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] 
== \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty 
Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == 
v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) 
=> r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n 
|> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> 
filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric 
Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: 
\"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> 
highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org
.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"displayName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: 
[\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" 
days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => 
({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => 
r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) 
=> r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", 
\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: 
[\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or 
r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] 
== \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load 
Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, 
stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"disp
layName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file From 77b5aa4369ae46240f069ef52fdb75714def9322 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 11:34:35 -0400 Subject: [PATCH 103/222] Correct dashboard name Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../templates/dashboard-security_onion_performance.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/templates/dashboard-security_onion_performance.json b/salt/influxdb/templates/dashboard-security_onion_performance.json index 831f8eb169..4f543c8d10 100644 --- a/salt/influxdb/templates/dashboard-security_onion_performance.json +++ b/salt/influxdb/templates/dashboard-security_onion_performance.json @@ -1 +1 @@ -[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" 
days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => 
({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n 
tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => 
r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) 
=> r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", 
\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: 
[\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or 
r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] 
== \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load 
Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, 
stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"disp
layName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance edit"}}] \ No newline at end of file +[{"apiVersion":"influxdata.com/v2alpha1","kind":"Dashboard","metadata":{"name":"vivid-wilson-002001"},"spec":{"charts":[{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Uptime","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24 * 60 * 60)}))\n |> group(columns: [\"host\"])\n |> last()\n |> lowestMin(n:1)"}],"staticLegend":{},"suffix":" days","width":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"ruby","type":"text","hex":"#BF3D5E","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Critical Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"crit\")\n |> count()"}],"staticLegend":{},"suffix":" 
","width":1,"yPos":2},{"colors":[{"id":"base","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QCTYWuGuHkikYFsZSKMzQ","name":"rainforest","type":"text","hex":"#4ED8A0"},{"id":"QdpMyTRBb0LJ56-P5wfAW","name":"laser","type":"text","hex":"#00C9FF","value":1},{"id":"VQGwCoMrxZyP8asiOW5Cq","name":"tiger","type":"text","hex":"#F48D38","value":2},{"id":"zSO9QkesSIxrU_ntCBx2i","name":"ruby","type":"text","hex":"#BF3D5E","value":3}],"fieldOptions":[{"fieldName":"_time","visible":true},{"displayName":"Alarm","fieldName":"_check_name","visible":true},{"displayName":"Severity","fieldName":"_value","visible":true},{"displayName":"Status","fieldName":"_level","visible":true}],"height":6,"kind":"Table","name":"Alarm Status","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> drop(columns: [\"_value\"])\n |> duplicate(column: \"_level\", as: \"_value\")\n |> map(fn: (r) => ({ r with _value: if r._value == \"ok\" then 0 else if r._value == \"info\" then 1 else if r._value == \"warn\" then 2 else 3 }))\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> keep(columns: [\"_check_name\",\"_level\",\"_value\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"_check_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"yPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Storage Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"store_size_in_bytes\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"B"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"InfluxDB Size","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"influxsize\")\n |> filter(fn: (r) => r[\"_field\"] == \"kbytes\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 1000.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"last\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: 
v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60)}))\n |> yield(name: \"Trend\")"}],"shade":true,"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"lQ75rvTyd2Lq5pZjzy6LB","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"KLfpRZtiEnU2GxjPtrrzQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"1kLynwKxvJ3B5IeJnrBqp","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_topics\")\n |> filter(fn: (r) => r[\"_field\"] == \"MessagesInPerSec.Count\")\n |> derivative(unit: 1s, nonNegative: true)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"System Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_recv\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":42},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: 
v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"5m Load Average","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load5\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":1},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"tiger","type":"text","hex":"#F48D38","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Warning Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"warn\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":1,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"IO Wait","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"},{"id":"z83MTSufTrlrCoEPiBXda","name":"laser","type":"text","hex":"#00C9FF","value":1}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Informative Alarms","queries":[{"query":"from(bucket: \"_monitoring\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"statuses\")\n |> filter(fn: (r) => 
r[\"_field\"] == \"_message\")\n |> group(columns: [\"_check_id\"])\n |> sort(columns: [\"_time\"])\n |> last()\n |> group()\n |> filter(fn: (r) => r[\"_level\"] == \"info\")\n |> count()"}],"staticLegend":{},"suffix":" ","width":1,"xPos":2,"yPos":2},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Estimated EPS In","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> hostFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":3},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"CPU Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_idle\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> map(fn: (r) => ({r with _value: r._value * -1.0 + 100.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"kOQLOg2H4FVEE-E1_L8Kq","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"5IArg2lDb8KvnphywgUXa","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Root Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":3,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Suricata Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":3,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"kind":"Single_Stat","name":"Redis Queue","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"width":1,"xPos":4},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elasticsearch Document 
Count","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"mean\")"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_indices\")\n |> filter(fn: (r) => r[\"_field\"] == \"docs_count\")\n |> filter(fn: (r) => r[\"host\"] == r[\"node_name\"])\n |> hostFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Redis Queue","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"redisqueue\")\n |> filter(fn: (r) => r[\"_field\"] == \"unparsed\")\n |> group(columns: [\"host\", \"_field\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":14},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty 
Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Uptime","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24 * 60 * 60 * 1000000000)}))\n |> yield(name: \"last\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"uptime_ns\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> group(columns: [\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> map(fn: (r) => ({r with _value: float(v: r._value) / float(v: 24.0 * 60.0 * 60.0 * 1000000000.0)}))\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Controllers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: last, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == 
\"ActiveControllerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":2,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Active Brokers","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"trend\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_controller\")\n |> filter(fn: (r) => r[\"_field\"] == \"ActiveBrokerCount.Value\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"current\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":24},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"yT5vTIlaaFChSrQvKLfqf","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"mzzUVSu3ibTph1JmQmDAQ","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"mOcnDo7l8ii6qNLFIB5rs","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container CPU Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) 
=> r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_cpu\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Memory Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_percent\")\n |> filter(fn: (r) => r[\"container_status\"] == \"running\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", 
\"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b"}],"colorizeRows":true,"colors":[{"id":"0ynR6Zs0wuQ3WY0Lz-_KC","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"YiArehCNBwFm9mn8DSXSG","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"DxByY_EQW9Xs2jD5ktkG5","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Container Traffic - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: \"mean\")"},{"query":"containerFilter = (tables=<-) =>\n if v.Container != \"(All)\" then\n tables |> filter(fn: (r) => r[\"container_name\"] == v.Container)\n else\n tables\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_net\")\n |> filter(fn: (r) => r[\"_field\"] == \"rx_bytes\")\n |> hostFilter()\n |> roleFilter()\n |> containerFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with _value: r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\", \"container_name\"])\n |> sort(columns: [\"_time\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":4,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Disk Usage /nsm","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xPos":4,"yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Traffic","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_recv\") \n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":5},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Inbound Drops","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\") \n |> derivative(unit: 1s, nonNegative: true, columns: 
[\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: r._value * 8.0 / (1000.0 * 1000.0)}))\n |> group(columns: [\"host\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" Mb/s","width":1,"xPos":6},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":70},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":80},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Memory Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"mem\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":2},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"laser","type":"threshold","hex":"#00C9FF","value":85},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"tiger","type":"threshold","hex":"#F48D38","value":90},{"id":"H7uprvKmMEh39en6X-ms_","name":"ruby","type":"threshold","hex":"#BF3D5E","value":95},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"NSM Disk Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"disk\")\n |> filter(fn: (r) => r[\"path\"] == \"/nsm\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":6,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Management Interface Traffic - Outbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> 
filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"bytes_sent\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"manint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"bytes_sent\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n \n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":6,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":38},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Packet Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> 
aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> hostFilter()\n |> roleFilter()\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":6,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Capture Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":7},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Zeek Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekdrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":8},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Elastic Ingest Time Spent","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or 
r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_community_id_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"community.id_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_conditional_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"conditional_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_index_name_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date.index.name_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_date_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"date_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dissect_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dissect_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == 
\"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_dot_expander_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"dot.expander_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_geoip_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"geoip_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_grok_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"grok_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_json_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"json_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_kv_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"kv_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] 
== \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_lowercase_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"lowercase_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_rename_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"rename_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_script_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"script_time\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"elasticsearch_clusterstats_nodes\")\n |> filter(fn: (r) => r.role == \"standalone\" or r.role == \"eval\" or r.role == \"import\" or r.role == \"managersearch\" or r.role == \"search\" or r.role == \"node\" or r.role == \"heavynode\")\n |> filter(fn: (r) => r[\"_field\"] == \"ingest_processor_stats_user_agent_time_in_millis\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"user.agent_time\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":10},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"sW2GqpGAsGB5Adx16jKjp","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"TsdXuXwdI5Npi9S8L4f-i","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"OGL29-SUbJ6FyQb0JzbaD","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"1m Load 
Average","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"system\")\n |> filter(fn: (r) => r[\"_field\"] == \"load1\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\",\"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: true)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":14,"yTickStep":1},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":" e/s"}],"colorizeRows":true,"colors":[{"id":"xflqbsX-j3iq4ry5QOntK","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#8F8AF4"},{"id":"5H28HcITm6QVfQsXon0vq","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#A51414"},{"id":"25MrINwurNBkQqeKCkMPg","name":"Do Androids Dream of Electric Sheep?","type":"scale","hex":"#F4CF31"}],"geom":"line","height":4,"heightRatio":0.301556420233463,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Logstash EPS","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == 
\"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"in\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"logstash_events\")\n |> filter(fn: (r) => r[\"_field\"] == \"out\")\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\") \n |> map(fn: (r) => ({r with _value: -r._value}))\n |> group(columns: [\"_field\", \"host\", \"pipeline\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.301556420233463,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":18},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear"}],"colorizeRows":true,"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":0,"height":4,"hoverDimension":"auto","kind":"Single_Stat_Plus_Line","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Kafka Under Replicated Partitions","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"kafka_partition\")\n |> filter(fn: (r) => r[\"_field\"] == \"UnderReplicatedPartitions\")\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"partition\",\"host\", \"role\"])\n |> yield(name: \"trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":22},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"UAehjIsi65P8u92M_3sQY","name":"Nineteen Eighty Four","type":"scale","hex":"#31C0F6"},{"id":"_SCP8Npp4NVMx2N4mfuzX","name":"Nineteen Eighty Four","type":"scale","hex":"#A500A5"},{"id":"BoMPg4R1KDp_UsRORdV3_","name":"Nineteen Eighty Four","type":"scale","hex":"#FF7E27"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"IO Wait","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, 
stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"cpu\")\n |> filter(fn: (r) => r[\"cpu\"] == \"cpu-total\")\n |> filter(fn: (r) => r[\"_field\"] == \"usage_iowait\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":26},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"QDwChKZWuQV0BaJcEeSam","name":"Atlantis","type":"scale","hex":"#74D495"},{"id":"ThD0WTqKHltQEVlq9mo6K","name":"Atlantis","type":"scale","hex":"#3F3FBA"},{"id":"FBHYZiwDLKyQK3eRfUD-0","name":"Atlantis","type":"scale","hex":"#FF4D9E"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Swap Usage","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> roleFilter()\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":30},{"axes":[{"base":"10","name":"x","scale":"linear"},{"base":"10","name":"y","scale":"linear","suffix":"b/s"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"heightRatio":0.18482490272373542,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Monitor Interface Drops - Inbound","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: \"drop_in\"}))"},{"query":"import \"join\"\n\nhostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nmanints = from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"node_config\")\n |> hostFilter()\n |> filter(fn: (r) => r[\"_field\"] == \"monint\")\n |> distinct()\n |> group(columns: [\"host\"])\n\ntraffic = from(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"net\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop_in\")\n |> hostFilter()\n |> roleFilter()\n |> derivative(unit: 1s, nonNegative: true, columns: [\"_value\"], timeColumn: \"_time\")\n |> map(fn: (r) => ({r with \"_value\": r._value * 8.0}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"host\"])\n\njoin.inner(left: traffic, right: manints,\n on: (l,r) => l.interface == r._value,\n as: (l, r) => ({l with _value: l._value, result: 
\"Trend\"}))"}],"staticLegend":{"colorizeRows":true,"heightRatio":0.18482490272373542,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":34},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":" days"}],"colorizeRows":true,"colors":[{"id":"3PVw3hQuZUzyar7Js3mMH","name":"Ectoplasm","type":"scale","hex":"#DA6FF1"},{"id":"O34ux-D8Xq_1-eeWRyYYH","name":"Ectoplasm","type":"scale","hex":"#00717A"},{"id":"P04RoKOHBdLdvfrfFbn0F","name":"Ectoplasm","type":"scale","hex":"#ACFF76"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Stenographer PCAP Retention","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])"},{"query":"import \"join\"\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)}))\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> group(columns: [\"_field\",\"host\"])\n |> yield(name: \"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":4,"widthRatio":1,"xCol":"_time","xPos":8,"yCol":"_value","yPos":46},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Suricata Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"suridrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":9},{"colors":[{"id":"0","name":"viridian","type":"min","hex":"#32B08C"},{"id":"5IArg2lDb8KvnphywgUXa","name":"pineapple","type":"threshold","hex":"#FFB94A","value":50},{"id":"yFhH3mtavjuAZh6cEt5lx","name":"fire","type":"threshold","hex":"#DC4E58","value":70},{"id":"1","name":"ruby","type":"max","hex":"#BF3D5E","value":100}],"decimalPlaces":0,"height":4,"kind":"Gauge","name":"Swap Usage","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"swap\")\n |> filter(fn: (r) => r[\"_field\"] == \"used_percent\")\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n: 1)\n |> aggregateWindow(every: v.windowPeriod, fn: mean, 
createEmpty: false)\n |> yield(name: \"mean\")"}],"staticLegend":{},"suffix":"%","tickSuffix":"%","width":3,"xPos":9,"yPos":2},{"colors":[{"id":"base","name":"white","type":"text","hex":"#ffffff"}],"fieldOptions":[{"displayName":"Host","fieldName":"host","visible":true},{"displayName":"Name","fieldName":"container_name","visible":true},{"displayName":"Status","fieldName":"container_status","visible":true},{"displayName":"OOM Killed","fieldName":"_value","visible":true},{"displayName":"_start","fieldName":"_start","visible":true},{"displayName":"_stop","fieldName":"_stop","visible":true},{"displayName":"_time","fieldName":"_time","visible":true},{"displayName":"_field","fieldName":"_field","visible":true},{"displayName":"_measurement","fieldName":"_measurement","visible":true},{"displayName":"engine_host","fieldName":"engine_host","visible":true},{"displayName":"role","fieldName":"role","visible":true},{"displayName":"server_version","fieldName":"server_version","visible":true},{"displayName":"container_image","fieldName":"container_image","visible":true},{"displayName":"container_version","fieldName":"container_version","visible":true},{"displayName":"description","fieldName":"description","visible":true},{"displayName":"maintainer","fieldName":"maintainer","visible":true},{"displayName":"io.k8s.description","fieldName":"io.k8s.description","visible":true},{"displayName":"io.k8s.display-name","fieldName":"io.k8s.display-name","visible":true},{"displayName":"license","fieldName":"license","visible":true},{"displayName":"name","fieldName":"name","visible":true},{"displayName":"org.label-schema.build-date","fieldName":"org.label-schema.build-date","visible":true},{"displayName":"org.label-schema.license","fieldName":"org.label-schema.license","visible":true},{"displayName":"org.label-schema.name","fieldName":"org.label-schema.name","visible":true},{"displayName":"org.label-schema.schema-version","fieldName":"org.label-schema.schema-version","visible":true},{"displayName":"org.label-schema.url","fieldName":"org.label-schema.url","visible":true},{"displayName":"org.label-schema.vcs-ref","fieldName":"org.label-schema.vcs-ref","visible":true},{"displayName":"org.label-schema.vcs-url","fieldName":"org.label-schema.vcs-url","visible":true},{"displayName":"org.label-schema.vendor","fieldName":"org.label-schema.vendor","visible":true},{"displayName":"org.label-schema.version","fieldName":"org.label-schema.version","visible":true},{"displayName":"org.opencontainers.image.created","fieldName":"org.opencontainers.image.created","visible":true},{"displayName":"org.opencontainers.image.licenses","fieldName":"org.opencontainers.image.licenses","visible":true},{"displayName":"org.opencontainers.image.title","fieldName":"org.opencontainers.image.title","visible":true},{"displayName":"org.opencontainers.image.vendor","fieldName":"org.opencontainers.image.vendor","visible":true},{"displayName":"release","fieldName":"release","visible":true},{"displayName":"summary","fieldName":"summary","visible":true},{"displayName":"url","fieldName":"url","visible":true},{"displayName":"vendor","fieldName":"vendor","visible":true},{"displayName":"version","fieldName":"version","visible":true},{"displayName":"org.label-schema.usage","fieldName":"org.label-schema.usage","visible":true},{"displayName":"org.opencontainers.image.documentation","fieldName":"org.opencontainers.image.documentation","visible":true},{"displayName":"org.opencontainers.image.revision","fieldName":"org.opencontainers.image.revision","visible":true},{"disp
layName":"org.opencontainers.image.source","fieldName":"org.opencontainers.image.source","visible":true},{"displayName":"org.opencontainers.image.url","fieldName":"org.opencontainers.image.url","visible":true},{"displayName":"org.opencontainers.image.version","fieldName":"org.opencontainers.image.version","visible":true},{"displayName":"org.opencontainers.image.description","fieldName":"org.opencontainers.image.description","visible":true}],"height":4,"kind":"Table","name":"Most Recent Container Events","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"docker_container_status\")\n |> filter(fn: (r) => r[\"_field\"] == \"oomkilled\")\n |> filter(fn: (r) => r[\"container_status\"] != \"running\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"container_name\", \"host\"])\n |> last()\n |> group()\n |> keep(columns: [\"_value\", \"container_name\", \"host\", \"container_status\"])"}],"staticLegend":{},"tableOptions":{"sortBy":"container_name","verticalTimeAxis":true},"timeFormat":"YYYY-MM-DD HH:mm:ss","width":3,"xPos":9,"yPos":6},{"axes":[{"base":"10","name":"x","scale":"linear"},{"name":"y","scale":"linear","suffix":"%"}],"colorizeRows":true,"colors":[{"id":"TtgHQAXNep94KBgtu48C_","name":"Cthulhu","type":"scale","hex":"#FDC44F"},{"id":"_IuzkORho_8QXTE6vMllv","name":"Cthulhu","type":"scale","hex":"#007C76"},{"id":"bUszW_YI_9oColDbLNQ-d","name":"Cthulhu","type":"scale","hex":"#8983FF"}],"geom":"line","height":4,"hoverDimension":"auto","kind":"Xy","legendColorizeRows":true,"legendOpacity":1,"legendOrientationThreshold":100000000,"name":"Zeek Capture Loss","opacity":1,"orientationThreshold":100000000,"position":"overlaid","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")"},{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nroleFilter = (tables=<-) =>\n if v.Role != \"(All)\" then\n tables |> filter(fn: (r) => r[\"role\"] == v.Role)\n else\n tables\n\nfrom(bucket: \"telegraf/so_long_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"zeekcaptureloss\")\n |> filter(fn: (r) => r[\"_field\"] == \"loss\")\n |> hostFilter()\n |> roleFilter()\n |> group(columns: [\"_field\", \"host\", \"role\"])\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\n |> yield(name: 
\"Trend\")"}],"staticLegend":{"colorizeRows":true,"opacity":1,"orientationThreshold":100000000,"widthRatio":1},"width":3,"widthRatio":1,"xCol":"_time","xPos":9,"yCol":"_value","yPos":42},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"Stenographer Loss","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n\nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"stenodrop\")\n |> filter(fn: (r) => r[\"_field\"] == \"drop\")\n |> map(fn: (r) => ({r with _value: r._value * 100.0}))\n |> hostFilter()\n |> group(columns: [\"host\"])\n |> last()\n |> aggregateWindow(every: v.windowPeriod, fn: mean)\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":"%","width":1,"xPos":10},{"colors":[{"id":"base","name":"laser","type":"text","hex":"#00C9FF"}],"decimalPlaces":1,"height":2,"kind":"Single_Stat","name":"PCAP Retention","queries":[{"query":"hostFilter = (tables=<-) =>\n if v.Host != \"(All)\" then\n tables |> filter(fn: (r) => r[\"host\"] == v.Host)\n else\n tables\n \nfrom(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> hostFilter()\n |> map(fn: (r) => ({r with _value: r._value / (24.0 * 60.0 * 60.0)}))\n |> group(columns: [\"host\"])\n |> last()\n |> highestMax(n:1)"}],"staticLegend":{},"suffix":" days","width":1,"xPos":11}],"description":"Visualize the Security Onion grid performance metrics and alarm statuses.","name":"Security Onion Performance"}}] \ No newline at end of file From 59097070efc3391469418ff25beb77c04863e33e Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 28 May 2024 12:17:43 -0400 Subject: [PATCH 104/222] Revert "Remove unneeded jolokia aggregate metrics to reduce data ingested to influx" This reverts commit 1c1a1a1d3fc0138b3df81c19d61e80a9f4f62ec3. 
--- salt/telegraf/etc/telegraf.conf | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/salt/telegraf/etc/telegraf.conf b/salt/telegraf/etc/telegraf.conf index ecfb0730a0..1e8d9d3fee 100644 --- a/salt/telegraf/etc/telegraf.conf +++ b/salt/telegraf/etc/telegraf.conf @@ -247,25 +247,6 @@ [[inputs.jolokia2_agent]] name_prefix= "kafka_" urls = ["http://localhost:8778/jolokia"] - fieldexclude = [ - "*.EventType", - "*.FifteenMinuteRate", - "*.FiveMinuteRate", - "*.MeanRate", - "*.OneMinuteRate", - "*.RateUnit", - "*.LatencyUnit", - "*.50thPercentile", - "*.75thPercentile", - "*.95thPercentile", - "*.98thPercentile", - "*.99thPercentile", - "*.999thPercentile", - "*.Min", - "*.Mean", - "*.Max", - "*.StdDev" - ] [[inputs.jolokia2_agent.metric]] name = "topics" From 876d86048834c0dcbdf032f5f2cfa7fe61b52967 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 16:40:15 -0400 Subject: [PATCH 105/222] elastic agent should be able to communicate over 9092 for sending logs to kafka brokers Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/firewall/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 6dd3fead3a..9d943ccab9 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -77,6 +77,7 @@ firewall: elastic_agent_data: tcp: - 5055 + - 9092 udp: [] elastic_agent_update: tcp: From d9ec556061bbef5fe3a6215d0707bc6746182427 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 16:41:02 -0400 Subject: [PATCH 106/222] Update some annotations and defaults Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 7 ++++++- salt/kafka/soc_kafka.yaml | 10 ++++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 9a8c05c432..56ad9252fa 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -50,4 +50,9 @@ kafka: log_x_retention_x_hours: 168 log_x_segment_x_bytes: 1073741824 node_x_id: - process_x_roles: controller \ No newline at end of file + process_x_roles: controller + ssl_x_keystore_x_location: /etc/pki/kafka.p12 + ssl_x_keystore_x_type: PKCS12 + ssl_x_keystore_x_password: changeit + ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts + ssl_x_truststore_x_password: changeit \ No newline at end of file diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index ba673fa685..b1de1f2438 100644 --- a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -1,6 +1,6 @@ kafka: enabled: - description: Enable or disable Kafka. + description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Join all receiver nodes to grid that will be converted to Kafka nodes, configure kafka_controllers with the hostnames of the nodes you want to act as controllers, and configure the default_replication_factor to the desired value for your redundancy needs. helpLink: kafka.html cluster_id: description: The ID of the Kafka cluster. @@ -13,7 +13,9 @@ kafka: sensitive: True helpLink: kafka.html kafka_controllers: - description: A list of Security Onion grid members that should act as KRaft controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. All other nodes will default to broker roles. 
+ description: A list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. + forcedType: "[]string" + multiline: True helpLink: kafka.html config: broker: @@ -27,7 +29,7 @@ kafka: forcedType: bool helpLink: kafka.html default_x_replication_x_factor: - description: The default replication factor for automatically created topics. + description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in kafka_controllers should not be counted towards total broker count. title: default.replication.factor forcedType: int helpLink: kafka.html @@ -198,7 +200,7 @@ kafka: forcedType: int helpLink: kafka.html process_x_roles: - description: The role performed by KRaft controller node. + description: The role performed by controller node. title: process.roles readonly: True helpLink: kafka.html \ No newline at end of file From 386be4e746b08ebf4642ec82a2f7cecb84efe6e6 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 16:48:39 -0400 Subject: [PATCH 107/222] WIP: Manage Kafka nodes pillar role value This way when kafka_controllers is updated the pillar value gets updated and any non-controllers get updated to revert to 'broker' only role. Needs more testing when a new controller joins in this manner Kafka errors due to cluster metadata being out of sync. One solution is to remove /nsm/kafka/data/__cluster_metadata-0/quorum-state and restart cluster. Alternative is working with Kafka cli tools to inform cluster of new voter, likely best option but requires a wrapper script of some sort to be created for updating cluster in-place. Easiest option is to have all receivers join grid and then configure Kafka with specific controllers via SOC UI prior to enabling Kafka. This way Kafka cluster comes up in the desired configuration with no need for immediately modifying cluster Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 19 ++++++------------- salt/kafka/nodes.map.jinja | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 36 insertions(+), 17 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index 4e82eac422..e5b77db111 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -12,19 +12,12 @@ {# Create list of KRaft controllers #} {% set controllers = [] %} -{% if KAFKA_CONTROLLERS_PILLAR != none %} -{% for node in KAFKA_CONTROLLERS_PILLAR %} -{# Check that the user input for kafka_controllers pillar exists as a kafka:node value #} -{% if node in KAFKA_NODES_PILLAR %} -{% do controllers.append(KAFKA_NODES_PILLAR[node]['nodeid'] ~ '@' ~ node ~ ':9093') %} -{% endif %} -{% endfor %} -{% endif %} -{# Ensure in the event that the SOC controllers pillar has a single hostname and that hostname doesn't exist in kafka:nodes - that a controller is still set for the Kafka cluster. 
Defaulting to the grid manager #} -{% if controllers | length < 1 %} -{% do controllers.append(KAFKA_NODES_PILLAR[GLOBALS.manager]['nodeid'] ~ "@" ~ GLOBALS.manager ~ ":9093") %} -{% endif %} +{# Check for Kafka nodes with controller in process_x_roles #} +{% for node in KAFKA_NODES_PILLAR %} +{% if 'controller' in KAFKA_NODES_PILLAR[node].role %} +{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ node ~ ":9093") %} +{% endif %} +{% endfor %} {% set kafka_controller_quorum_voters = ','.join(controllers) %} diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index 9b4979e92d..e629ff7836 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -8,6 +8,10 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} +{# Set grid manager to default process_x_roles of broker,controller until overridden via SOC UI #} +{% if grains.role in ["so-standalone", "so-manager", "so-managersearch"] %} +{% set process_x_roles = 'broker,controller'%} +{% endif %} {% set current_kafkanodes = salt.saltutil.runner( 'mine.get', @@ -16,6 +20,7 @@ tgt_type='compound') %} {% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %} +{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} {% set existing_ids = [] %} @@ -43,10 +48,6 @@ {% set NEW_KAFKANODES = {} %} {% for minionid, ip in current_kafkanodes.items() %} {% set hostname = minionid.split('_')[0] %} -{# Override the default process_x_roles for manager and set to 'broker,controller'. Changes from SOC UI will overwrite this #} -{% if hostname == GLOBALS.manager %} -{% set process_x_roles = 'broker,controller' %} -{% endif %} {% if STORED_KAFKANODES != none and hostname not in STORED_KAFKANODES.items() %} {% set new_id = available_ids.pop(0) %} {% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %} @@ -67,3 +68,28 @@ {% do COMBINED_KAFKANODES.update({node: details}) %} {% endfor %} {% endif %} + +{# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #} +{% set ns = namespace(has_controller=false) %} +{% if KAFKA_CONTROLLERS_PILLAR != none %} +{% for hostname in KAFKA_CONTROLLERS_PILLAR %} +{% if hostname in COMBINED_KAFKANODES %} +{% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %} +{% set ns.has_controller = true %} +{% endif %} +{% endfor %} +{% for hostname in COMBINED_KAFKANODES %} +{% if hostname not in KAFKA_CONTROLLERS_PILLAR %} +{% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %} +{% endif %} +{% endfor %} +{# If the kafka_controllers_pillar is NOT empty check that atleast one node contains the controller role. 
+ otherwise default to GLOBALS.manager having broker,controller role #} +{% if not ns.has_controller %} +{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %} +{% endif %} +{# If kafka_controllers_pillar is empty, default to having grid manager as 'broker,controller' + so there is always atleast 1 controller in the cluster #} +{% else %} +{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %} +{% endif %} \ No newline at end of file From 62bdb2627aea9378f7c11ae9a72f04cbf2a14e96 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 29 May 2024 16:53:27 -0400 Subject: [PATCH 108/222] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b3c5d8c275..d2587d8960 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.70 +2.4.80 From 949cea95f4428f259fbaac39f02b6f22d1b14ef5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 29 May 2024 23:19:44 -0400 Subject: [PATCH 109/222] Update pillarWatch config for global.pipeline Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/salt/files/engines.conf | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index b12ba02efb..027521386e 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -39,9 +39,20 @@ engines: from: '*': to: - '*': + 'KAFKA': + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + 'KAFKA': + to: + 'REDIS': + - cmd.run: + cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False - cmd.run: - cmd: salt-call saltutil.kill_all_jobs + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs - cmd.run: - cmd: salt-call state.highstate + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate interval: 10 From 55c5ea5c4cf75f8c73e2762ce6382f0adbe9d120 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 16:58:56 +0000 Subject: [PATCH 110/222] Add template for Suricata alerts --- salt/elasticsearch/defaults.yaml | 111 +++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index e54d58c3b6..6ecdc96a16 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -11088,6 +11088,117 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-suricata_x_alerts: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - 
host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - suricata-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + data_stream: {} + index_patterns: + - logs-suricata.alerts-* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-suricata.alerts-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 60d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 1d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-syslog: index_sorting: false index_template: From e83135440198e8ed2bb0f79e420e6abd7d73e7be Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 17:00:11 +0000 Subject: [PATCH 111/222] Add Suricata alerts setting for configuration --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 000fd60b72..f56ed313e3 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -521,6 +521,7 @@ elasticsearch: so-endgame: *indexSettings so-idh: *indexSettings so-suricata: *indexSettings + so-suricata_x_alerts: *indexSettings so-import: *indexSettings so-kratos: *indexSettings so-kismet: *indexSettings From 48713a4e7b1eeabe0568390359ce9450e3ef0130 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 13:00:34 -0400 Subject: [PATCH 112/222] revert version for soup test before 2.4.80 pipeline unpaused Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d2587d8960..b3c5d8c275 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.80 +2.4.70 From 2c635bce6201859256af9891c4ae76411e8d38de Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 30 May 2024 17:02:31 +0000 Subject: [PATCH 113/222] Set index for Suricata alerts --- salt/elasticsearch/files/ingest/suricata.alert | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/suricata.alert b/salt/elasticsearch/files/ingest/suricata.alert index 87d5144ed5..68e0a5cb37 100644 --- a/salt/elasticsearch/files/ingest/suricata.alert +++ b/salt/elasticsearch/files/ingest/suricata.alert @@ -1,6 +1,7 @@ { "description" : "suricata.alert", "processors" : [ + { "set": { "field": "_index", "value": "logs-suricata.alerts-so" } }, { "set": { "field": "tags","value": "alert" }}, { "rename":{ 
"field": "message2.alert", "target_field": "rule", "ignore_failure": true } }, { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } }, From 7702f05756cf83830013e41217a756aa87c5876d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 30 May 2024 15:00:32 -0400 Subject: [PATCH 114/222] upgrade salt 3006.8. soup for 2.4.80 --- salt/manager/tools/sbin/soup | 12 ++++++++++++ salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 525fce3f6b..30f40ee5a6 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -358,6 +358,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50 [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60 [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70 + [[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80 true } @@ -375,6 +376,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50 [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60 [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70 + [[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80 true } @@ -448,6 +450,11 @@ post_to_2.4.70() { POSTVERSION=2.4.70 } +post_to_2.4.80() { + echo "Nothing to apply" + POSTVERSION=2.4.80 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -595,6 +602,11 @@ up_to_2.4.70() { INSTALLEDVERSION=2.4.70 } +up_to_2.4.80() { + echo "Nothing to do for 2.4.80" + INSTALLEDVERSION=2.4.80 +} + add_detection_test_pillars() { if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then echo "Adding detection pillar values for automated testing" diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 19677f70bd..24ba29d98f 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -1,4 +1,4 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3006.6 + version: 3006.8 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 2e4ebc93e5..dddd6683bc 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -1,6 +1,6 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3006.6 + version: 3006.8 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. 
From dbb99d03679f2593533e0cb806ba31b5bdbf91d2 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 15:10:15 -0400 Subject: [PATCH 115/222] Remove bad config Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.map.jinja | 4 ---- 1 file changed, 4 deletions(-) diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index e629ff7836..fa33adda5b 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -8,10 +8,6 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %} -{# Set grid manager to default process_x_roles of broker,controller until overridden via SOC UI #} -{% if grains.role in ["so-standalone", "so-manager", "so-managersearch"] %} -{% set process_x_roles = 'broker,controller'%} -{% endif %} {% set current_kafkanodes = salt.saltutil.runner( 'mine.get', From 00b5a5cc0c9270c434ed0e55e1e5611497652e69 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 30 May 2024 15:13:16 -0400 Subject: [PATCH 116/222] Revert "revert version for soup test before 2.4.80 pipeline unpaused" This reverts commit 48713a4e7b1eeabe0568390359ce9450e3ef0130. --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b3c5d8c275..d2587d8960 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.70 +2.4.80 From 85c269e69758402cc1976187ca346016b29eac2e Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 30 May 2024 15:59:03 -0600 Subject: [PATCH 117/222] Added TemplateDetections To Detection ClientParams The UI can now insert templates when you select a Detection language. These are those templates, annotated. --- salt/soc/defaults.yaml | 33 +++++++++++++++++++++++++++++++++ salt/soc/soc_soc.yaml | 13 ++++++++++++- 2 files changed, 45 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 9f5faf50bf..f5628f3c3e 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -2253,3 +2253,36 @@ soc: severityTranslations: minor: low major: high + templateDetections: + suricata: | + alert tcp any any <> any any (msg:""; sid:[publicId];) + strelka: | + rule { + meta: + description = ""; + strings: + $x = \"string\"; + condition: + all of them; + } + elastalert: | + title: + id: [publicId] + status: + description: + references: + - + author: + date: + tags: + - + logsource: + product: + category: + detection: + selection: + condition: selection + falsepositives: + - + level: + diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 1f64eb0bc7..47d051e4e1 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -119,7 +119,7 @@ soc: advanced: True rulesRepos: default: &eerulesRepos - description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update." + description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 
'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by nagivating to Detections --> Options dropdown menu --> Elastalert --> Full Update." global: True advanced: True forcedType: "[]{}" @@ -319,6 +319,17 @@ soc: cases: *appSettings dashboards: *appSettings detections: *appSettings + detection: + templateDetections: + suricata: + description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id. + multiline: True + strelka: + description: The template used when creating a new Strelka detection. + multiline: True + elastalert: + description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id. + multiline: True grid: maxUploadSize: description: The maximum number of bytes for an uploaded PCAP import file. From e3ea4776c70cf472c195c2bd50d895ba3cd4d5d4 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 31 May 2024 13:34:28 -0400 Subject: [PATCH 118/222] Update kafka nodes pillar before running highstate with pillarwatch engine. This allows configuring your Kafka controllers before cluster comes up for the first time Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/salt/files/engines.conf | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 027521386e..69d596ed06 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -44,6 +44,8 @@ engines: cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.apply kafka.nodes - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate 'KAFKA': From f396247838bd3c3cf8f3f0c49d20b500a5f4551b Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 31 May 2024 17:46:19 +0000 Subject: [PATCH 119/222] Add index templates and lifecycle policies --- salt/elasticsearch/defaults.yaml | 72 ++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 6ecdc96a16..36d673d70b 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -170,6 +170,78 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-items: + index_sorting: false + index_template: + composed_of: + - so-items-mappings + index_patterns: + - .items-default-** + priority: 500 + template: + mappings: + date_detection: false + settings: + index: + lifecycle: + name: so-items-logs + rollover_alias: ".items-default" + routing: + allocation: + include: + _tier_preference: "data_content" + mapping: + total_fields: + limit: 10000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + hot: + actions: + rollover: + max_size: 50gb + min_age: 0ms + so-lists: + index_sorting: false + index_template: + composed_of: + - so-lists-mappings + index_patterns: + - .lists-default-** 
+ priority: 500 + template: + mappings: + date_detection: false + settings: + index: + lifecycle: + name: so-lists-logs + rollover_alias: ".lists-default" + routing: + allocation: + include: + _tier_preference: "data_content" + mapping: + total_fields: + limit: 10000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + hot: + actions: + rollover: + max_size: 50gb + min_age: 0ms so-case: index_sorting: false index_template: From a8c231ad8c59a09de4c134b6068d3f38bebe90d5 Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 31 May 2024 17:47:01 +0000 Subject: [PATCH 120/222] Add component templates --- .../elastic-agent/so-items-mappings.json | 112 ++++++++++++++++++ .../elastic-agent/so-lists-mappings.json | 55 +++++++++ 2 files changed, 167 insertions(+) create mode 100644 salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json create mode 100644 salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json diff --git a/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json b/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json new file mode 100644 index 0000000000..85e6c1984a --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/so-items-mappings.json @@ -0,0 +1,112 @@ +{ + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "binary": { + "type": "binary" + }, + "boolean": { + "type": "boolean" + }, + "byte": { + "type": "byte" + }, + "created_at": { + "type": "date" + }, + "created_by": { + "type": "keyword" + }, + "date": { + "type": "date" + }, + "date_nanos": { + "type": "date_nanos" + }, + "date_range": { + "type": "date_range" + }, + "deserializer": { + "type": "keyword" + }, + "double": { + "type": "double" + }, + "double_range": { + "type": "double_range" + }, + "float": { + "type": "float" + }, + "float_range": { + "type": "float_range" + }, + "geo_point": { + "type": "geo_point" + }, + "geo_shape": { + "type": "geo_shape" + }, + "half_float": { + "type": "half_float" + }, + "integer": { + "type": "integer" + }, + "integer_range": { + "type": "integer_range" + }, + "ip": { + "type": "ip" + }, + "ip_range": { + "type": "ip_range" + }, + "keyword": { + "type": "keyword" + }, + "list_id": { + "type": "keyword" + }, + "long": { + "type": "long" + }, + "long_range": { + "type": "long_range" + }, + "meta": { + "type": "object", + "enabled": false + }, + "serializer": { + "type": "keyword" + }, + "shape": { + "type": "shape" + }, + "short": { + "type": "short" + }, + "text": { + "type": "text" + }, + "tie_breaker_id": { + "type": "keyword" + }, + "updated_at": { + "type": "date" + }, + "updated_by": { + "type": "keyword" + } + } + }, + "aliases": {} + }, + "version": 2, + "_meta": { + "managed": true, + "description": "default mappings for the .items index template installed by Kibana/Security" + } +} diff --git a/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json b/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json new file mode 100644 index 0000000000..b2b5fda23e --- /dev/null +++ b/salt/elasticsearch/templates/component/elastic-agent/so-lists-mappings.json @@ -0,0 +1,55 @@ +{ + "template": { + "mappings": { + "dynamic": "strict", + "properties": { + "created_at": { + "type": "date" + }, + "created_by": { + "type": "keyword" + }, + "description": { + "type": "keyword" + }, + "deserializer": { + "type": "keyword" + }, + "immutable": { + "type": 
"boolean" + }, + "meta": { + "type": "object", + "enabled": false + }, + "name": { + "type": "keyword" + }, + "serializer": { + "type": "keyword" + }, + "tie_breaker_id": { + "type": "keyword" + }, + "type": { + "type": "keyword" + }, + "updated_at": { + "type": "date" + }, + "updated_by": { + "type": "keyword" + }, + "version": { + "type": "keyword" + } + } + }, + "aliases": {} + }, + "version": 2, + "_meta": { + "managed": true, + "description": "default mappings for the .lists index template installed by Kibana/Security" + } +} From 1a832fa0a5192d326f8c6ff0073dd8e5382cef9b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Fri, 31 May 2024 14:04:46 -0400 Subject: [PATCH 121/222] Move soup kafka needfuls to up_to_2.4.80 Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/manager/tools/sbin/soup | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 8cb8fc6f51..95ca60b959 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -600,9 +600,13 @@ up_to_2.4.70() { toggle_telemetry add_detection_test_pillars + INSTALLEDVERSION=2.4.70 +} + +up_to_2.4.80() { # Kafka configuration changes - # Global pipeline changes to REDIS or KAFKA + # Global pipeline changes to REDIS or KAFKA echo "Removing global.pipeline pillar configuration" sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls # Kafka pillars @@ -615,11 +619,6 @@ up_to_2.4.70() { kafkapass=$(get_random_value) echo ' kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls - INSTALLEDVERSION=2.4.70 -} - -up_to_2.4.80() { - echo "Nothing to do for 2.4.80" INSTALLEDVERSION=2.4.80 } From 2e85a28c0247a7c2303f029d2777c6bc6b0d4d46 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Sun, 2 Jun 2024 18:25:59 -0400 Subject: [PATCH 122/222] Remove so-kafka-clusterid script, created during soup Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/nodes.sls | 9 +------ salt/manager/tools/sbin/so-kafka-clusterid | 29 ---------------------- 2 files changed, 1 insertion(+), 37 deletions(-) delete mode 100644 salt/manager/tools/sbin/so-kafka-clusterid diff --git a/salt/kafka/nodes.sls b/salt/kafka/nodes.sls index 7cafb10bc4..cae2a1d0f4 100644 --- a/salt/kafka/nodes.sls +++ b/salt/kafka/nodes.sls @@ -15,11 +15,4 @@ write_kafka_pillar_yaml: - source: salt://kafka/files/managed_node_pillar.jinja - template: jinja - context: - COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} - - -{% if kafka_cluster_id is none %} -generate_kafka_cluster_id: - cmd.run: - - name: /usr/sbin/so-kafka-clusterid -{% endif %} \ No newline at end of file + COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }} \ No newline at end of file diff --git a/salt/manager/tools/sbin/so-kafka-clusterid b/salt/manager/tools/sbin/so-kafka-clusterid deleted file mode 100644 index c4e449448c..0000000000 --- a/salt/manager/tools/sbin/so-kafka-clusterid +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. 
- - - -### THIS SCRIPT AND SALT STATE REFERENCES TO THIS SCRIPT TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL GENERATED IN SETUP AND SOUP - - -local_salt_dir=/opt/so/saltstack/local - -if [[ -f /usr/sbin/so-common ]]; then - source /usr/sbin/so-common -else - source $(dirname $0)/../../../common/tools/sbin/so-common -fi - -if ! grep -q "^ cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafka_cluster_id=$(get_random_value 22) - echo ' cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi - -if ! grep -q "^ kafka_pass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then - kafkapass=$(get_random_value) - echo ' kafka_pass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls -fi \ No newline at end of file From c88b731793045bd957b28e4d519cab0cebfc2f34 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 3 Jun 2024 15:27:08 -0400 Subject: [PATCH 123/222] revert to 3006.6 --- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 24ba29d98f..19677f70bd 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -1,4 +1,4 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: master: - version: 3006.8 + version: 3006.6 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index dddd6683bc..2e4ebc93e5 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -1,6 +1,6 @@ # version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched salt: minion: - version: 3006.8 + version: 3006.6 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. From d9c58d9333f24f695d6bc4a663c50cfe48683134 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 08:33:45 -0400 Subject: [PATCH 124/222] update receiver pillar access Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- pillar/top.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/pillar/top.sls b/pillar/top.sls index 61b812cc84..f7ec39957a 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -242,6 +242,7 @@ base: - kafka.nodes - kafka.soc_kafka - kafka.adv_kafka + - soc.license '*_import': - secrets From c0b2cf73883954d8e3feff579d7f19d2446c2120 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 4 Jun 2024 10:28:21 -0400 Subject: [PATCH 125/222] add the curlys --- salt/common/tools/sbin_jinja/so-tcpreplay | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin_jinja/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay index 6f3f02983c..a9551c0fab 100755 --- a/salt/common/tools/sbin_jinja/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -57,8 +57,8 @@ if ! docker ps | grep -q so-tcpreplay; then fi if is_sensor_node; then - echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface $REPLAYIFACE..." - docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i $REPLAYIFACE -M${REPLAYSPEED} $@" + echo "Replaying PCAP(s) at ${REPLAYSPEED} Mbps on interface ${REPLAYIFACE}..." 
+ docker exec so-tcpreplay /usr/bin/bash -c "/usr/local/bin/tcpreplay -i ${REPLAYIFACE} -M${REPLAYSPEED} $@" echo "Replay completed. Warnings shown above are typically expected." elif is_manager_node; then From fb1d4fdd3c1f58f4de6a100ec5f14d967aec5b92 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 12:33:51 -0400 Subject: [PATCH 126/222] update license Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- .../so-elastic-fleet-outputs-update | 7 +++--- salt/kafka/disabled.sls | 8 +++++++ salt/kafka/elasticfleet.sls | 2 ++ salt/kafka/enabled.sls | 22 ++++++++++++++++++- salt/kafka/init.sls | 8 ++++++- 5 files changed, 42 insertions(+), 5 deletions(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 064d49d23c..b5d6e1bfeb 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -131,9 +131,10 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed printf "Checking if the correct output policy is set as default\n" OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON) - if [ "$OUTPUT_DEFAULT" = "false" ]; then + OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON) + if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then printf "Default output policy needs to be updated.\n" - {%- if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %} update_kafka_outputs {%- else %} update_logstash_outputs @@ -145,7 +146,7 @@ if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then else printf "\nHashes don't match - update needed.\n" printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" - {%- if GLOBALS.pipeline == "KAFKA" %} + {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %} update_kafka_outputs {%- else %} update_logstash_outputs diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls index 6658f0c5e6..0027fbfb92 100644 --- a/salt/kafka/disabled.sls +++ b/salt/kafka/disabled.sls @@ -14,3 +14,11 @@ so-kafka_so-status.disabled: file.comment: - name: /opt/so/conf/so-status/so-status.conf - regex: ^so-kafka$ + +{% if grains.role in ['so-manager','so-managersearch','so-standalone'] %} +ensure_default_pipeline: + cmd.run: + - name: | + /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False; + /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS +{% endif %} \ No newline at end of file diff --git a/salt/kafka/elasticfleet.sls b/salt/kafka/elasticfleet.sls index a91df765b3..ae88998217 100644 --- a/salt/kafka/elasticfleet.sls +++ b/salt/kafka/elasticfleet.sls @@ -4,6 +4,8 @@ # Elastic License 2.0. {% from 'vars/globals.map.jinja' import GLOBALS %} +include: + - elasticfleet.enabled {# Create Kafka output policy if it doesn't exist #} update_kafka_output_policy_script: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index 833cc7f3c9..e90a314d2e 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -2,12 +2,19 @@ # or more contributor license agreements. 
Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# +# Note: Per the Elastic License 2.0, the second limitation states: +# +# "You may not move, change, disable, or circumvent the license key functionality +# in the software, and you may not remove or obscure any functionality in the +# software that is protected by the license key." {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} -{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} +{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %} +{% if 'gmd' in salt['pillar.get']('features', []) %} include: - elasticsearch.ca @@ -59,6 +66,19 @@ delete_so-kafka_so-status.disabled: - name: /opt/so/conf/so-status/so-status.conf - regex: ^so-kafka$ +{% else %} + +{{sls}}_no_license_detected: + test.fail_without_changes: + - name: {{sls}}_no_license_detected + - comment: + - "Kafka for Guaranteed Message Delivery is a feature supported only for customers with a valid license. + Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com + for more information about purchasing a license to enable this feature." +include: + - kafka.disabled +{% endif %} + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/kafka/init.sls b/salt/kafka/init.sls index 67b66c45dd..49707033ef 100644 --- a/salt/kafka/init.sls +++ b/salt/kafka/init.sls @@ -1,7 +1,13 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +# +# Note: Per the Elastic License 2.0, the second limitation states: +# +# "You may not move, change, disable, or circumvent the license key functionality +# in the software, and you may not remove or obscure any functionality in the +# software that is protected by the license key." 
{% from 'kafka/map.jinja' import KAFKAMERGED %} {% from 'vars/globals.map.jinja' import GLOBALS %} From 3b0339a9b3196491dac1f9a95ed053b056ca1eb1 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 4 Jun 2024 14:27:52 -0400 Subject: [PATCH 127/222] create kafka.id from kafka {partition}-{offset}-{timestamp} for tracking event Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index c3e70ec2ce..a17b3a17a7 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -84,6 +84,7 @@ { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, + { "set": { "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ From a2467d0418532cef20e9c13724b13ee1e4e8618f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:24:57 -0400 Subject: [PATCH 128/222] move so-tcpreplay to sensor state --- salt/sensor/init.sls | 19 ++++++++++++++++++- .../tools/sbin_jinja/so-tcpreplay | 0 2 files changed, 18 insertions(+), 1 deletion(-) rename salt/{common => sensor}/tools/sbin_jinja/so-tcpreplay (100%) diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index 53cd808c6c..ca1cf13c29 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -9,4 +9,21 @@ execute_checksum: cmd.run: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - - file: offload_script \ No newline at end of file + - file: offload_script + +sensor_sbin: + file.recurse: + - name: /usr/sbin + - source: salt://sensor/tools/sbin + - user: 939 + - group: 939 + - file_mode: 755 + +sensor_sbin_jinja: + file.recurse: + - name: /usr/sbin + - source: salt://sensor/tools/sbin_jinja + - user: 939 + - group: 939 + - file_mode: 755 + - template: jinja diff --git a/salt/common/tools/sbin_jinja/so-tcpreplay b/salt/sensor/tools/sbin_jinja/so-tcpreplay similarity index 100% rename from salt/common/tools/sbin_jinja/so-tcpreplay rename to salt/sensor/tools/sbin_jinja/so-tcpreplay From ff5773c8379d140cb6a239e89f4dd48672b15f7e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:56:32 -0400 Subject: [PATCH 129/222] move so-tcpreplay back to common. 
return empty string if no sensor.interface pillar --- .../tools/sbin_jinja/so-tcpreplay | 2 +- salt/sensor/init.sls | 18 +----------------- 2 files changed, 2 insertions(+), 18 deletions(-) rename salt/{sensor => common}/tools/sbin_jinja/so-tcpreplay (96%) diff --git a/salt/sensor/tools/sbin_jinja/so-tcpreplay b/salt/common/tools/sbin_jinja/so-tcpreplay similarity index 96% rename from salt/sensor/tools/sbin_jinja/so-tcpreplay rename to salt/common/tools/sbin_jinja/so-tcpreplay index a9551c0fab..969ca699f7 100755 --- a/salt/sensor/tools/sbin_jinja/so-tcpreplay +++ b/salt/common/tools/sbin_jinja/so-tcpreplay @@ -10,7 +10,7 @@ . /usr/sbin/so-common . /usr/sbin/so-image-common -REPLAYIFACE=${REPLAYIFACE:-"{{pillar.sensor.interface}}"} +REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"} REPLAYSPEED=${REPLAYSPEED:-10} mkdir -p /opt/so/samples diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index ca1cf13c29..c9c6a6db5a 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -10,20 +10,4 @@ execute_checksum: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - file: offload_script - -sensor_sbin: - file.recurse: - - name: /usr/sbin - - source: salt://sensor/tools/sbin - - user: 939 - - group: 939 - - file_mode: 755 - -sensor_sbin_jinja: - file.recurse: - - name: /usr/sbin - - source: salt://sensor/tools/sbin_jinja - - user: 939 - - group: 939 - - file_mode: 755 - - template: jinja + \ No newline at end of file From f6a8a21f94715786f6b645b6342de76a836d498a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 5 Jun 2024 08:58:46 -0400 Subject: [PATCH 130/222] remove space --- salt/sensor/init.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/sensor/init.sls b/salt/sensor/init.sls index c9c6a6db5a..730a7c7ad0 100644 --- a/salt/sensor/init.sls +++ b/salt/sensor/init.sls @@ -10,4 +10,3 @@ execute_checksum: - name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable - onchanges: - file: offload_script - \ No newline at end of file From c4723263a485a5025d7793d088e00dc566d34bd0 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 6 Jun 2024 08:59:17 -0400 Subject: [PATCH 131/222] Remove unused kafka reactor Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/reactor/kafka.sls | 16 ---------------- 1 file changed, 16 deletions(-) delete mode 100644 salt/reactor/kafka.sls diff --git a/salt/reactor/kafka.sls b/salt/reactor/kafka.sls deleted file mode 100644 index 879fb54310..0000000000 --- a/salt/reactor/kafka.sls +++ /dev/null @@ -1,16 +0,0 @@ -{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -https://securityonion.net/license; you may not use this file except in compliance with the -Elastic License 2.0. 
#} - -{% set minionid = data['id'].split('_')[0] %} -{% set role = data['data']['process_x_roles'] %} - -{# Run so-yaml to replace kafka.node..role with the value from kafka/controllers.sls #} - -update_global_kafka_pillar: - local.cmd.run: - - tgt: 'G@role:so-manager or G@role:so-managersearch or G@role:so-standalone' - - tgt_type: compound - - arg: - - '/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/nodes.sls kafka.nodes.{{ minionid }}.role {{ role }}' \ No newline at end of file From ccd6b3914cda395999a0fc3819b9ed0cd0ef569d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 10:33:55 -0400 Subject: [PATCH 132/222] add final msg queue for soup. --- salt/manager/tools/sbin/soup | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 02c01920d2..258c09ed64 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -19,6 +19,9 @@ SOUP_LOG=/root/soup.log WHATWOULDYOUSAYYAHDOHERE=soup whiptail_title='Security Onion UPdater' NOTIFYCUSTOMELASTICCONFIG=false +# used to display messages to the user at the end of soup +declare -a FINAL_MESSAGE_QUEUE=() + check_err() { local exit_code=$1 @@ -344,6 +347,22 @@ masterunlock() { mv -v $BACKUPTOPFILE $TOPFILE } +phases_pillar_2_4_80() { + echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + + #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases + #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE + read -r -d '' msg << EOM + Found elasticsearch.index_settings.global_overrides.index_template.phases set to: + so-yaml removed stuff here + A backup of all pillars was saved to /nsm/backup/ + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases" + EOM + + FINAL_MESSAGE_QUEUE+=("$msg") +} + preupgrade_changes() { # This function is to add any new pillar items if needed. echo "Checking to see if changes are needed." 
@@ -603,7 +622,7 @@ up_to_2.4.70() { } up_to_2.4.80() { - echo "Nothing to do for 2.4.80" + phases_pillar_2_4_80 INSTALLEDVERSION=2.4.80 } @@ -1267,6 +1286,14 @@ EOF fi +# check if the FINAL_MESSAGE_QUEUE is not empty +if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then + for m in "${FINAL_MESSAGE_QUEUE[@]}"; do + echo "$m" + echo + done +fi + echo "### soup has been served at $(date) ###" } From 6920b77b4a3425fd3eb8f6a5316d702d5f6cbf1f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 11:00:43 -0400 Subject: [PATCH 133/222] fix msg --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 258c09ed64..c510e832bd 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -352,13 +352,13 @@ phases_pillar_2_4_80() { #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE - read -r -d '' msg << EOM + read -r -d '' msg << EOF Found elasticsearch.index_settings.global_overrides.index_template.phases set to: so-yaml removed stuff here A backup of all pillars was saved to /nsm/backup/ Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases" - EOM + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases +EOF FINAL_MESSAGE_QUEUE+=("$msg") } From 5600fed9c4854f06e98c9aed31d0ce0550d13936 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 6 Jun 2024 11:56:07 -0400 Subject: [PATCH 134/222] add ability to retrieve yaml values via so-yaml.py; improve so-minion id matching --- pyci.sh | 14 +-- salt/manager/tools/sbin/so-minion | 4 +- salt/manager/tools/sbin/so-yaml.py | 74 +++++++++++----- salt/manager/tools/sbin/so-yaml_test.py | 111 +++++++++++++++++++----- 4 files changed, 149 insertions(+), 54 deletions(-) diff --git a/pyci.sh b/pyci.sh index e852870630..8cbee5e758 100755 --- a/pyci.sh +++ b/pyci.sh @@ -15,12 +15,16 @@ TARGET_DIR=${1:-.} PATH=$PATH:/usr/local/bin -if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then - echo "Missing dependencies. Consider running the following command:" - echo " python -m pip install flake8 pytest pytest-cov" +if [ ! -d .venv ]; then + python -m venv .venv +fi + +source .venv/bin/activate + +if ! pip install flake8 pytest pytest-cov pyyaml; then + echo "Unable to install dependencies." exit 1 fi -pip install pytest pytest-cov flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" -python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" \ No newline at end of file +python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 8b563ef1d4..da1a6d2a24 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -112,8 +112,8 @@ function testMinion() { result=$? # If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well - local_id=$(lookup_grain id) - if [[ ! 
"$local_id" =~ "${MINION_ID}_" ]]; then + local_id=$(lookup_grain id) + if [[ ! "$local_id" =~ "${MINION_ID}_" && "$local_id" != "${MINION_ID}" ]]; then salt "$MINION_ID" cmd.run 'so-test' result=$? fi diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index 5427a2e483..cddc827b51 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -14,19 +14,20 @@ def showUsage(args): - print('Usage: {} [ARGS...]'.format(sys.argv[0])) - print(' General commands:') - print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.') - print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.') - print(' remove - Removes a yaml key, if it exists. Requires KEY arg.') - print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.') - print(' help - Prints this usage information.') - print('') - print(' Where:') - print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml') - print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2') - print(' VALUE - Value to set for a given key') - print(' LISTITEM - Item to append to a given key\'s list value') + print('Usage: {} [ARGS...]'.format(sys.argv[0]), file=sys.stderr) + print(' General commands:', file=sys.stderr) + print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr) + print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr) + print(' get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr) + print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr) + print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr) + print(' help - Prints this usage information.', file=sys.stderr) + print('', file=sys.stderr) + print(' Where:', file=sys.stderr) + print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr) + print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr) + print(' VALUE - Value to set for a given key', file=sys.stderr) + print(' LISTITEM - Item to append to a given key\'s list value', file=sys.stderr) sys.exit(1) @@ -38,7 +39,7 @@ def loadYaml(filename): def writeYaml(filename, content): file = open(filename, "w") - return yaml.dump(content, file) + return yaml.safe_dump(content, file) def appendItem(content, key, listItem): @@ -49,15 +50,15 @@ def appendItem(content, key, listItem): try: content[key].append(listItem) except AttributeError: - print("The existing value for the given key is not a list. No action was taken on the file.") + print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr) return 1 except KeyError: - print("The key provided does not exist. No action was taken on the file.") + print("The key provided does not exist. No action was taken on the file.", file=sys.stderr) return 1 def convertType(value): - if len(value) > 0 and (not value.startswith("0") or len(value) == 1): + if isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1): if "." 
in value: try: value = float(value) @@ -83,7 +84,7 @@ def append(args): if len(args) != 3: print('Missing filename, key arg, or list item to append', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -112,7 +113,7 @@ def add(args): if len(args) != 3: print('Missing filename, key arg, and/or value', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -137,7 +138,7 @@ def remove(args): if len(args) != 2: print('Missing filename or key arg', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -153,7 +154,7 @@ def replace(args): if len(args) != 3: print('Missing filename, key arg, and/or value', file=sys.stderr) showUsage(None) - return + return 1 filename = args[0] key = args[1] @@ -167,6 +168,32 @@ def replace(args): return 0 +def getKeyValue(content, key): + pieces = key.split(".", 1) + if len(pieces) > 1: + return getKeyValue(content[pieces[0]], pieces[1]) + return content.get(key, None) + + +def get(args): + if len(args) != 2: + print('Missing filename or key arg', file=sys.stderr) + showUsage(None) + return 1 + + filename = args[0] + key = args[1] + + content = loadYaml(filename) + output = getKeyValue(content, key) + if output is None: + print("Not found", file=sys.stderr) + return 2 + + print(yaml.safe_dump(output)) + return 0 + + def main(): args = sys.argv[1:] @@ -178,6 +205,7 @@ def main(): "help": showUsage, "add": add, "append": append, + "get": get, "remove": remove, "replace": replace, } @@ -195,11 +223,11 @@ def main(): break except Exception: if lockAttempts == 1: - print("Waiting for lock file to be released from another process...") + print("Waiting for lock file to be released from another process...", file=sys.stderr) time.sleep(2) if lockAttempts == maxAttempts: - print("Lock file (" + lockFile + ") could not be created; proceeding without lock.") + print("Lock file (" + lockFile + ") could not be created; proceeding without lock.", file=sys.stderr) cmd = commands.get(args[0], showUsage) code = cmd(args[1:]) diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index 7effabac92..ca9839e02f 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -15,40 +15,40 @@ class TestRemove(unittest.TestCase): def test_main_missing_input(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd"] soyaml.main() sysmock.assert_called_once_with(1) - self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_main_help_locked(self): filename = "/tmp/so-yaml.lock" file = open(filename, "w") file.write = "fake lock file" with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: with patch('time.sleep', new=MagicMock()) as mock_sleep: sys.argv = ["cmd", "help"] soyaml.main() sysmock.assert_called() mock_sleep.assert_called_with(2) - self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_main_help(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.main() sysmock.assert_called() - 
self.assertIn(mock_stdout.getvalue(), "Usage:") + self.assertIn("Usage:", mock_stderr.getvalue()) def test_remove_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.remove(["file"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue()) def test_remove(self): filename = "/tmp/so-yaml_test-remove.yaml" @@ -97,7 +97,7 @@ def test_remove_nested_deep(self): def test_remove_missing_args(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: filename = "/tmp/so-yaml_test-remove.yaml" file = open(filename, "w") file.write("{key1: { child1: 123, child2: abc }, key2: false}") @@ -112,15 +112,15 @@ def test_remove_missing_args(self): expected = "{key1: { child1: 123, child2: abc }, key2: false}" self.assertEqual(actual, expected) sysmock.assert_called_once_with(1) - self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") + self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue()) def test_append_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.append(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n") + self.assertIn("Missing filename, key arg, or list item to append\n", mock_stderr.getvalue()) def test_append(self): filename = "/tmp/so-yaml_test-remove.yaml" @@ -173,11 +173,11 @@ def test_append_key_noexist(self): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key4", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n") + self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_noexist_deep(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -186,11 +186,11 @@ def test_append_key_noexist_deep(self): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n") + self.assertEqual("The key provided does not exist. 
No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_nonlist(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -199,11 +199,11 @@ def test_append_key_nonlist(self): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") + self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue()) def test_append_key_nonlist_deep(self): filename = "/tmp/so-yaml_test-append.yaml" @@ -212,11 +212,11 @@ def test_append_key_nonlist_deep(self): file.close() with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stdout', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"] soyaml.main() sysmock.assert_called() - self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n") + self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue()) def test_add_key(self): content = {} @@ -244,11 +244,11 @@ def test_add_key(self): def test_add_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.add(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue()) def test_add(self): filename = "/tmp/so-yaml_test-add.yaml" @@ -296,11 +296,11 @@ def test_add_nested_deep(self): def test_replace_missing_arg(self): with patch('sys.exit', new=MagicMock()) as sysmock: - with patch('sys.stderr', new=StringIO()) as mock_stdout: + with patch('sys.stderr', new=StringIO()) as mock_stderr: sys.argv = ["cmd", "help"] soyaml.replace(["file", "key"]) sysmock.assert_called() - self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n") + self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue()) def test_replace(self): filename = "/tmp/so-yaml_test-add.yaml" @@ -360,3 +360,66 @@ def test_convert(self): self.assertEqual(soyaml.convertType("false"), False) self.assertEqual(soyaml.convertType("FALSE"), False) self.assertEqual(soyaml.convertType(""), "") + + def test_get_int(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep1"]) + self.assertEqual(result, 0) + self.assertIn("45\n...", mock_stdout.getvalue()) + + def test_get_str(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep1"]) + 
self.assertEqual(result, 0) + self.assertIn("hello\n...", mock_stdout.getvalue()) + + def test_get_list(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key3"]) + self.assertEqual(result, 0) + self.assertIn("- e\n- f\n- g\n", mock_stdout.getvalue()) + + def test_get_dict(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1"]) + self.assertEqual(result, 0) + self.assertIn("child1: 123\nchild2:\n deep1: hello\n", mock_stdout.getvalue()) + + def test_get_missing(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child2.deep3"]) + self.assertEqual(result, 2) + self.assertEqual("", mock_stdout.getvalue()) + + def test_get_usage(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stderr: + result = soyaml.get([]) + self.assertEqual(result, 1) + self.assertIn("Missing filename or key arg", mock_stderr.getvalue()) + sysmock.assert_called_once_with(1) From a39c88c7b4c678aad35edd9c2c3e1fcff76a1dc4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 12:56:24 -0400 Subject: [PATCH 135/222] add set to troubleshoot failure --- salt/manager/tools/sbin/soup | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c510e832bd..c09db0626e 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -350,6 +350,7 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + set +e #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE read -r -d '' msg << EOF @@ -361,6 +362,7 @@ phases_pillar_2_4_80() { EOF FINAL_MESSAGE_QUEUE+=("$msg") + set -e } preupgrade_changes() { From e85c3e5b27b026238d7ebf9bb39f23f2aa6fc97a Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 5 Jun 2024 14:45:06 -0600 Subject: [PATCH 136/222] SOC Proxy Setting The so_proxy value we build during install is now copied to SOC's config. 
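As a rough sketch of what this change accomplishes (the proxy URL below is a placeholder for illustration, not a value taken from this patch series), a manager pillar written during install such as:

# hypothetical pillar; the real proxy URL is site-specific and set during install
manager:
  proxy: 'http://proxy.example.internal:3128'

would now also surface as config.server.proxy in SOC's merged configuration, per the merged.map.jinja change in this commit.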
--- salt/soc/defaults.yaml | 1 + salt/soc/merged.map.jinja | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f5628f3c3e..65fb450d9f 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,6 +1286,7 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads + proxy: '' modules: cases: soc filedatastore: diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index f2c88fde98..4ee0eea1ec 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -1,5 +1,5 @@ {# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one - or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} @@ -11,6 +11,9 @@ {% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %} +{% set MANAGER_PROXY = salt['pillar.get']('manager:proxy', '') %} +{% do SOCMERGED.config.server.update({'proxy': MANAGER_PROXY}) %} + {# if SOCMERGED.config.server.modules.cases == httpcase details come from the soc pillar #} {% if SOCMERGED.config.server.modules.cases != 'soc' %} {% do SOCMERGED.config.server.modules.elastic.update({'casesEnabled': false}) %} From 42818a9950bb7832e08099aa243fcafd71c8f75f Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 6 Jun 2024 13:28:07 -0600 Subject: [PATCH 137/222] Remove proxy from SOC defaults --- salt/soc/defaults.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 65fb450d9f..f5628f3c3e 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,7 +1286,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - proxy: '' modules: cases: soc filedatastore: From f37f5ba97b4fa1b79de7d1325f639db3d7f94fc5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 6 Jun 2024 15:57:58 -0400 Subject: [PATCH 138/222] Update soc_suricata.yaml --- salt/suricata/soc_suricata.yaml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/salt/suricata/soc_suricata.yaml b/salt/suricata/soc_suricata.yaml index e157ff852b..1ecabacd85 100644 --- a/salt/suricata/soc_suricata.yaml +++ b/salt/suricata/soc_suricata.yaml @@ -150,13 +150,16 @@ suricata: helpLink: suricata.html vars: address-groups: - HOME_NET: &suriaddressgroup + HOME_NET: description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. 
regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$ regexFailureMessage: You must enter a valid IP address or CIDR. helpLink: suricata.html duplicates: True - EXTERNAL_NET: *suriaddressgroup + EXTERNAL_NET: &suriaddressgroup + description: Assign a list of hosts, or networks, or other customization, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable. + helpLink: suricata.html + duplicates: True HTTP_SERVERS: *suriaddressgroup SMTP_SERVERS: *suriaddressgroup SQL_SERVERS: *suriaddressgroup From d3b81babec949fc9631de3be24768ec1971389ea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 6 Jun 2024 16:15:21 -0400 Subject: [PATCH 139/222] check for phases with so-yaml, remove if exists --- salt/manager/tools/sbin/soup | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index c09db0626e..1850c2b9bf 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -349,20 +349,24 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" - - set +e - #so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases - #if so-yaml removed stuff add this message to the FINAL_MESSAGE_QUEUE - read -r -d '' msg << EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases set to: - so-yaml removed stuff here - A backup of all pillars was saved to /nsm/backup/ - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases) + case $? 
in + 0) + so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases + set +e + read -r -d '' msg << EOF + Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: + ${PHASES} + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + A backup of all pillars was saved to /nsm/backup/ EOF - - FINAL_MESSAGE_QUEUE+=("$msg") - set -e + FINAL_MESSAGE_QUEUE+=("$msg") + set -e + ;; + 2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;; + *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this + esac } preupgrade_changes() { From d39c8fae54abfb4625a0de35de2d4ee3b0d7ac83 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 09:01:16 -0400 Subject: [PATCH 140/222] format output --- salt/manager/tools/sbin/soup | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 1850c2b9bf..81a7545d78 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -354,12 +354,13 @@ phases_pillar_2_4_80() { 0) so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases set +e - read -r -d '' msg << EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: - ${PHASES} - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - If you want to set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases - A backup of all pillars was saved to /nsm/backup/ + read -r -d '' msg <<- EOF + Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: + ${PHASES} + + Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases + To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases + A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") set -e From f5cc35509b48bbb944b4922a94105193c89e545e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 11:03:26 -0400 Subject: [PATCH 141/222] fix output alignment --- salt/manager/tools/sbin/soup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 81a7545d78..0ab8d9d46e 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -355,12 +355,12 @@ phases_pillar_2_4_80() { so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases set +e read -r -d '' msg <<- EOF - Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: - ${PHASES} +Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: +${PHASES} - Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases - To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases - 
A backup of all pillar files was saved to /nsm/backup/ +Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases +To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases +A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") set -e From fa063722e102cc07da2f076f64375f061bfea2ad Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Thu, 6 Jun 2024 16:36:09 -0600 Subject: [PATCH 142/222] RootCA and InsecureSkipVerify New empty settings and their annotations. --- salt/soc/defaults.yaml | 2 ++ salt/soc/soc_soc.yaml | 8 ++++++++ 2 files changed, 10 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f5628f3c3e..03476c3f58 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,6 +1286,8 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads + rootCA: '' + insecureSkipVerify: false modules: cases: soc filedatastore: diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 47d051e4e1..ec633f7731 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -81,6 +81,14 @@ soc: description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser. global: True advanced: True + rootCA: + description: Root Certificate Authority (CA) public key in PEM format that SOC will use to validate outgoing requests. This is useful when the SOC server is behind a reverse proxy that performs SSL termination. + multiline: True + advanced: True + insecureSkipVerify: + description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. + advanced: True + forcedType: bool modules: elastalertengine: additionalAlerters: From 5d3fd3d389b7ed5b751d0229153c05461966f472 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 7 Jun 2024 12:47:09 -0600 Subject: [PATCH 143/222] AdditionalCA and InsecureSkipVerify New fields have been added to manager and then duplicated over to SOC's config in the same vein as how proxy was updated earlier this week. AdditionalCA holds the PEM formatted public keys that should be trusted when making requests. It has been implemented for both Sigma's zip downloads and Sigma and Suricata's repository clones and pulls. InsecureSkipVerify has been added to help our users troubleshoot their configuration. Setting it to true will not verify the cert on outgoing requests. Self signed, missing, or invalid certs will not throw an error. 
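For illustration only, a minimal sketch of how these new manager settings might be populated (the certificate body is a truncated placeholder and both values are assumptions, not anything shipped in this patch series):

# hypothetical manager pillar overrides; the shipped defaults are additionalCA: '' and insecureSkipVerify: False
manager:
  additionalCA: |
    -----BEGIN CERTIFICATE-----
    ...PEM-encoded CA certificate(s) to trust for Sigma zip downloads and Sigma/Suricata repo clones...
    -----END CERTIFICATE-----
  insecureSkipVerify: false   # leave disabled except for short-lived TLS troubleshooting

Both values are then copied into SOC's merged server config alongside the existing proxy setting, as shown in the merged.map.jinja change below.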
--- salt/manager/defaults.yaml | 4 +++- salt/manager/map.jinja | 7 +++++++ salt/manager/soc_manager.yaml | 18 +++++++++++++++--- salt/soc/merged.map.jinja | 6 ++++-- salt/soc/soc_soc.yaml | 8 -------- 5 files changed, 29 insertions(+), 14 deletions(-) create mode 100644 salt/manager/map.jinja diff --git a/salt/manager/defaults.yaml b/salt/manager/defaults.yaml index 8bb34690eb..708900af6a 100644 --- a/salt/manager/defaults.yaml +++ b/salt/manager/defaults.yaml @@ -2,4 +2,6 @@ manager: reposync: enabled: True hour: 3 - minute: 0 \ No newline at end of file + minute: 0 + additionalCA: '' + insecureSkipVerify: False diff --git a/salt/manager/map.jinja b/salt/manager/map.jinja new file mode 100644 index 0000000000..1ab9c12c3f --- /dev/null +++ b/salt/manager/map.jinja @@ -0,0 +1,7 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{% import_yaml 'manager/defaults.yaml' as MANAGERDEFAULTS %} +{% set MANAGERMERGED = salt['pillar.get']('manager', MANAGERDEFAULTS.manager, merge=True) %} \ No newline at end of file diff --git a/salt/manager/soc_manager.yaml b/salt/manager/soc_manager.yaml index f6461a0c7b..f3346269ec 100644 --- a/salt/manager/soc_manager.yaml +++ b/salt/manager/soc_manager.yaml @@ -7,7 +7,7 @@ manager: hour: description: The hour of the day in which the repo sync takes place. global: True - helpLink: soup.html + helpLink: soup.html minute: description: The minute within the hour to run the repo sync. global: True @@ -16,11 +16,23 @@ manager: description: Enable elastalert 1=enabled 0=disabled. global: True helpLink: elastalert.html - no_proxy: - description: String of hosts to ignore the proxy settings for. + no_proxy: + description: String of hosts to ignore the proxy settings for. global: True helpLink: proxy.html proxy: description: Proxy server to use for updates. global: True helpLink: proxy.html + additionalCA: + description: Additional CA certificates to trust in PEM format. + global: True + advanced: True + multiline: True + helpLink: proxy.html + insecureSkipVerify: + description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. 
+ advanced: True + forcedType: bool + global: True + helpLink: proxy.html diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 4ee0eea1ec..c823175cba 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -6,13 +6,15 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'soc/defaults.map.jinja' import SOCDEFAULTS with context %} {% from 'logstash/map.jinja' import LOGSTASH_NODES %} +{% from 'manager/map.jinja' import MANAGERMERGED %} {% set DOCKER_EXTRA_HOSTS = LOGSTASH_NODES %} {% do DOCKER_EXTRA_HOSTS.append({GLOBALS.influxdb_host:pillar.node_data[GLOBALS.influxdb_host].ip}) %} {% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %} -{% set MANAGER_PROXY = salt['pillar.get']('manager:proxy', '') %} -{% do SOCMERGED.config.server.update({'proxy': MANAGER_PROXY}) %} +{% do SOCMERGED.config.server.update({'proxy': MANAGERMERGED.proxy}) %} +{% do SOCMERGED.config.server.update({'additionalCA': MANAGERMERGED.additionalCA}) %} +{% do SOCMERGED.config.server.update({'insecureSkipVerify': MANAGERMERGED.insecureSkipVerify}) %} {# if SOCMERGED.config.server.modules.cases == httpcase details come from the soc pillar #} {% if SOCMERGED.config.server.modules.cases != 'soc' %} diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index ec633f7731..47d051e4e1 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -81,14 +81,6 @@ soc: description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser. global: True advanced: True - rootCA: - description: Root Certificate Authority (CA) public key in PEM format that SOC will use to validate outgoing requests. This is useful when the SOC server is behind a reverse proxy that performs SSL termination. - multiline: True - advanced: True - insecureSkipVerify: - description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes. 
- advanced: True - forcedType: bool modules: elastalertengine: additionalAlerters: From ee696be51d5fd01276aa8db15b26af4a3a44d40c Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Fri, 7 Jun 2024 13:04:54 -0600 Subject: [PATCH 144/222] Remove rootCA and insecureSkipVerify from SOC defaults --- salt/soc/defaults.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 03476c3f58..f5628f3c3e 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1286,8 +1286,6 @@ soc: maxPacketCount: 5000 htmlDir: html importUploadDir: /nsm/soc/uploads - rootCA: '' - insecureSkipVerify: false modules: cases: soc filedatastore: From dbc56ffee787feb6f124db5e54bbd66b7c87734f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Jun 2024 15:09:09 -0400 Subject: [PATCH 145/222] Update defaults.yaml --- salt/firewall/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index b105059565..0f7ce911af 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1293,6 +1293,9 @@ firewall: beats_endpoint_ssl: portgroups: - beats_5644 + elastic_agent_endpoint: + portgroups: + - elastic_agent_data endgame: portgroups: - endgame From 4057238185aad7a4c8c58b226e25f23b84ef09bb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 7 Jun 2024 15:33:49 -0400 Subject: [PATCH 146/222] Update defaults.yaml --- salt/firewall/defaults.yaml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 0f7ce911af..5d53b98644 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1267,26 +1267,37 @@ firewall: chain: DOCKER-USER: hostgroups: + desktop: + portgroups: + - elastic_agent_data fleet: portgroups: - - beats_5056 + - elastic_agent_data + idh: + portgroups: + - elastic_agent_data sensor: portgroups: - - beats_5044 - - beats_5644 - elastic_agent_data searchnode: portgroups: - redis - - beats_5644 + - elastic_agent_data + standalone: + portgroups: + - redis + - elastic_agent_data + manager: + portgroups: + - elastic_agent_data managersearch: portgroups: - redis - - beats_5644 + - elastic_agent_data self: portgroups: - redis - - beats_5644 + - elastic_agent_data beats_endpoint: portgroups: - beats_5044 From 0139e1827113b3ef81a20487409145116fdd46f6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 7 Jun 2024 16:03:21 -0400 Subject: [PATCH 147/222] additional description --- salt/manager/tools/sbin/soup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ab8d9d46e..6adb39f2f7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1295,6 +1295,7 @@ EOF # check if the FINAL_MESSAGE_QUEUE is not empty if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then +echo "The following additional information specifically applies to your grid:\n" for m in "${FINAL_MESSAGE_QUEUE[@]}"; do echo "$m" echo From f2f688b9b8b1e5eb1467eb140efbba6df590d87e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 7 Jun 2024 16:18:09 -0400 Subject: [PATCH 148/222] Update soup --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 6adb39f2f7..0d52e5c16a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -1295,7 +1295,7 @@ EOF # check if the FINAL_MESSAGE_QUEUE is not 
empty if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then -echo "The following additional information specifically applies to your grid:\n" + echo "The following additional information applies specifically to your grid:\n" for m in "${FINAL_MESSAGE_QUEUE[@]}"; do echo "$m" echo From f96b82b11203e7ca400f5dfa131556fc075799e2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 8 Jun 2024 07:44:46 -0400 Subject: [PATCH 149/222] gracefully handle missing parent key --- salt/manager/tools/sbin/so-yaml.py | 2 +- salt/manager/tools/sbin/so-yaml_test.py | 11 +++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py index cddc827b51..275032ee08 100755 --- a/salt/manager/tools/sbin/so-yaml.py +++ b/salt/manager/tools/sbin/so-yaml.py @@ -170,7 +170,7 @@ def replace(args): def getKeyValue(content, key): pieces = key.split(".", 1) - if len(pieces) > 1: + if len(pieces) > 1 and pieces[0] in content: return getKeyValue(content[pieces[0]], pieces[1]) return content.get(key, None) diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py index ca9839e02f..5ca46cb68c 100644 --- a/salt/manager/tools/sbin/so-yaml_test.py +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -416,6 +416,17 @@ def test_get_missing(self): self.assertEqual(result, 2) self.assertEqual("", mock_stdout.getvalue()) + def test_get_missing_parent(self): + with patch('sys.stdout', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-get.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}") + file.close() + + result = soyaml.get([filename, "key1.child3.deep3"]) + self.assertEqual(result, 2) + self.assertEqual("", mock_stdout.getvalue()) + def test_get_usage(self): with patch('sys.exit', new=MagicMock()) as sysmock: with patch('sys.stderr', new=StringIO()) as mock_stderr: From f1638faa3a669c91f93e866a34ce19424fb34b7f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 8 Jun 2024 08:18:34 -0400 Subject: [PATCH 150/222] correct placement of error check override --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0d52e5c16a..9ee4058f9c 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -349,11 +349,11 @@ masterunlock() { phases_pillar_2_4_80() { echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists" + set +e PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases) case $? in 0) so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases - set +e read -r -d '' msg <<- EOF Found elasticsearch.index_settings.global_overrides.index_template.phases was set to: ${PHASES} @@ -363,11 +363,11 @@ To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.inde A backup of all pillar files was saved to /nsm/backup/ EOF FINAL_MESSAGE_QUEUE+=("$msg") - set -e ;; 2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." 
;; *) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this esac + set -e } preupgrade_changes() { From 284c1be85fc058945e772b5ee02898d13594e070 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 10 Jun 2024 11:08:54 -0400 Subject: [PATCH 151/222] Update Kafka controller(s) via SOC UI Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/config.map.jinja | 2 -- salt/kafka/config.sls | 6 ++++++ salt/kafka/defaults.yaml | 2 +- salt/kafka/enabled.sls | 2 -- salt/kafka/nodes.map.jinja | 5 +++-- salt/kafka/soc_kafka.yaml | 5 ++--- salt/salt/files/engines.conf | 18 ++++++++++++++++++ 7 files changed, 30 insertions(+), 10 deletions(-) diff --git a/salt/kafka/config.map.jinja b/salt/kafka/config.map.jinja index e5b77db111..88d27c1a80 100644 --- a/salt/kafka/config.map.jinja +++ b/salt/kafka/config.map.jinja @@ -7,8 +7,6 @@ {% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %} -{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %} - {# Create list of KRaft controllers #} {% set controllers = [] %} diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls index 5cf6f8201c..165daf7eb1 100644 --- a/salt/kafka/config.sls +++ b/salt/kafka/config.sls @@ -66,6 +66,12 @@ kafka_kraft_{{sc}}_properties: - show_changes: False {% endfor %} +reset_quorum_on_changes: + cmd.run: + - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state + - watch: + - file: /opt/so/conf/kafka/server.properties + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index 56ad9252fa..f45560e60d 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -2,7 +2,7 @@ kafka: enabled: False cluster_id: kafka_pass: - kafka_controllers: [] + kafka_controllers: config: broker: advertised_x_listeners: diff --git a/salt/kafka/enabled.sls b/salt/kafka/enabled.sls index e90a314d2e..75cf711482 100644 --- a/salt/kafka/enabled.sls +++ b/salt/kafka/enabled.sls @@ -53,9 +53,7 @@ so-kafka: - /nsm/kafka/data/:/nsm/kafka/data/:rw - /opt/so/log/kafka:/opt/kafka/logs/:rw - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro - {% if GLOBALS.is_manager %} - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties - {% endif %} - watch: {% for sc in ['server', 'client'] %} - file: kafka_kraft_{{sc}}_properties diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja index fa33adda5b..c0b98de147 100644 --- a/salt/kafka/nodes.map.jinja +++ b/salt/kafka/nodes.map.jinja @@ -68,14 +68,15 @@ {# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #} {% set ns = namespace(has_controller=false) %} {% if KAFKA_CONTROLLERS_PILLAR != none %} -{% for hostname in KAFKA_CONTROLLERS_PILLAR %} +{% set KAFKA_CONTROLLERS_PILLAR_LIST = KAFKA_CONTROLLERS_PILLAR.split(',') %} +{% for hostname in KAFKA_CONTROLLERS_PILLAR_LIST %} {% if hostname in COMBINED_KAFKANODES %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %} {% set ns.has_controller = true %} {% endif %} {% endfor %} {% for hostname in COMBINED_KAFKANODES %} -{% if hostname not in KAFKA_CONTROLLERS_PILLAR %} +{% if hostname not in KAFKA_CONTROLLERS_PILLAR_LIST %} {% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %} {% endif %} {% endfor %} diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml index b1de1f2438..05f047c4a8 100644 --- 
a/salt/kafka/soc_kafka.yaml +++ b/salt/kafka/soc_kafka.yaml @@ -13,9 +13,8 @@ kafka: sensitive: True helpLink: kafka.html kafka_controllers: - description: A list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. - forcedType: "[]string" - multiline: True + description: A comma-seperated list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start. + forcedType: "string" helpLink: kafka.html config: broker: diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf index 69d596ed06..de5685fffb 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -57,4 +57,22 @@ engines: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs - cmd.run: cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate + - files: + - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls + - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls + pillar: kafka.kafka_controllers + default: '' + actions: + from: + '*': + to: + '*': + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs + - cmd.run: + cmd: salt-call state.apply kafka.nodes + - cmd.run: + cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka + - cmd.run: + cmd: salt-call state.apply elasticfleet interval: 10 From adeab10f6d88bdf733e7b7284178eb172fbe95d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 12:14:27 -0400 Subject: [PATCH 152/222] upgrade docker and containerd.io for oracle --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 769c58af87..281fbc4ac6 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -51,10 +51,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-3.1.el9 - - docker-ce: 24.0.4-1.el9 - - docker-ce-cli: 24.0.4-1.el9 - - docker-ce-rootless-extras: 24.0.4-1.el9 + - containerd.io: 1.6.33-3.1.el9 + - docker-ce: 26.1.4-1.el9 + - docker-ce-cli: 26.1.4-1.el9 + - docker-ce-rootless-extras: 26.1.4-1.el9 - hold: True - update_holds: True {% endif %} From c6d0a1766937ebc7ae7425e7cba2d23271c93c17 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 15:43:29 -0400 Subject: [PATCH 153/222] docker upgrade debian 12 --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 281fbc4ac6..732a9d7ddb 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -20,10 +20,10 @@ dockergroup: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-1 - - docker-ce: 5:24.0.3-1~debian.12~bookworm - - docker-ce-cli: 5:24.0.3-1~debian.12~bookworm - - docker-ce-rootless-extras: 
5:24.0.3-1~debian.12~bookworm + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~debian.12~bookworm + - docker-ce-cli: 5:26.1.4-1~debian.12~bookworm + - docker-ce-rootless-extras: 5:26.1.4-1~debian.12~bookworm - hold: True - update_holds: True {% elif grains.oscodename == 'jammy' %} From dbd987345075eea5a3d28f188eb6543b2d8bc30e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 16:04:11 -0400 Subject: [PATCH 154/222] upgrade docker for jammy --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 732a9d7ddb..9140c1c33d 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -30,10 +30,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.6.21-1 - - docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy - - docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy - - docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~ubuntu.22.04~jammy + - docker-ce-cli: 5:26.1.4-1~ubuntu.22.04~jammy + - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.22.04~jammy - hold: True - update_holds: True {% else %} From 0b1e3b2a7f066b3bfe219ee028acda912422c06c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 10 Jun 2024 16:24:44 -0400 Subject: [PATCH 155/222] upgrade docker for focal --- salt/docker/init.sls | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 9140c1c33d..a02aa4a3b6 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -40,10 +40,10 @@ dockerheldpackages: dockerheldpackages: pkg.installed: - pkgs: - - containerd.io: 1.4.9-1 - - docker-ce: 5:20.10.8~3-0~ubuntu-focal - - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal - - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal + - containerd.io: 1.6.33-1 + - docker-ce: 5:26.1.4-1~ubuntu.20.04~focal + - docker-ce-cli: 5:26.1.4-1~ubuntu.20.04~focal + - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.20.04~focal - hold: True - update_holds: True {% endif %} From 4b481bd405ea088a9d7e6f36d6b512ad2fa4caea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 11 Jun 2024 09:41:58 -0400 Subject: [PATCH 156/222] add epoch to docker for oracle --- salt/docker/init.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index a02aa4a3b6..1e37364bcc 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -52,8 +52,8 @@ dockerheldpackages: pkg.installed: - pkgs: - containerd.io: 1.6.33-3.1.el9 - - docker-ce: 26.1.4-1.el9 - - docker-ce-cli: 26.1.4-1.el9 + - docker-ce: 3:26.1.4-1.el9 + - docker-ce-cli: 1:26.1.4-1.el9 - docker-ce-rootless-extras: 26.1.4-1.el9 - hold: True - update_holds: True From 08d2a6242dfa11ce9a89798d4fd8007df9d4cb9f Mon Sep 17 00:00:00 2001 From: DefensiveDepth Date: Tue, 11 Jun 2024 10:03:33 -0400 Subject: [PATCH 157/222] Add new bind - suricata all.rules --- salt/soc/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/enabled.sls b/salt/soc/enabled.sls index 4d4b5f6fdc..9b50b449b8 100644 --- a/salt/soc/enabled.sls +++ b/salt/soc/enabled.sls @@ -27,6 +27,7 @@ so-soc: - /opt/so/conf/strelka:/opt/sensoroni/yara:rw - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw + - /opt/so/rules/nids/suri:/opt/sensoroni/nids:ro - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw - /nsm/soc/jobs:/opt/sensoroni/jobs:rw - 
/nsm/soc/uploads:/nsm/soc/uploads:rw From 08557ae2875ff6fb8959bd28573394acede60a6b Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:01:34 -0400 Subject: [PATCH 158/222] kafka.id field should only be present when metadata for kafka exists Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index a17b3a17a7..899ec0c64a 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -84,7 +84,7 @@ { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } }, { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } }, - { "set": { "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, + { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], "on_failure": [ From ca7b89c308ede9fad5429c7e6d4061590a936911 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Tue, 11 Jun 2024 11:21:13 -0400 Subject: [PATCH 159/222] Added Kafka reset to SOC UI. Incase of changing an active broker to a controller topics may become unavailable. Resolving this would require manual intervention. This option allows running a reset to start from a clean slate to then configure cluster to desired state before reenabling Kafka as global pipeline. Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/kafka/defaults.yaml | 1 + salt/kafka/disabled.sls | 3 --- salt/kafka/reset_kafka.sls | 9 +++++++++ salt/kafka/soc_kafka.yaml | 7 +++++-- salt/salt/files/engines.conf | 16 ++++++++++++++++ 5 files changed, 31 insertions(+), 5 deletions(-) create mode 100644 salt/kafka/reset_kafka.sls diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml index f45560e60d..e029bc2516 100644 --- a/salt/kafka/defaults.yaml +++ b/salt/kafka/defaults.yaml @@ -3,6 +3,7 @@ kafka: cluster_id: kafka_pass: kafka_controllers: + reset_kafka: config: broker: advertised_x_listeners: diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls index 0027fbfb92..4678e26023 100644 --- a/salt/kafka/disabled.sls +++ b/salt/kafka/disabled.sls @@ -3,9 +3,6 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. 
From ca7b89c308ede9fad5429c7e6d4061590a936911 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Tue, 11 Jun 2024 11:21:13 -0400
Subject: [PATCH 159/222] Added Kafka reset to SOC UI. In case of changing an
 active broker to a controller, topics may become unavailable, and resolving
 this would require manual intervention. This option allows running a reset to
 start from a clean slate, then configuring the cluster to the desired state
 before re-enabling Kafka as the global pipeline.

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/defaults.yaml     |  1 +
 salt/kafka/disabled.sls      |  3 ---
 salt/kafka/reset_kafka.sls   |  9 +++++++++
 salt/kafka/soc_kafka.yaml    |  7 +++++--
 salt/salt/files/engines.conf | 16 ++++++++++++++++
 5 files changed, 31 insertions(+), 5 deletions(-)
 create mode 100644 salt/kafka/reset_kafka.sls

diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml
index f45560e60d..e029bc2516 100644
--- a/salt/kafka/defaults.yaml
+++ b/salt/kafka/defaults.yaml
@@ -3,6 +3,7 @@ kafka:
   cluster_id:
   kafka_pass:
   kafka_controllers:
+  reset_kafka:
   config:
     broker:
       advertised_x_listeners:
diff --git a/salt/kafka/disabled.sls b/salt/kafka/disabled.sls
index 0027fbfb92..4678e26023 100644
--- a/salt/kafka/disabled.sls
+++ b/salt/kafka/disabled.sls
@@ -3,9 +3,6 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.
 
-include:
-  - kafka.sostatus
-
 so-kafka:
   docker_container.absent:
     - force: True
diff --git a/salt/kafka/reset_kafka.sls b/salt/kafka/reset_kafka.sls
new file mode 100644
index 0000000000..8789516cdf
--- /dev/null
+++ b/salt/kafka/reset_kafka.sls
@@ -0,0 +1,9 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+wipe_kafka_data:
+  file.absent:
+    - name: /nsm/kafka/data/
+    - force: True
\ No newline at end of file
diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml
index 05f047c4a8..686b2ad972 100644
--- a/salt/kafka/soc_kafka.yaml
+++ b/salt/kafka/soc_kafka.yaml
@@ -1,6 +1,6 @@
 kafka:
   enabled:
-    description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Join all receiver nodes to grid that will be converted to Kafka nodes, configure kafka_controllers with the hostnames of the nodes you want to act as controllers, and configure the default_replication_factor to the desired value for your redundancy needs.
+    description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Configure kafka_controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs.
     helpLink: kafka.html
   cluster_id:
     description: The ID of the Kafka cluster.
@@ -13,9 +13,12 @@ kafka:
     sensitive: True
     helpLink: kafka.html
   kafka_controllers:
-    description: A comma-seperated list of Security Onion grid members that should act as controllers for this Kafka cluster. By default, the grid manager will use a 'combined' role where it will act as both a broker and controller. Keep total Kafka controllers to an odd number and ensure you do not assign ALL your Kafka nodes as controllers or this Kafka cluster will not start.
+    description: A comma-seperated list of Security Onion hosts that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. WARNING - The hostnames of receiver nodes intended to be controllers should be added here BEFORE they have joined the Security Onion grid or BEFORE enabling KAFKA. This is to ensure that data is not lost by converting a data broker to a controller. Failure to do so may result in topics becoming unavailable and requiring manual intervention to repair or resetting Kafka data.
     forcedType: "string"
     helpLink: kafka.html
+  reset_kafka:
+    description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready.
+    helpLink: kafka.html
   config:
     broker:
       advertised_x_listeners:
diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf
index de5685fffb..dbfb89973e 100644
--- a/salt/salt/files/engines.conf
+++ b/salt/salt/files/engines.conf
@@ -75,4 +75,20 @@ engines:
                     cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka
                 - cmd.run:
                     cmd: salt-call state.apply elasticfleet
+    - files:
+        - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
+        - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
+      pillar: kafka.reset_kafka
+      default: ''
+      actions:
+        from:
+          '*':
+            to:
+              'YES_RESET_KAFKA':
+                - cmd.run:
+                    cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
+                - cmd.run:
+                    cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset_kafka
+                - cmd.run:
+                    cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset_kafka
   interval: 10
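
Note: the YES_RESET_KAFKA action wired into the engine above is roughly equivalent to running the following by hand from the manager. This is a sketch assembled from the engine entries in this commit (targeting expression and paths are taken from the config above), not an additional supported procedure.

    # Stop any in-flight Kafka-related jobs on the affected roles
    salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs

    # Tear down the so-kafka container and wipe /nsm/kafka/data/
    salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset_kafka

    # Clear the trigger so the engine does not fire again
    /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset_kafka
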
From a81e4c33625cbdfd0ceb3ac7ad3a001ce4f8ad96 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Tue, 11 Jun 2024 11:55:17 -0400
Subject: [PATCH 160/222] remove dash(-) from kafka.id

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1
index 899ec0c64a..233cd647b4 100644
--- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1
+++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1
@@ -84,7 +84,7 @@
     { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
     { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
     { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
-    { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}-{{metadata.kafka.offset}}-{{metadata.kafka.timestamp}}", "ignore_failure": true } },
+    { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
     { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
   ],
   "on_failure": [
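
Note: once the updated pipeline is in place, a quick spot check that kafka.id is being populated in its new dash-free form can be done with a search such as the one below. The index pattern and credentials are assumptions for illustration and may differ per deployment.

    curl -sk -u "$ES_USER:$ES_PASS" -XGET "https://localhost:9200/logs-*/_search?size=1" \
      -H 'Content-Type: application/json' -d'
    {
      "query": { "exists": { "field": "kafka.id" } },
      "_source": ["kafka.id", "metadata.kafka"]
    }'
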
From 628893fd5b85da670d7fd4860c00dfe93ae208b5 Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Tue, 11 Jun 2024 11:56:21 -0400
Subject: [PATCH 161/222] remove redundant 'kafka_' from annotations & defaults

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/defaults.yaml     |  6 +++---
 salt/kafka/soc_kafka.yaml    | 13 +++++++------
 salt/manager/tools/sbin/soup |  2 +-
 salt/salt/files/engines.conf |  6 +++---
 4 files changed, 14 insertions(+), 13 deletions(-)

diff --git a/salt/kafka/defaults.yaml b/salt/kafka/defaults.yaml
index e029bc2516..062c2d5ca9 100644
--- a/salt/kafka/defaults.yaml
+++ b/salt/kafka/defaults.yaml
@@ -1,9 +1,9 @@
 kafka:
   enabled: False
   cluster_id:
-  kafka_pass:
-  kafka_controllers:
-  reset_kafka:
+  password:
+  controllers:
+  reset:
   config:
     broker:
       advertised_x_listeners:
diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml
index 686b2ad972..59816cef36 100644
--- a/salt/kafka/soc_kafka.yaml
+++ b/salt/kafka/soc_kafka.yaml
@@ -1,6 +1,6 @@
 kafka:
   enabled:
-    description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Configure kafka_controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs.
+    description: Enable or disable Kafka. Recommended to have desired configuration staged prior to enabling Kafka. Configure controllers with the hostnames of the nodes you want to act as controllers, join all receiver nodes to grid that will be converted to Kafka nodes, and configure the default_replication_factor to the desired value for your redundancy needs.
     helpLink: kafka.html
   cluster_id:
     description: The ID of the Kafka cluster.
@@ -8,16 +8,17 @@ kafka:
     advanced: True
     sensitive: True
     helpLink: kafka.html
-  kafka_pass:
+  password:
     description: The password to use for the Kafka certificates.
     sensitive: True
     helpLink: kafka.html
-  kafka_controllers:
+  controllers:
     description: A comma-seperated list of Security Onion hosts that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. WARNING - The hostnames of receiver nodes intended to be controllers should be added here BEFORE they have joined the Security Onion grid or BEFORE enabling KAFKA. This is to ensure that data is not lost by converting a data broker to a controller. Failure to do so may result in topics becoming unavailable and requiring manual intervention to repair or resetting Kafka data.
     forcedType: "string"
     helpLink: kafka.html
-  reset_kafka:
-    description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready.
+  reset:
+    description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_reset' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
+    advanced: True
     helpLink: kafka.html
   config:
     broker:
@@ -31,7 +32,7 @@ kafka:
     forcedType: bool
     helpLink: kafka.html
   default_x_replication_x_factor:
-    description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in kafka_controllers should not be counted towards total broker count.
+    description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in controllers should not be counted towards total broker count.
     title: default.replication.factor
     forcedType: int
     helpLink: kafka.html
diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup
index 56d2d7de3c..e38ba40e0c 100755
--- a/salt/manager/tools/sbin/soup
+++ b/salt/manager/tools/sbin/soup
@@ -644,7 +644,7 @@ up_to_2.4.80() {
   kafka_cluster_id=$(get_random_value 22)
   echo '  cluster_id: '$kafka_cluster_id >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
   kafkapass=$(get_random_value)
-  echo '  kafka_pass: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
+  echo '  password: '$kafkapass >> /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
 
   INSTALLEDVERSION=2.4.80
 }
diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf
index dbfb89973e..7b1f9bf3e3 100644
--- a/salt/salt/files/engines.conf
+++ b/salt/salt/files/engines.conf
@@ -60,7 +60,7 @@ engines:
     - files:
         - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
         - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
-      pillar: kafka.kafka_controllers
+      pillar: kafka.controllers
       default: ''
       actions:
        from:
@@ -78,7 +78,7 @@ engines:
     - files:
         - /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
         - /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
-      pillar: kafka.reset_kafka
+      pillar: kafka.reset
       default: ''
      actions:
         from:
@@ -90,5 +90,5 @@ engines:
                 - cmd.run:
                     cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset_kafka
                 - cmd.run:
-                    cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset_kafka
+                    cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset
   interval: 10
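
Note: since this commit renames the pillar keys (kafka_pass to password, kafka_controllers to controllers, reset_kafka to reset), a quick way to confirm the new keys resolve after a highstate is to query the pillar directly on the manager. These are illustrative checks, not part of the patch; avoid echoing the password in shared terminals because it is marked sensitive.

    salt-call pillar.get kafka:controllers
    salt-call pillar.get kafka:cluster_id
    salt-call pillar.get kafka:password --out=json | wc -c   # length check only

    # The values written by soup/setup live in the local pillar file
    grep -E 'cluster_id|password' /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
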
From d5ef0e57443edd62a801392cc7bcb52d11f12d56 Mon Sep 17 00:00:00 2001
From: Corey Ogburn
Date: Tue, 11 Jun 2024 12:34:32 -0600
Subject: [PATCH 162/222] Fix unnecessary escaping

---
 salt/soc/defaults.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml
index f5628f3c3e..f0d028fdbd 100644
--- a/salt/soc/defaults.yaml
+++ b/salt/soc/defaults.yaml
@@ -2261,7 +2261,7 @@ soc:
         meta:
           description = "";
         strings:
-          $x = \"string\";
+          $x = "string";
         condition:
           all of them;
       }

From c38f48c7f28f8b8e669eb2ae6dcf092fbf8896b8 Mon Sep 17 00:00:00 2001
From: m0duspwnens
Date: Wed, 12 Jun 2024 10:34:32 -0400
Subject: [PATCH 163/222] remove this \n

---
 salt/manager/tools/sbin/soup | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup
index 0d52e5c16a..821feed734 100755
--- a/salt/manager/tools/sbin/soup
+++ b/salt/manager/tools/sbin/soup
@@ -1295,7 +1295,7 @@ EOF
 
   # check if the FINAL_MESSAGE_QUEUE is not empty
   if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then
-    echo "The following additional information applies specifically to your grid:\n"
+    echo "The following additional information applies specifically to your grid:"
     for m in "${FINAL_MESSAGE_QUEUE[@]}"; do
      echo "$m"
       echo
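
Note: the soup change above works because plain echo in bash does not interpret backslash escapes, so the trailing \n was being printed literally; removing it (or switching to echo -e or printf) gives the intended output. A quick illustration, independent of this patch:

    echo "grid info:\n"      # prints the two characters \n literally
    echo -e "grid info:\n"   # interprets \n and prints a blank line after the text
    printf 'grid info:\n\n'  # portable alternative with an explicit blank line
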
From b7eebad2a570d53383194014787df2dffa8bad5d Mon Sep 17 00:00:00 2001
From: reyesj2 <94730068+reyesj2@users.noreply.github.com>
Date: Wed, 12 Jun 2024 11:01:40 -0400
Subject: [PATCH 164/222] Update Kafka self reset & add initial Kafka wrapper
 scripts to build out

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
---
 salt/kafka/config.sls                        |  5 +-
 salt/kafka/nodes.map.jinja                   |  2 +-
 salt/kafka/{reset_kafka.sls => reset.sls}    |  0
 salt/kafka/soc_kafka.yaml                    |  2 +-
 salt/kafka/tools/sbin/so-kafka-cli           | 47 +++++++++++
 salt/kafka/tools/sbin/so-kafka-config-update | 87 ++++++++++++++++++++
 salt/salt/files/engines.conf                 |  2 +-
 7 files changed, 139 insertions(+), 6 deletions(-)
 rename salt/kafka/{reset_kafka.sls => reset.sls} (100%)
 create mode 100644 salt/kafka/tools/sbin/so-kafka-cli
 create mode 100644 salt/kafka/tools/sbin/so-kafka-config-update

diff --git a/salt/kafka/config.sls b/salt/kafka/config.sls
index 165daf7eb1..1c3d8c26ba 100644
--- a/salt/kafka/config.sls
+++ b/salt/kafka/config.sls
@@ -20,14 +20,13 @@ kafka:
     - uid: 960
     - gid: 960
 
-{# Future tools to query kafka directly / show consumer groups
 kafka_sbin_tools:
   file.recurse:
     - name: /usr/sbin
     - source: salt://kafka/tools/sbin
     - user: 960
     - group: 960
-    - file_mode: 755 #}
+    - file_mode: 755
 
 kafka_sbin_jinja_tools:
   file.recurse:
@@ -69,7 +68,7 @@ kafka_kraft_{{sc}}_properties:
 reset_quorum_on_changes:
   cmd.run:
     - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state
-    - watch:
+    - onchanges:
       - file: /opt/so/conf/kafka/server.properties
 
 {% else %}
diff --git a/salt/kafka/nodes.map.jinja b/salt/kafka/nodes.map.jinja
index c0b98de147..3a73b038fa 100644
--- a/salt/kafka/nodes.map.jinja
+++ b/salt/kafka/nodes.map.jinja
@@ -16,7 +16,7 @@ tgt_type='compound') %}
 
 {% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %}
 
-{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:kafka_controllers', default=None) %}
+{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:controllers', default=None) %}
 
 {% set existing_ids = [] %}
 
diff --git a/salt/kafka/reset_kafka.sls b/salt/kafka/reset.sls
similarity index 100%
rename from salt/kafka/reset_kafka.sls
rename to salt/kafka/reset.sls
diff --git a/salt/kafka/soc_kafka.yaml b/salt/kafka/soc_kafka.yaml
index 59816cef36..1172fc5b94 100644
--- a/salt/kafka/soc_kafka.yaml
+++ b/salt/kafka/soc_kafka.yaml
@@ -17,7 +17,7 @@ kafka:
     forcedType: "string"
     helpLink: kafka.html
   reset:
-    description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_reset' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready.
+    description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
     advanced: True
+    helpLink: kafka.html
   config:
     broker:
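
Note: the so-kafka-cli and so-kafka-config-update wrappers introduced below are thin front-ends for the stock Kafka command-line tools shipped inside the so-kafka container. As a rough sketch of the kind of calls such a wrapper ends up issuing (the tool path under /opt/kafka/bin and the bootstrap address are assumptions for illustration, not taken from this patch):

    # List topics and check partition/replication layout
    docker exec so-kafka /opt/kafka/bin/kafka-topics.sh \
      --bootstrap-server localhost:9092 --describe

    # Show consumer groups (e.g. the Logstash consumers)
    docker exec so-kafka /opt/kafka/bin/kafka-consumer-groups.sh \
      --bootstrap-server localhost:9092 --list

    # Inspect KRaft quorum status (brokers vs controllers)
    docker exec so-kafka /opt/kafka/bin/kafka-metadata-quorum.sh \
      --bootstrap-server localhost:9092 describe --status
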
diff --git a/salt/kafka/tools/sbin/so-kafka-cli b/salt/kafka/tools/sbin/so-kafka-cli
new file mode 100644
index 0000000000..41993f67f3
--- /dev/null
+++ b/salt/kafka/tools/sbin/so-kafka-cli
@@ -0,0 +1,47 @@
+#! /bin/bash
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+if [ -z "$NOROOT" ]; then
+  # Check for prerequisites
+  if [ "$(id -u)" -ne 0 ]; then
+    echo "This script must be run using sudo!"
+    exit 1
+  fi
+fi
+
+function usage() {
+  echo -e "\nUsage: $0