diff --git a/alert-policies/amazon-cloudwatch-network-monitor/HighPacketLoss.yml b/alert-policies/amazon-cloudwatch-network-monitor/HighPacketLoss.yml
new file mode 100644
index 0000000000..76ba9108b5
--- /dev/null
+++ b/alert-policies/amazon-cloudwatch-network-monitor/HighPacketLoss.yml
@@ -0,0 +1,43 @@
+# Name of the alert
+name: High Packet Loss
+
+# Description and details
+description: |+
+  The percentage of packets lost in transit between the Network Monitor probe source and the destination
+
+# Type of alert
+type: STATIC
+
+# NRQL query
+nrql:
+ query: "SELECT max(`aws.networkmonitor.PacketLoss`) FROM Metric"
+
+# Function used to aggregate the NRQL query value(s) for comparison to the terms.threshold (Default: SINGLE_VALUE)
+valueFunction: SINGLE_VALUE
+
+# List of Critical and Warning thresholds for the condition
+terms:
+ - priority: CRITICAL
+ # Operator used to compare against the threshold.
+ operator: ABOVE
+ # Value that triggers a violation; float value
+ threshold: 30
+ # Time in seconds; 120 - 3600
+ thresholdDuration: 300
+ # How many data points must be in violation for the duration
+ thresholdOccurrences: ALL
+
+ # Adding a Warning threshold is optional
+ - priority: WARNING
+ operator: ABOVE
+ threshold: 20
+ thresholdDuration: 300
+ thresholdOccurrences: ALL
+
+
+# OPTIONAL: URL of runbook to be sent with notification
+runbookUrl:
+
+# Duration after which a violation automatically closes
+# Time in seconds; 300 - 2592000 (Default: 86400 [1 day])
+violationTimeLimitSeconds: 86400
diff --git a/alert-policies/amazon-cloudwatch-network-monitor/Network Unhealthy.yml b/alert-policies/amazon-cloudwatch-network-monitor/Network Unhealthy.yml
new file mode 100644
index 0000000000..04ddcd7cb3
--- /dev/null
+++ b/alert-policies/amazon-cloudwatch-network-monitor/Network Unhealthy.yml
@@ -0,0 +1,38 @@
+# Name of the alert
+name: Network Unhealthy
+
+# Description and details
+description: |+
+  The Network Health Indicator supports two values – 0 (AWS Network is Healthy) or 1 (AWS Network is
+  Degraded)
+
+# Type of alert
+type: STATIC
+
+# NRQL query
+nrql:
+  query: "SELECT sum(aws.networkmonitor.HealthIndicator) from Metric"
+
+# Function used to aggregate the NRQL query value(s) for comparison to the terms.threshold (Default: SINGLE_VALUE)
+valueFunction: SINGLE_VALUE
+
+# List of Critical and Warning thresholds for the condition
+terms:
+ - priority: CRITICAL
+ # Operator used to compare against the threshold.
+ operator: EQUALS
+ # Value that triggers a violation; float value
+ threshold: 1
+ # Time in seconds; 120 - 3600
+ thresholdDuration: 300
+ # How many data points must be in violation for the duration
+ thresholdOccurrences: ALL
+
+
+
+# OPTIONAL: URL of runbook to be sent with notification
+runbookUrl:
+
+# Duration after which a violation automatically closes
+# Time in seconds; 300 - 2592000 (Default: 86400 [1 day])
+violationTimeLimitSeconds: 86400
diff --git a/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.json b/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.json
new file mode 100644
index 0000000000..c9139d1387
--- /dev/null
+++ b/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.json
@@ -0,0 +1,175 @@
+{
+ "name": "Amazon CloudWatch Network Monitor",
+ "description": null,
+ "pages": [
+ {
+ "name": "Amazon CloudWatch Network Monitor",
+ "description": null,
+ "widgets": [
+ {
+ "title": "",
+ "layout": {
+ "column": 1,
+ "row": 1,
+ "width": 3,
+ "height": 2
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.markdown"
+ },
+ "rawConfiguration": {
+ "text": "# Amazon CloudWatch Network Monitor\nIt is an active network monitoring service to troubleshoot issues in hybrid network connectivity from on-premise to AWS via AWS Direct Connect. Using this real-time data, customers can mitigate the issue quickly by routing the traffic to a redundant healthy path."
+ }
+ },
+ {
+ "title": "Network Health",
+ "layout": {
+ "column": 4,
+ "row": 1,
+ "width": 3,
+ "height": 2
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.billboard"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [],
+ "query": "FROM Metric select if(sum(aws.networkmonitor.HealthIndicator) = 1, 'Unhealthy', 'Healthy') as 'Status'"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ }
+ }
+ },
+ {
+ "title": "Overlay Network Performance",
+ "layout": {
+ "column": 7,
+ "row": 1,
+ "width": 6,
+ "height": 2
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "colors": {
+ "seriesOverrides": [
+ {
+ "color": "#2bf410",
+ "seriesName": "Health Indicator"
+ }
+ ]
+ },
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [],
+ "query": "FROM Metric select sum(aws.networkmonitor.HealthIndicator) as 'Health Indicator' TIMESERIES AUTO"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Round Trip Latency",
+ "layout": {
+ "column": 1,
+ "row": 3,
+ "width": 12,
+ "height": 2
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [],
+ "query": "select min(aws.networkmonitor.RTT) as 'Minimum Latency',max(aws.networkmonitor.RTT) as 'Maximum Latency',average(aws.networkmonitor.RTT) as 'Average Latency' from Metric where metricName = 'aws.networkmonitor.RTT' TIMESERIES AUTO facet aws.networkmonitor.Monitor"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "units": {
+ "unit": "MS"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ },
+ {
+ "title": "Packet Loss (%)",
+ "layout": {
+ "column": 1,
+ "row": 5,
+ "width": 12,
+ "height": 2
+ },
+ "linkedEntityGuids": null,
+ "visualization": {
+ "id": "viz.line"
+ },
+ "rawConfiguration": {
+ "facet": {
+ "showOtherSeries": false
+ },
+ "legend": {
+ "enabled": true
+ },
+ "nrqlQueries": [
+ {
+ "accountIds": [],
+ "query": "SELECT max(`aws.networkmonitor.PacketLoss`) FROM Metric FACET aws.networkmonitor.Monitor TIMESERIES AUTO"
+ }
+ ],
+ "platformOptions": {
+ "ignoreTimeRange": false
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "variables": []
+}
\ No newline at end of file
diff --git a/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.png b/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.png
new file mode 100644
index 0000000000..ee036b8364
Binary files /dev/null and b/dashboards/amazon-cloudwatch-network-monitor/amazon-cloudwatch-network-monitor.png differ
diff --git a/dashboards/confluent-cloud/confluent-cloud.json b/dashboards/confluent-cloud/confluent-cloud.json
index 343d4e3cd8..3714f81e0a 100644
--- a/dashboards/confluent-cloud/confluent-cloud.json
+++ b/dashboards/confluent-cloud/confluent-cloud.json
@@ -7,122 +7,120 @@
"description": null,
"widgets": [
{
- "title": "Number of Brokers",
+ "title": "Confluent Cloud",
"layout": {
"column": 1,
"row": 1,
- "width": 4,
- "height": 3
+ "width": 2,
+ "height": 4
},
"linkedEntityGuids": null,
"visualization": {
- "id": "viz.billboard"
+ "id": "viz.markdown"
},
"rawConfiguration": {
- "facet": {
- "showOtherSeries": false
- },
- "nrqlQueries": [
- {
- "accountId": 0,
- "query": "SELECT latest(kafka.brokers) as 'Kafka Brokers' FROM Metric "
- }
- ],
- "platformOptions": {
- "ignoreTimeRange": false
- }
+          "text": "\n# Confluent Cloud Kafka\n#### [Confluent API docs](https://api.telemetry.confluent.cloud/docs/descriptors/datasets/cloud)"
}
},
{
- "title": "Consumer Group Lag",
+ "title": "Cluster Specs",
"layout": {
- "column": 5,
+ "column": 3,
"row": 1,
- "width": 4,
- "height": 3
+ "width": 5,
+ "height": 4
},
"linkedEntityGuids": null,
"visualization": {
- "id": "viz.line"
+ "id": "viz.billboard"
},
"rawConfiguration": {
+ "dataFormatters": [
+ {
+ "name": "Last Update",
+ "type": "date"
+ }
+ ],
"facet": {
"showOtherSeries": false
},
- "legend": {
- "enabled": true
- },
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT average(kafka.consumer_group.lag) FROM Metric TIMESERIES FACET `topic` LIMIT MAX"
+ "accountIds": [],
+ "query": "SELECT uniqueCount(topic) as 'Topics',\nlatest(confluent_kafka_server_partition_count) as 'Partitions', uniqueCount(consumer_group_id) as 'Consumer Groups', uniqueCount(connector_id) AS 'Connectors' FROM Metric where `kafka_id` in ({{kafka_id}})"
}
],
"platformOptions": {
"ignoreTimeRange": false
- },
- "yAxisLeft": {
- "zero": true
}
}
},
{
- "title": "Active Connections",
+ "title": "Request Bytes (bytes/second) Per Type",
"layout": {
- "column": 9,
+ "column": 8,
"row": 1,
- "width": 4,
- "height": 3
+ "width": 5,
+ "height": 4
},
"linkedEntityGuids": null,
"visualization": {
- "id": "viz.line"
+ "id": "viz.pie"
},
"rawConfiguration": {
"facet": {
- "showOtherSeries": false
+ "showOtherSeries": true
},
"legend": {
"enabled": true
},
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT max(confluent_kafka_server_active_connection_count) AS 'Active Connections' FROM Metric facet kafka_id LIMIT MAX TIMESERIES"
+ "accountIds": [],
+ "query": "SELECT rate(sum(confluent_kafka_server_request_bytes), 1 second) FROM Metric WHERE\n`kafka_id` in ({{kafka_id}}) FACET type LIMIT MAX"
}
],
"platformOptions": {
"ignoreTimeRange": false
- },
- "yAxisLeft": {
- "zero": true
}
}
},
{
- "title": "Topics",
+ "title": "Retained Bytes Per Topic",
"layout": {
"column": 1,
- "row": 4,
+ "row": 5,
"width": 4,
"height": 3
},
"linkedEntityGuids": null,
"visualization": {
- "id": "viz.table"
+ "id": "viz.line"
},
"rawConfiguration": {
"facet": {
"showOtherSeries": false
},
+ "legend": {
+ "enabled": true
+ },
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT latest(kafka.topic.partitions) FROM Metric FACET `topic` LIMIT 100"
+ "accountIds": [],
+ "query": "SELECT latest(confluent_kafka_server_retained_bytes) FROM Metric TIMESERIES WHERE kafka_id IN ({{kafka_id}}) FACET topic LIMIT MAX"
}
],
"platformOptions": {
"ignoreTimeRange": false
+ },
+ "units": {
+ "unit": "BYTES"
+ },
+ "yAxisLeft": {
+ "zero": true
+ },
+ "yAxisRight": {
+ "zero": true
}
}
},
@@ -130,7 +128,7 @@
"title": "Records Sent Per Minute",
"layout": {
"column": 5,
- "row": 4,
+ "row": 5,
"width": 4,
"height": 3
},
@@ -160,10 +158,10 @@
}
},
{
- "title": "Records Recieved Per Minute",
+ "title": "Active Connections",
"layout": {
"column": 9,
- "row": 4,
+ "row": 5,
"width": 4,
"height": 3
},
@@ -180,8 +178,8 @@
},
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT rate(sum(confluent_kafka_server_received_records),1 minute ) FROM Metric TIMESERIES FACET `topic` where `kafka_id` in ({{kafka_id}}) LIMIT MAX"
+ "accountIds": [],
+ "query": "SELECT max(confluent_kafka_server_active_connection_count) AS 'Active Connections' FROM Metric facet kafka_id WHERE kafka_id IN ({{kafka_id}}) LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
@@ -189,39 +187,42 @@
},
"yAxisLeft": {
"zero": true
+ },
+ "yAxisRight": {
+ "zero": true
}
}
},
{
- "title": "Cluster Specs",
+ "title": "Partitions ",
"layout": {
"column": 1,
- "row": 7,
+ "row": 8,
"width": 4,
"height": 3
},
"linkedEntityGuids": null,
"visualization": {
- "id": "viz.billboard"
+ "id": "viz.line"
},
"rawConfiguration": {
- "dataFormatters": [
- {
- "name": "Last Update",
- "type": "date"
- }
- ],
"facet": {
"showOtherSeries": false
},
+ "legend": {
+ "enabled": true
+ },
"nrqlQueries": [
{
"accountId": 0,
- "query": "SELECT uniqueCount(topic) as 'Topics',\nlatest(confluent_kafka_server_partition_count) as 'Partitions', uniqueCount(consumer_group_id) as 'Consumer Groups' FROM Metric where `kafka_id` in ({{kafka_id}})"
+ "query": "SELECT latest(confluent_kafka_server_partition_count) AS partitions FROM Metric WHERE `kafka_id` in ({{kafka_id}}) LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
"ignoreTimeRange": false
+ },
+ "yAxisLeft": {
+ "zero": true
}
}
},
@@ -229,7 +230,7 @@
"title": "Recieved Bytes by Topic",
"layout": {
"column": 5,
- "row": 7,
+ "row": 8,
"width": 4,
"height": 3
},
@@ -262,10 +263,10 @@
}
},
{
- "title": "Sent Bytes by Topic",
+      "title": "Records Received Per Minute",
"layout": {
"column": 9,
- "row": 7,
+ "row": 8,
"width": 4,
"height": 3
},
@@ -283,25 +284,22 @@
"nrqlQueries": [
{
"accountId": 0,
- "query": "SELECT sum(confluent_kafka_server_sent_bytes) FROM Metric TIMESERIES FACET `topic` where `kafka_id` in ({{kafka_id}}) LIMIT MAX "
+ "query": "SELECT rate(sum(confluent_kafka_server_received_records),1 minute ) FROM Metric TIMESERIES FACET `topic` where `kafka_id` in ({{kafka_id}}) LIMIT MAX"
}
],
"platformOptions": {
"ignoreTimeRange": false
},
- "units": {
- "unit": "BYTES"
- },
"yAxisLeft": {
"zero": true
}
}
},
{
- "title": "Retained Bytes Per Topic",
+ "title": "Successful Auth Attempts",
"layout": {
"column": 1,
- "row": 10,
+ "row": 11,
"width": 4,
"height": 3
},
@@ -319,14 +317,14 @@
"nrqlQueries": [
{
"accountId": 0,
- "query": "SELECT latest(confluent_kafka_server_retained_bytes) FROM Metric TIMESERIES FACET topic LIMIT MAX"
+ "query": "SELECT rate(sum(confluent_kafka_server_successful_authentication_count), 1 minute) AS 'Auths Per Minute' FROM Metric WHERE `kafka_id` in ({{kafka_id}}) LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
"ignoreTimeRange": false
},
"units": {
- "unit": "BYTES"
+ "unit": "COUNT"
},
"yAxisLeft": {
"zero": true
@@ -337,7 +335,7 @@
"title": "Egres rate (bytes/minute)",
"layout": {
"column": 5,
- "row": 10,
+ "row": 11,
"width": 4,
"height": 3
},
@@ -354,8 +352,8 @@
},
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT sum(l) as egress from (SELECT rate(sum(confluent_kafka_server_response_bytes), 1 minute) as l FROM Metric WHERE kafka_id IN {{kafka_id}} FACET topic LIMIT MAX TIMESERIES) LIMIT MAX TIMESERIES"
+ "accountIds": [],
+ "query": "SELECT sum(l) as egress from (SELECT rate(sum(confluent_kafka_server_response_bytes), 1 minute) as l FROM Metric WHERE kafka_id IN ({{kafka_id}}) TIMESERIES) LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
@@ -363,14 +361,17 @@
},
"yAxisLeft": {
"zero": true
+ },
+ "yAxisRight": {
+ "zero": true
}
}
},
{
- "title": "Ingress rate (bytes/minute)",
+ "title": "Sent Bytes by Topic",
"layout": {
"column": 9,
- "row": 10,
+ "row": 11,
"width": 4,
"height": 3
},
@@ -388,22 +389,25 @@
"nrqlQueries": [
{
"accountId": 0,
- "query": "SELECT sum(l) as ingress from (SELECT rate(sum(confluent_kafka_server_request_bytes), 1 minute) as l FROM Metric WHERE kafka_id IN {{kafka_id}} FACET topic LIMIT MAX TIMESERIES) LIMIT MAX TIMESERIES"
+ "query": "SELECT sum(confluent_kafka_server_sent_bytes) FROM Metric TIMESERIES FACET `topic` where `kafka_id` in ({{kafka_id}}) LIMIT MAX "
}
],
"platformOptions": {
"ignoreTimeRange": false
},
+ "units": {
+ "unit": "BYTES"
+ },
"yAxisLeft": {
"zero": true
}
}
},
{
- "title": "Partitions ",
+ "title": "Response Bytes (bytes/second) Per Kafka Protocol Request Type",
"layout": {
"column": 1,
- "row": 13,
+ "row": 14,
"width": 4,
"height": 3
},
@@ -421,12 +425,15 @@
"nrqlQueries": [
{
"accountId": 0,
- "query": "SELECT latest(confluent_kafka_server_partition_count) AS partitions FROM Metric WHERE `kafka_id` in ({{kafka_id}}) LIMIT MAX TIMESERIES"
+ "query": "SELECT rate(sum(confluent_kafka_server_response_bytes), 1 second) FROM Metric WHERE\n`kafka_id` in ({{kafka_id}}) FACET type LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
"ignoreTimeRange": false
},
+ "units": {
+ "unit": "BYTES_PER_SECOND"
+ },
"yAxisLeft": {
"zero": true
}
@@ -436,7 +443,7 @@
"title": "Request Rate",
"layout": {
"column": 5,
- "row": 13,
+ "row": 14,
"width": 4,
"height": 3
},
@@ -469,10 +476,10 @@
}
},
{
- "title": "Response Rate ",
+ "title": "Ingress rate (bytes/minute)",
"layout": {
"column": 9,
- "row": 13,
+ "row": 14,
"width": 4,
"height": 3
},
@@ -489,53 +496,17 @@
},
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT rate(sum(confluent_kafka_server_response_count), 1 minute) as 'Responses Per Minute' FROM Metric WHERE `kafka_id` in ({{kafka_id}}) LIMIT MAX TIMESERIES"
+ "accountIds": [],
+ "query": "SELECT rate(sum(confluent_kafka_server_request_bytes), 1 minute) as 'Ingress' FROM Metric WHERE kafka_id IN ({{kafka_id}}) LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
"ignoreTimeRange": false
},
- "units": {
- "unit": "REQUESTS_PER_SECOND"
- },
"yAxisLeft": {
"zero": true
- }
- }
- },
- {
- "title": "Successful Auth Attempts",
- "layout": {
- "column": 1,
- "row": 16,
- "width": 4,
- "height": 3
- },
- "linkedEntityGuids": null,
- "visualization": {
- "id": "viz.line"
- },
- "rawConfiguration": {
- "facet": {
- "showOtherSeries": false
- },
- "legend": {
- "enabled": true
},
- "nrqlQueries": [
- {
- "accountId": 0,
- "query": "SELECT rate(sum(confluent_kafka_server_successful_authentication_count), 1 minute) AS 'Auths Per Minute' FROM Metric WHERE `kafka_id` in ({{kafka_id}}) LIMIT MAX TIMESERIES"
- }
- ],
- "platformOptions": {
- "ignoreTimeRange": false
- },
- "units": {
- "unit": "COUNT"
- },
- "yAxisLeft": {
+ "yAxisRight": {
"zero": true
}
}
@@ -543,9 +514,9 @@
{
"title": "Request Bytes (bytes/second) Per Kafka Protocol Request Type",
"layout": {
- "column": 5,
- "row": 16,
- "width": 4,
+ "column": 1,
+ "row": 17,
+ "width": 6,
"height": 3
},
"linkedEntityGuids": null,
@@ -577,11 +548,11 @@
}
},
{
- "title": "Response Bytes (bytes/second) Per Kafka Protocol Request Type",
+ "title": "Response Rate ",
"layout": {
- "column": 9,
- "row": 16,
- "width": 4,
+ "column": 7,
+ "row": 17,
+ "width": 6,
"height": 3
},
"linkedEntityGuids": null,
@@ -597,18 +568,21 @@
},
"nrqlQueries": [
{
- "accountId": 0,
- "query": "SELECT rate(sum(confluent_kafka_server_response_bytes), 1 second) FROM Metric WHERE\n`kafka_id` in ({{kafka_id}}) FACET type LIMIT MAX TIMESERIES"
+ "accountIds": [],
+          "query": "SELECT rate(sum(confluent_kafka_server_response_count), 1 minute) as 'Responses Per Minute' FROM Metric WHERE `kafka_id` in ({{kafka_id}}) FACET kafka_id LIMIT MAX TIMESERIES"
}
],
"platformOptions": {
"ignoreTimeRange": false
},
"units": {
- "unit": "BYTES_PER_SECOND"
+ "unit": "REQUESTS_PER_SECOND"
},
"yAxisLeft": {
"zero": true
+ },
+ "yAxisRight": {
+ "zero": true
}
}
}
@@ -619,15 +593,24 @@
{
"name": "kafka_id",
"items": null,
- "defaultValues": [],
+ "defaultValues": [
+ {
+ "value": {
+ "string": "*"
+ }
+ }
+ ],
"nrqlQuery": {
"accountIds": [],
"query": "select uniques(kafka_id) from Metric"
},
+ "options": {
+ "ignoreTimeRange": true
+ },
"title": "Kafka Cluster Id",
"type": "NRQL",
- "isMultiSelection": false,
+ "isMultiSelection": true,
"replacementStrategy": "STRING"
}
]
-}
+}
\ No newline at end of file
diff --git a/dashboards/confluent-cloud/confluent-cloud1.png b/dashboards/confluent-cloud/confluent-cloud1.png
new file mode 100644
index 0000000000..b87465f28e
Binary files /dev/null and b/dashboards/confluent-cloud/confluent-cloud1.png differ
diff --git a/dashboards/confluent-cloud/confluent-cloud2.png b/dashboards/confluent-cloud/confluent-cloud2.png
new file mode 100644
index 0000000000..2ca33d017c
Binary files /dev/null and b/dashboards/confluent-cloud/confluent-cloud2.png differ
diff --git a/dashboards/confluent-cloud/confluent-cloud3.png b/dashboards/confluent-cloud/confluent-cloud3.png
new file mode 100644
index 0000000000..2cd1434f1a
Binary files /dev/null and b/dashboards/confluent-cloud/confluent-cloud3.png differ
diff --git a/dashboards/confluent-cloud/confluentcloud01.png b/dashboards/confluent-cloud/confluentcloud01.png
deleted file mode 100644
index 8f2bbb8901..0000000000
Binary files a/dashboards/confluent-cloud/confluentcloud01.png and /dev/null differ
diff --git a/dashboards/confluent-cloud/confluentcloud02.png b/dashboards/confluent-cloud/confluentcloud02.png
deleted file mode 100644
index baaec85fa9..0000000000
Binary files a/dashboards/confluent-cloud/confluentcloud02.png and /dev/null differ
diff --git a/dashboards/confluent-cloud/confluentcloud03.png b/dashboards/confluent-cloud/confluentcloud03.png
deleted file mode 100644
index 46fd83bacf..0000000000
Binary files a/dashboards/confluent-cloud/confluentcloud03.png and /dev/null differ
diff --git a/data-sources/amazon-cloudwatch-network-monitor/config.yml b/data-sources/amazon-cloudwatch-network-monitor/config.yml
new file mode 100644
index 0000000000..1b3be5a76b
--- /dev/null
+++ b/data-sources/amazon-cloudwatch-network-monitor/config.yml
@@ -0,0 +1,8 @@
+id: amazon-cloudwatch-network-monitor
+displayName: Amazon CloudWatch Network Monitor
+description: Monitor your hybrid network with Amazon CloudWatch Network Monitor
+install:
+ primary:
+ link:
+ url: https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/amazon-cloudwatch-network-monitor
+icon: logo.svg
\ No newline at end of file
diff --git a/data-sources/amazon-cloudwatch-network-monitor/logo.svg b/data-sources/amazon-cloudwatch-network-monitor/logo.svg
new file mode 100644
index 0000000000..b933e4da9b
--- /dev/null
+++ b/data-sources/amazon-cloudwatch-network-monitor/logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/data-sources/confluent-cloud/config.yml b/data-sources/confluent-cloud/config.yml
index a28a990fd3..4d88e76123 100644
--- a/data-sources/confluent-cloud/config.yml
+++ b/data-sources/confluent-cloud/config.yml
@@ -1,6 +1,6 @@
id: confluent-cloud
displayName: Confluent Cloud
-description: Observability for Confluent Cloud's fully managed service for Apache Kafka on New Relic delivered using the OpenTelemetry Collector.
+description: Observability for Confluent Cloud's fully managed service for Apache Kafka delivered using an OpenTelemetry Collector.
icon: logo.png
install:
primary:
diff --git a/data-sources/contentsquare/config.yml b/data-sources/contentsquare/config.yml
index 6770709260..bd1b506ef9 100644
--- a/data-sources/contentsquare/config.yml
+++ b/data-sources/contentsquare/config.yml
@@ -1,13 +1,7 @@
id: contentsquare
displayName: Contentsquare
description: |
- Contentsquare is a powerful, yet easy-to-use, digital experience analytics
- platform that collects and quantifies user behavior across all your digital
- properties showing you what users like, what they don’t like, where they
- struggle, and why they leave.
- Contentsquare surfaces opportunities to improve your customer experience
- automatically and easily so you can increase your conversion rates, improve
- customer loyalty, reduce costs and drive revenue.
+ The integration with Contentsquare and New Relic adds a Contentsquare session replay link to your browser monitoring data in New Relic, so that you can quickly navigate to your sessions from the context of New Relic.
icon: logo.png
install:
primary:
diff --git a/data-sources/cribl/config.yml b/data-sources/cribl/config.yml
index 065c26b328..8f893aff55 100644
--- a/data-sources/cribl/config.yml
+++ b/data-sources/cribl/config.yml
@@ -1,12 +1,7 @@
id: cribl
displayName: Cribl Stream
description: |
- Cribl Stream helps you process machine data – logs, instrumentation data, application data, metrics, etc. – in real time, and deliver them to your analysis platform of choice.
-
- It allows you to:
- Add context to your data, by enriching it with information from external data sources.
- Help secure your data, by redacting, obfuscating, or encrypting sensitive fields.
- Optimize your data, per your performance and cost requirements.
+  The Cribl Stream integration with New Relic helps you monitor your machine data – logs, instrumentation data, application data, metrics, etc. – in real time, alongside your New Relic observability stack.
icon: logo.png
install:
primary:
diff --git a/data-sources/kentik/config.yml b/data-sources/kentik/config.yml
index a20fbc00f0..7e80e85094 100644
--- a/data-sources/kentik/config.yml
+++ b/data-sources/kentik/config.yml
@@ -1,9 +1,7 @@
id: kentik
displayName: Kentik Firehose
description: |
- This quickstart gives you visibility into data ingested via the Kentik Firehose. The data is sent to Kentik and enriched before being sent to New Relic.
- Deploying this quickstart gives insights into Network Flows, Network Synthetics, and performance telemetry associated with Kentik-monitored devices.
- For more information or support, please go to https://www.kentik.com/customer-care/
+ The Kentik Firehose integration provides a simple tool to send Kentik network observability data into New Relic, providing insights into Network Flows, Network Synthetics, and performance telemetry associated with Kentik-monitored devices.
icon: logo.png
install:
primary:
diff --git a/data-sources/trendmicro-cloudone-conformity/config.yml b/data-sources/trendmicro-cloudone-conformity/config.yml
index 067b8e579b..c7c64ba175 100644
--- a/data-sources/trendmicro-cloudone-conformity/config.yml
+++ b/data-sources/trendmicro-cloudone-conformity/config.yml
@@ -1,9 +1,9 @@
id: trendmicro-cloudone-conformity
displayName: Trend Micro Cloud One - Conformity
description: |
- New Relic’s integration with Trend Micro Cloud One - Conformity ingests cloud security posture management (CSPM) data from Conformity into New Relic in real-time.
- The integration deploys a Amazon Web Services (AWS) CloudFormation stack in your AWS account. Bring your Conformity generated CSPM data into New Relic one to contextualize and correlate it with workload telemetry data, delivering AI powered visualizations and quick insights.
- Conformity generated CSPM data into New Relic One where it's contextualized and correlated with workload telemetry data delivering AI powered visualizations and quick insights.
+ New Relic’s integration with Trend Micro Cloud One - Conformity ingests real-time cloud security posture management (CSPM) data into New Relic,
+ allowing you to contextualize it with telemetry data through AI-powered visualizations.
+
icon: logo.png
install:
primary:
diff --git a/quickstarts/aws/amazon-cloudwatch-network-monitor/config.yml b/quickstarts/aws/amazon-cloudwatch-network-monitor/config.yml
new file mode 100644
index 0000000000..3d1fb7c5e3
--- /dev/null
+++ b/quickstarts/aws/amazon-cloudwatch-network-monitor/config.yml
@@ -0,0 +1,32 @@
+id: 6ce6d1ba-e218-4d7d-851d-7c0e9c755ce0
+slug: amazon-cloudwatch-network-monitor
+description: |-
+ ## Monitor your Amazon CloudWatch Network Monitor Metrics
+
+  With New Relic's Amazon CloudWatch Network Monitor integration, you now can monitor whether network impairment lies within the AWS network or the customer’s network.
+
+ ### Get started!
+
+ Check out the [documentation](https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/amazon-cloudwatch-network-monitor/) to learn more about New
+ Relic monitoring and integration with Amazon CloudWatch Network Monitor.
+
+summary: Use New Relic - Amazon CloudWatch Network Monitor Integration to troubleshoot issues in hybrid network connectivity
+icon: logo.svg
+level: New Relic
+authors:
+ - New Relic
+title: Amazon CloudWatch Network Monitor
+documentation:
+ - name: Amazon CloudWatch Network Monitor installation docs
+ description: |
+      Monitor hybrid networks with Amazon CloudWatch Network Monitor
+ url: https://docs.newrelic.com/docs/infrastructure/amazon-integrations/aws-integrations-list/amazon-cloudwatch-network-monitor/
+keywords:
+ - aws
+ - amazon web services
+ - Amazon CloudWatch Network Monitor
+ - active network monitoring
+dataSourceIds:
+ - amazon-cloudwatch-network-monitor
+dashboards:
+ - amazon-cloudwatch-network-monitor
\ No newline at end of file
diff --git a/quickstarts/aws/amazon-cloudwatch-network-monitor/logo.svg b/quickstarts/aws/amazon-cloudwatch-network-monitor/logo.svg
new file mode 100644
index 0000000000..b933e4da9b
--- /dev/null
+++ b/quickstarts/aws/amazon-cloudwatch-network-monitor/logo.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/quickstarts/aws/amazon-eks-on-aws-fargate/config.yml b/quickstarts/aws/amazon-eks-on-aws-fargate/config.yml
index 8dd06feb9a..50ecf7688d 100644
--- a/quickstarts/aws/amazon-eks-on-aws-fargate/config.yml
+++ b/quickstarts/aws/amazon-eks-on-aws-fargate/config.yml
@@ -1,11 +1,9 @@
id: 066eb2a4-65a4-456d-a5ac-0e1fcc2740a2
slug: amazon-eks-on-aws-fargate
description: |
- Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full
- observability of Kubernetes clusters running on EKS in Fargate.
+ Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full observability of Kubernetes clusters running on EKS in Fargate.
summary: |
- Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full
- observability of Kubernetes clusters running on EKS in Fargate.
+ Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full observability of Kubernetes clusters running on EKS in Fargate.
icon: logo.svg
level: New Relic
authors:
@@ -14,8 +12,7 @@ title: Amazon EKS on AWS Fargate
documentation:
- name: Amazon EKS on AWS Fargate installation docs
description: |
- Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full
- observability of Kubernetes clusters running on EKS in Fargate.
+ Telemetry from Kube State Metrics, Kubelet, and cAdvisor for full observability of Kubernetes clusters running on EKS in Fargate.
url: https://docs.newrelic.com/docs/integrations/kubernetes-integration/installation/install-fargate-integration/
keywords:
- aws
diff --git a/quickstarts/azure/azure-functions-workflow/config.yml b/quickstarts/azure/azure-functions-workflow/config.yml
index 6b5ee062dd..be19235214 100644
--- a/quickstarts/azure/azure-functions-workflow/config.yml
+++ b/quickstarts/azure/azure-functions-workflow/config.yml
@@ -18,7 +18,7 @@ description: |-
Start ingesting your Azure data today and get immediate access to our visualization dashboards so you can optimize your Azure service.
summary: |-
- Monitoring Azure Functions Workflow is critical to track the performance through key metrics. Download New Relic Azure Functions Workflow monitoring quickstart to get a pre-built dashboard tailored to monitor your Azure Functions Workflow service.
+ Integrate the New Relic Azure Functions Workflow monitoring quickstart to gain a pre-built dashboard specifically designed to monitor your Azure Functions Workflow service.
icon: logo.png
level: New Relic
authors:
@@ -40,4 +40,4 @@ keywords:
dashboards:
- azure-functions-workflow
dataSourceIds:
- - azure-monitor
\ No newline at end of file
+ - azure-monitor
diff --git a/quickstarts/azure/azure-media-services-live-events/config.yml b/quickstarts/azure/azure-media-services-live-events/config.yml
index 5053055a03..65458a9936 100644
--- a/quickstarts/azure/azure-media-services-live-events/config.yml
+++ b/quickstarts/azure/azure-media-services-live-events/config.yml
@@ -17,7 +17,7 @@ description: |-
Start ingesting your Azure data today and get immediate access to our visualization dashboards so you can optimize your Azure service.
summary: |-
- Monitoring Azure Media Services Live Events is critical to track the performance of the live events via key metrics. Download New Relic Azure Media Services Live Events monitoring quickstart to get a pre-built dashboard tailored to monitor your Azure SignalR Service.
+ Track Azure Media Services Live Events performance effortlessly with key metrics. Integrate the pre-built New Relic dashboard for SignalR Service insights.
icon: logo.png
level: New Relic
authors:
diff --git a/quickstarts/contentsquare/config.yml b/quickstarts/contentsquare/config.yml
index 7de985aec3..8648e50d75 100644
--- a/quickstarts/contentsquare/config.yml
+++ b/quickstarts/contentsquare/config.yml
@@ -2,10 +2,7 @@ id: 875f2e3d-7c1b-436b-ab77-602090de1134
slug: contentsquare
title: Contentsquare
summary: |
- Contentsquare is a powerful, yet easy-to-use, digital experience analytics
- platform that collects and quantifies user behavior across all your digital
- properties showing you what users like, what they don’t like, where they
- struggle, and why they leave.
+ Contentsquare is a powerful, yet easy-to-use, digital experience analytics platform that collects and quantifies user behavior across all your digital properties showing you what users like, what they don’t like, where they struggle, and why they leave.
description: |
## Contentsquare performance monitoring
diff --git a/quickstarts/drupal/config.yml b/quickstarts/drupal/config.yml
index e050346d41..25996c3249 100644
--- a/quickstarts/drupal/config.yml
+++ b/quickstarts/drupal/config.yml
@@ -15,8 +15,7 @@ description: |
Check out the [documentation](https://docs.newrelic.com/docs/agents/php-agent/) to learn more about New Relic monitoring for Drupal.
summary: |
- Drupal is a free and open-source web content management framework written in
- PHP. You can monitor Drupal with New Relic's PHP agent.
+ Effectively monitor Drupal using New Relic's PHP agent for enhanced performance insights.
icon: logo.svg
level: New Relic
authors:
diff --git a/quickstarts/gatsby-build/config.yml b/quickstarts/gatsby-build/config.yml
index 00e00d47a3..b8b67aaa9c 100644
--- a/quickstarts/gatsby-build/config.yml
+++ b/quickstarts/gatsby-build/config.yml
@@ -2,8 +2,8 @@ id: d234c09c-3338-4713-8340-ca75766445d6
slug: gatsby-build
title: Gatsby Build
summary: |
- The Gatsby quickstart allows you to get visibility into the build time of your Gatsby Sites,
- using OpenTelemetry to collect each step as a span in a Distributed Trace. Monitor your build in real-time to highlight which steps are affecting performance, so you can improve them faster.
+ The Gatsby quickstart allows you to get visibility into the build time of your Gatsby sites, using OpenTelemetry to collect each step as a span in a Distributed Trace.
+ Monitor your build in real-time to highlight which steps are affecting performance, so you can improve them faster.
description: |
The Gatsby quickstart allows you to get visibility into the build time of your Gatsby Sites,
using [OpenTelemetry](https://docs.newrelic.com/docs/integrations/open-source-telemetry-integrations/opentelemetry/introduction-opentelemetry-new-relic/)
diff --git a/quickstarts/kentik/config.yml b/quickstarts/kentik/config.yml
index 58bdf03126..68fb662ba9 100644
--- a/quickstarts/kentik/config.yml
+++ b/quickstarts/kentik/config.yml
@@ -2,14 +2,23 @@ id: a781c760-fd25-458f-a170-dae8c722b2ea
slug: kentik-firehose-pack
title: Kentik Firehose
description: |
- This quickstart gives you visibility into data ingested via the Kentik Firehose. The data is sent to Kentik and enriched before being sent to New Relic.
- Deploying this quickstart gives insights into Network Flows, Network Synthetics, and performance telemetry associated with Kentik-monitored devices. Includes example alerts to get you started.
- For more information or support, please go to https://www.kentik.com/customer-care/
+ # About Kentik
+ Kentik is a network observability platform that gives you all the information and tools to monitor your network infrastructure.
+
+ ## About this integration
+ This integration gives you visibility into data ingested via the Kentik Firehose. The data is sent to Kentik and enriched before being sent to New Relic. There are also a few relevant alert examples included.
+ Deploying this quickstart gives insights into the following:
+
+ - Network Flows
+ - Network Synthetics
+ - Performance Telemetry
+
+ For more information or support, you can [reach out to Kentik](https://www.kentik.com/customer-care/).
summary: |
- The Kentik Firehose quickstart visualizes network data within New Relic One and includes some example alerts.
+ Kentik Firehose visualizes network data within New Relic.
level: Verified
authors:
- - Josh Biggley (New Relic)
+ - New Relic
keywords:
- kentik
- firehose
diff --git a/quickstarts/python/cherrypy/config.yml b/quickstarts/python/cherrypy/config.yml
index 12dfd6fbb3..8fc15c9848 100644
--- a/quickstarts/python/cherrypy/config.yml
+++ b/quickstarts/python/cherrypy/config.yml
@@ -25,7 +25,7 @@ level: New Relic
authors:
- New Relic
- Emil Hammarstrand
-title: cherrypy
+title: CherryPy
documentation:
- name: cherrypy installation docs
description: Object-oriented web framework built on Python.
diff --git a/quickstarts/python/jinja2/config.yml b/quickstarts/python/jinja2/config.yml
index b4be9cecb8..cc67681a63 100644
--- a/quickstarts/python/jinja2/config.yml
+++ b/quickstarts/python/jinja2/config.yml
@@ -1,7 +1,7 @@
id: 09e0582c-dd93-4bf9-9b5c-d4faa4aa2c28
slug: jinja2
description: |
- ## What is jinja2?
+ ## What is Jinja2?
Python-based web templating language designed to make it easier for designers
to work quickly and efficiently.
@@ -9,20 +9,20 @@ description: |
## Get started!
- Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments jinja2 with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up [errors inbox](https://docs.newrelic.com/docs/apm/apm-ui-pages/errors-inbox/errors-inbox/), [transaction tracing](https://docs.newrelic.com/docs/apm/transactions/transaction-traces/introduction-transaction-traces/), and [service maps](https://docs.newrelic.com/docs/understand-dependencies/understand-system-dependencies/service-maps/introduction-service-maps/).
+ Leverage community expertise and instantly get value out of your telemetry data. This quickstart automatically instruments Jinja2 with the New Relic Python agent, and allows you to instantly monitor your Python application with out-of-the-box dashboards and alerts. Further leverage New Relic's APM capabilities by setting up [errors inbox](https://docs.newrelic.com/docs/apm/apm-ui-pages/errors-inbox/errors-inbox/), [transaction tracing](https://docs.newrelic.com/docs/apm/transactions/transaction-traces/introduction-transaction-traces/), and [service maps](https://docs.newrelic.com/docs/understand-dependencies/understand-system-dependencies/service-maps/introduction-service-maps/).
## More info
- Check out the [documentation](https://docs.newrelic.com/docs/agents/python-agent/) to learn more about New Relic monitoring for jinja2.
+ Check out the [documentation](https://docs.newrelic.com/docs/agents/python-agent/) to learn more about New Relic monitoring for Jinja2.
summary: |
- Monitor jinja2 with New Relic's Python agent
+ Monitor Jinja2 with New Relic's Python agent
icon: logo.svg
level: New Relic
authors:
- New Relic
-title: jinja2
+title: Jinja2
documentation:
- - name: jinja2 installation docs
+ - name: Jinja2 installation docs
description: |
Python-based web templating language designed to make it easier for
designers to work quickly and efficiently.
diff --git a/utils/constants.ts b/utils/constants.ts
index c5e603f47b..12267b2409 100644
--- a/utils/constants.ts
+++ b/utils/constants.ts
@@ -211,3 +211,11 @@ export const ALERT_POLICY_SET_REQUIRED_DATA_SOURCES_MUTATION = gql`
}
}
`;
+
+/**
+ * When submitting quickstarts (or other resources), we have found that
+ * submitting items too frequently can cause the process to fail. By introducing
+ * a slight amount of delay between submissions, we can avoid a possible race
+ * condition.
+ */
+export const SUBMIT_THROTTLE_MS = 250;
diff --git a/utils/create_validate_pr_quickstarts.ts b/utils/create_validate_pr_quickstarts.ts
index ff2cad73fd..6b949d66fd 100644
--- a/utils/create_validate_pr_quickstarts.ts
+++ b/utils/create_validate_pr_quickstarts.ts
@@ -14,14 +14,18 @@ import {
getComponentLocalPath,
} from './lib/helpers';
-import { QUICKSTART_CONFIG_REGEXP, COMPONENT_PREFIX_REGEXP } from './constants';
+import {
+ QUICKSTART_CONFIG_REGEXP,
+ COMPONENT_PREFIX_REGEXP,
+ SUBMIT_THROTTLE_MS,
+} from './constants';
import {
NerdGraphResponseWithLocalErrors,
NerdGraphError,
} from './types/nerdgraph';
import logger from './logger';
-const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))
+const sleep = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms));
type ResponseWithErrors =
NerdGraphResponseWithLocalErrors & {
@@ -130,7 +134,7 @@ export const createValidateQuickstarts = async (
for (const c of quickstarts) {
try {
const res = await c.submitMutation(isDryRun);
- await sleep(10);
+ await sleep(SUBMIT_THROTTLE_MS);
results = [...results, res];
} catch (err) {
diff --git a/utils/release.ts b/utils/release.ts
index 62f3fac582..b358c9c4d1 100644
--- a/utils/release.ts
+++ b/utils/release.ts
@@ -55,9 +55,9 @@ const bootstrap = async (): Promise => {
}
if (!NR_API_TOKEN) {
- stepMessage(`No API token found for ${ENVIRONMENT}.`);
+ stepMessage(`No API key found for ${ENVIRONMENT}.`);
NR_API_TOKEN = await password({
- message: `What is your NR API token for ${ENVIRONMENT}?`,
+ message: `What is your NR API key for ${ENVIRONMENT}?`,
});
appendFileSync(
@@ -65,7 +65,7 @@ const bootstrap = async (): Promise => {
`\nNR_API_TOKEN_${ENVIRONMENT.toUpperCase()}=${NR_API_TOKEN}`
);
console.log(
- `Your NR API token for ${ENVIRONMENT} has been saved to the .env file. Do not commit this file.`
+ `Your NR API key for ${ENVIRONMENT} has been saved to the .env file. Do not commit this file.`
);
}
@@ -109,7 +109,7 @@ const bootstrap = async (): Promise => {
};
const main = async () => {
- const { ENVIRONMENT, GH_TOKEN, NR_API_TOKEN, PR_URL } = await bootstrap();
+ const { ENVIRONMENT, GH_TOKEN, PR_URL } = await bootstrap();
stepMessage(`Performing dry run release to ${ENVIRONMENT}...`);