first pass on docs to send data to grafana cloud (#762)
* first pass on docs to send data to grafana cloud

* Adding flags to listen for flow also
i3149 authored Oct 17, 2024
1 parent fb229ab commit 248bfbe
Showing 4 changed files with 280 additions and 0 deletions.
10 changes: 10 additions & 0 deletions hack/grafana-cloud/README.md
@@ -0,0 +1,10 @@
# Grafana Cloud Example

This example shows how to send SNMP and flow data from ktranslate to Grafana Cloud via the Grafana Alloy OpenTelemetry collector.

To use it, replace the X_MY_USERNAME, X_MY_USER, and X_MY_PASSWORD placeholder values in config.alloy with your Grafana Cloud credentials.
Then update snmp.yml to list the devices you want to poll.
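
For example, with a hypothetical Grafana Cloud username of `123456` and a token exported as `GRAFANA_CLOUD_TOKEN`, the placeholders could be filled in with `sed` (a sketch; BSD/macOS sed needs `-i ''`):

```sh
# Hypothetical credentials -- substitute the username and token from your own Grafana Cloud stack.
sed -i \
  -e 's/X_MY_USERNAME/123456/g' \
  -e 's/X_MY_USER/123456/g' \
  -e "s/X_MY_PASSWORD/$GRAFANA_CLOUD_TOKEN/g" \
  config.alloy
```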

# Run

`docker compose up`
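
To run everything in the background and confirm both containers start cleanly, something like the following should work:

```sh
# Start ktranslate and alloy detached, then follow their logs.
docker compose up -d
docker compose logs -f ktranslate alloy
```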
189 changes: 189 additions & 0 deletions hack/grafana-cloud/config.alloy
@@ -0,0 +1,189 @@
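// What this configuration does, at a glance:
//   1. The "alloy_check" pipeline below scrapes Alloy's own health metrics and pushes
//      them to Grafana Cloud Prometheus via prometheus.remote_write.
//   2. The otelcol.* components receive OTLP data from ktranslate (ports 4317/4318),
//      drop noisy resource attributes, batch the data, and export it to the Grafana
//      Cloud OTLP gateway using basic auth.
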
prometheus.exporter.self "alloy_check" { }

discovery.relabel "alloy_check" {
  targets = prometheus.exporter.self.alloy_check.targets

  rule {
    target_label = "instance"
    replacement = constants.hostname
  }

  rule {
    target_label = "alloy_hostname"
    replacement = constants.hostname
  }

  rule {
    target_label = "job"
    replacement = "integrations/alloy-check"
  }
}

prometheus.scrape "alloy_check" {
  targets = discovery.relabel.alloy_check.output
  forward_to = [prometheus.relabel.alloy_check.receiver]

  scrape_interval = "60s"
}

// Keep only a small allowlist of Alloy self-monitoring metrics before remote writing.
prometheus.relabel "alloy_check" {
  forward_to = [prometheus.remote_write.metrics_service.receiver]

  rule {
    source_labels = ["__name__"]
    regex = "(prometheus_target_sync_length_seconds_sum|prometheus_target_scrapes_.*|prometheus_target_interval.*|prometheus_sd_discovered_targets|alloy_build.*|prometheus_remote_write_wal_samples_appended_total|process_start_time_seconds)"
    action = "keep"
  }
}

// Grafana Cloud Prometheus endpoint for the self-monitoring metrics above.
// Replace X_MY_USER and X_MY_PASSWORD with your stack's credentials.
prometheus.remote_write "metrics_service" {
  endpoint {
    url = "https://prometheus-us-central1.grafana.net/api/prom/push"

    basic_auth {
      username = "X_MY_USER"
      password = "X_MY_PASSWORD"
    }
  }
}

// Grafana Cloud Loki endpoint. Nothing in this file forwards to it yet, but it is
// available if you want to ship logs outside the OTLP pipeline below.
loki.write "grafana_cloud_loki" {
  endpoint {
    url = "https://logs-prod-017.grafana.net/loki/api/v1/push"

    basic_auth {
      username = "X_MY_USER"
      password = "X_MY_PASSWORD"
    }
  }
}

// OTLP receiver that ktranslate sends its SNMP and flow data to (see docker-compose.yml).
otelcol.receiver.otlp "default" {
  // configures the default grpc endpoint "127.0.0.1:4317"
  grpc { }
  // configures the default http/protobuf endpoint "127.0.0.1:4318"
  http { }

  output {
    metrics = [otelcol.processor.resourcedetection.default.input]
    logs = [otelcol.processor.resourcedetection.default.input]
    traces = [otelcol.processor.resourcedetection.default.input]
  }
}

otelcol.processor.resourcedetection "default" {
  detectors = ["env", "system"] // add "gcp", "ec2", "ecs", "elastic_beanstalk", "eks", "lambda", "azure", "aks", "consul", "heroku" if you want to use cloud resource detection

  system {
    hostname_sources = ["os"]
  }

  output {
    metrics = [otelcol.processor.transform.drop_unneeded_resource_attributes.input]
    logs = [otelcol.processor.transform.drop_unneeded_resource_attributes.input]
    traces = [otelcol.processor.transform.drop_unneeded_resource_attributes.input]
  }
}

otelcol.processor.transform "drop_unneeded_resource_attributes" {
  // https://grafana.com/docs/alloy/latest/reference/components/otelcol.processor.transform/
  error_mode = "ignore"

  trace_statements {
    context = "resource"
    statements = [
      "delete_key(attributes, \"k8s.pod.start_time\")",
      "delete_key(attributes, \"os.description\")",
      "delete_key(attributes, \"os.type\")",
      "delete_key(attributes, \"process.command_args\")",
      "delete_key(attributes, \"process.executable.path\")",
      "delete_key(attributes, \"process.pid\")",
      "delete_key(attributes, \"process.runtime.description\")",
      "delete_key(attributes, \"process.runtime.name\")",
      "delete_key(attributes, \"process.runtime.version\")",
    ]
  }

  metric_statements {
    context = "resource"
    statements = [
      "delete_key(attributes, \"k8s.pod.start_time\")",
      "delete_key(attributes, \"os.description\")",
      "delete_key(attributes, \"os.type\")",
      "delete_key(attributes, \"process.command_args\")",
      "delete_key(attributes, \"process.executable.path\")",
      "delete_key(attributes, \"process.pid\")",
      "delete_key(attributes, \"process.runtime.description\")",
      "delete_key(attributes, \"process.runtime.name\")",
      "delete_key(attributes, \"process.runtime.version\")",
    ]
  }

  log_statements {
    context = "resource"
    statements = [
      "delete_key(attributes, \"k8s.pod.start_time\")",
      "delete_key(attributes, \"os.description\")",
      "delete_key(attributes, \"os.type\")",
      "delete_key(attributes, \"process.command_args\")",
      "delete_key(attributes, \"process.executable.path\")",
      "delete_key(attributes, \"process.pid\")",
      "delete_key(attributes, \"process.runtime.description\")",
      "delete_key(attributes, \"process.runtime.name\")",
      "delete_key(attributes, \"process.runtime.version\")",
    ]
  }

  output {
    metrics = [otelcol.processor.transform.add_resource_attributes_as_metric_attributes.input]
    logs = [otelcol.processor.batch.default.input]
    traces = [
      otelcol.processor.batch.default.input,
      otelcol.connector.host_info.default.input,
    ]
  }
}

otelcol.connector.host_info "default" {
  host_identifiers = ["host.name"]

  output {
    metrics = [otelcol.processor.batch.default.input]
  }
}

otelcol.processor.transform "add_resource_attributes_as_metric_attributes" {
  error_mode = "ignore"

  metric_statements {
    context = "datapoint"
    statements = [
      "set(attributes[\"deployment.environment\"], resource.attributes[\"deployment.environment\"])",
      "set(attributes[\"service.version\"], resource.attributes[\"service.version\"])",
    ]
  }

  output {
    metrics = [otelcol.processor.batch.default.input]
  }
}

otelcol.processor.batch "default" {
  output {
    metrics = [otelcol.exporter.otlphttp.grafana_cloud.input]
    logs = [otelcol.exporter.otlphttp.grafana_cloud.input]
    traces = [otelcol.exporter.otlphttp.grafana_cloud.input]
  }
}

// Grafana Cloud OTLP gateway. Replace X_MY_USERNAME and X_MY_PASSWORD with your
// stack's credentials.
otelcol.exporter.otlphttp "grafana_cloud" {
  client {
    endpoint = "https://otlp-gateway-prod-us-central-0.grafana.net/otlp"
    auth = otelcol.auth.basic.grafana_cloud.handler
  }
}

otelcol.auth.basic "grafana_cloud" {
  username = "X_MY_USERNAME"
  password = "X_MY_PASSWORD"
}
39 changes: 39 additions & 0 deletions hack/grafana-cloud/docker-compose.yml
@@ -0,0 +1,39 @@
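# Two services: ktranslate polls the devices in /snmp.yml and listens for flow
# (9995/udp) and SNMP traps (1620/udp), exporting everything as OTLP over gRPC to
# alloy:4317; alloy runs config.alloy and relays the data to Grafana Cloud.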
services:
  ktranslate:
    image: kentik/ktranslate:latest
    restart: always
    pull_policy: always
    environment:
      - "OTEL_SERVICE_NAME=ktranslate"
      - "OTEL_EXPORTER_OTLP_COMPRESSION=gzip"
    volumes:
      - type: bind
        source: ./snmp.yml
        target: /snmp.yml
    depends_on:
      - alloy
    command:
      - --format=otel
      - --otel.protocol=grpc
      - --otel.endpoint=http://alloy:4317/
      - --snmp=/snmp.yml
      - --nf.source=auto
    ports:
      - 0.0.0.0:9995:9995/udp
      - 1620:1620/udp
  alloy:
    image: grafana/alloy:latest
    restart: always
    pull_policy: always
    volumes:
      - type: bind
        source: ./config.alloy
        target: /config.alloy
    ports:
      - 4317:4317
      - 4318:4318
    command:
      - --server.http.listen-addr=0.0.0.0:4319
      - --storage.path=/var/lib/alloy/data
      - run
      - /config.alloy
42 changes: 42 additions & 0 deletions hack/grafana-cloud/snmp.yml
@@ -0,0 +1,42 @@
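# ktranslate SNMP configuration: one example device ("bart"), a trap listener on
# 1620/udp, discovery limited to 127.0.0.1/32, and global polling defaults.
# Replace the device entry and discovery CIDRs to match your own network.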
devices:
  bart__192.168.0.1:
    device_name: bart
    device_ip: 192.168.0.1
    snmp_comm: public
    debug: false
    port: 161
    oid: .1.3.6.1.4.1.8072.3.2.10
trap:
  listen: 0.0.0.0:1620
  community: public
  version: v2c
  trap_only: false
  drop_undefined: true
discovery:
  cidrs:
    - 127.0.0.1/32
  ignore_list: []
  debug: false
  ports:
    - 161
  default_communities:
    - public
  use_snmp_v1: false
  default_v3: null
  add_devices: true
  add_mibs: false
  threads: 16
  replace_devices: true
global:
  poll_time_sec: 30
  drop_if_outside_poll: false
  mib_profile_dir: /etc/ktranslate/profiles
  mibs_db: /etc/ktranslate/mibs.db
  mibs_enabled:
    - IF-MIB
  timeout_ms: 3000
  retries: 0
  global_v3: null
  response_time: false
  user_tags: {}
  match_attributes: {}
