diff --git a/.github/actions/spelling/allow.txt b/.github/actions/spelling/allow.txt index 410cb8d060cc7..3fc1fd855edd8 100644 --- a/.github/actions/spelling/allow.txt +++ b/.github/actions/spelling/allow.txt @@ -29,6 +29,9 @@ Apanda apikey apimachinery apiserver +appsignal +Appsignal +APPSIGNAL archlinux Archos Arival diff --git a/.github/semantic.yml b/.github/semantic.yml index 55f71699574ea..b794afc310dbd 100644 --- a/.github/semantic.yml +++ b/.github/semantic.yml @@ -185,6 +185,7 @@ scopes: # sinks - amqp sink # Anything `amqp` sink related + - appsignal sink # Anything `appsignal` sink related - aws_cloudwatch_logs sink # Anything `aws_cloudwatch_logs` sink related - aws_cloudwatch_metrics sink # Anything `aws_cloudwatch_metrics` sink related - aws_kinesis_firehose sink # Anything `aws_kinesis_firehose` sink related diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml index 02e573f1d62d2..958adb45a3871 100644 --- a/.github/workflows/integration-test.yml +++ b/.github/workflows/integration-test.yml @@ -57,6 +57,7 @@ jobs: matrix: include: - test: 'amqp' + - test: 'appsignal' - test: 'aws' - test: 'axiom' - test: 'azure' diff --git a/Cargo.toml b/Cargo.toml index de0d8b0c0a6ef..971455343460b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -594,6 +594,7 @@ transforms-throttle = ["dep:governor"] sinks = ["sinks-logs", "sinks-metrics"] sinks-logs = [ "sinks-amqp", + "sinks-appsignal", "sinks-aws_cloudwatch_logs", "sinks-aws_kinesis_firehose", "sinks-aws_kinesis_streams", @@ -635,6 +636,7 @@ sinks-logs = [ "sinks-websocket", ] sinks-metrics = [ + "sinks-appsignal", "sinks-aws_cloudwatch_metrics", "sinks-blackhole", "sinks-console", @@ -650,6 +652,7 @@ sinks-metrics = [ ] sinks-amqp = ["lapin"] +sinks-appsignal = [] sinks-aws_cloudwatch_logs = ["aws-core", "dep:aws-sdk-cloudwatchlogs"] sinks-aws_cloudwatch_metrics = ["aws-core", "dep:aws-sdk-cloudwatch"] sinks-aws_kinesis_firehose = ["aws-core", "dep:aws-sdk-firehose"] @@ -714,6 
+717,7 @@ nightly = [] # Testing-related features all-integration-tests = [ "amqp-integration-tests", + "appsignal-integration-tests", "aws-integration-tests", "axiom-integration-tests", "azure-integration-tests", @@ -751,6 +755,7 @@ all-integration-tests = [ ] amqp-integration-tests = ["sources-amqp", "sinks-amqp"] +appsignal-integration-tests = ["sinks-appsignal"] aws-integration-tests = [ "aws-cloudwatch-logs-integration-tests", diff --git a/Makefile b/Makefile index 0c9bc7a7119fa..e3da4736b22b9 100644 --- a/Makefile +++ b/Makefile @@ -330,7 +330,7 @@ test-behavior: test-behavior-transforms test-behavior-formats test-behavior-conf .PHONY: test-integration test-integration: ## Runs all integration tests -test-integration: test-integration-amqp test-integration-aws test-integration-axiom test-integration-azure test-integration-chronicle test-integration-clickhouse +test-integration: test-integration-amqp test-integration-appsignal test-integration-aws test-integration-axiom test-integration-azure test-integration-chronicle test-integration-clickhouse test-integration: test-integration-databend test-integration-docker-logs test-integration-elasticsearch test-integration: test-integration-eventstoredb test-integration-fluent test-integration-gcp test-integration-humio test-integration-http-client test-integration-influxdb test-integration: test-integration-kafka test-integration-logstash test-integration-loki test-integration-mongodb test-integration-nats diff --git a/scripts/integration/appsignal/test.yaml b/scripts/integration/appsignal/test.yaml new file mode 100644 index 0000000000000..0f0970f75e4ac --- /dev/null +++ b/scripts/integration/appsignal/test.yaml @@ -0,0 +1,11 @@ +features: +- appsignal-integration-tests + +test_filter: '::appsignal::integration_tests::' + +runner: + env: + TEST_APPSIGNAL_PUSH_API_KEY: + +matrix: + version: [latest] diff --git a/src/sinks/appsignal/integration_tests.rs b/src/sinks/appsignal/integration_tests.rs new file mode 100644 
index 0000000000000..bcbee6a8052fc --- /dev/null +++ b/src/sinks/appsignal/integration_tests.rs @@ -0,0 +1,93 @@ +use futures::stream; +use indoc::indoc; +use vector_core::event::{BatchNotifier, BatchStatus, Event, Metric, MetricKind, MetricValue}; + +use crate::{ + config::SinkConfig, + sinks::appsignal::AppsignalSinkConfig, + sinks::util::test::load_sink, + test_util::{ + components::{ + assert_sink_compliance, assert_sink_error, run_and_assert_sink_compliance, + COMPONENT_ERROR_TAGS, SINK_TAGS, + }, + generate_lines_with_stream, map_event_batch_stream, + }, +}; + +#[tokio::test] +async fn logs_real_endpoint() { + let config = indoc! {r#" + push_api_key = "${TEST_APPSIGNAL_PUSH_API_KEY}" + "#}; + let api_key = std::env::var("TEST_APPSIGNAL_PUSH_API_KEY") + .expect("couldn't find the AppSignal push API key in environment variables"); + assert!(!api_key.is_empty(), "$TEST_APPSIGNAL_PUSH_API_KEY required"); + let config = config.replace("${TEST_APPSIGNAL_PUSH_API_KEY}", &api_key); + let (config, cx) = load_sink::(config.as_str()).unwrap(); + + let (sink, _) = config.build(cx).await.unwrap(); + let (batch, receiver) = BatchNotifier::new_with_receiver(); + let generator = |index| format!("this is a log with index {}", index); + let (_, events) = generate_lines_with_stream(generator, 10, Some(batch)); + + run_and_assert_sink_compliance(sink, events, &SINK_TAGS).await; + + assert_eq!(receiver.await, BatchStatus::Delivered); +} + +#[tokio::test] +async fn metrics_real_endpoint() { + assert_sink_compliance(&SINK_TAGS, async { + let config = indoc! 
{r#" + push_api_key = "${TEST_APPSIGNAL_PUSH_API_KEY}" + "#}; + let api_key = std::env::var("TEST_APPSIGNAL_PUSH_API_KEY") + .expect("couldn't find the AppSignal push API key in environment variables"); + assert!(!api_key.is_empty(), "$TEST_APPSIGNAL_PUSH_API_KEY required"); + let config = config.replace("${TEST_APPSIGNAL_PUSH_API_KEY}", &api_key); + let (config, cx) = load_sink::(config.as_str()).unwrap(); + + let (sink, _) = config.build(cx).await.unwrap(); + let (batch, receiver) = BatchNotifier::new_with_receiver(); + let events: Vec<_> = (0..10) + .map(|index| { + Event::Metric(Metric::new( + "counter", + MetricKind::Absolute, + MetricValue::Counter { + value: index as f64, + }, + )) + }) + .collect(); + let stream = map_event_batch_stream(stream::iter(events.clone()), Some(batch)); + + sink.run(stream).await.unwrap(); + assert_eq!(receiver.await, BatchStatus::Delivered); + }) + .await; +} + +#[tokio::test] +async fn error_scenario_real_endpoint() { + assert_sink_error(&COMPONENT_ERROR_TAGS, async { + let config = indoc! {r#" + push_api_key = "invalid key" + "#}; + let (config, cx) = load_sink::(config).unwrap(); + + let (sink, _) = config.build(cx).await.unwrap(); + let (batch, receiver) = BatchNotifier::new_with_receiver(); + let events = vec![Event::Metric(Metric::new( + "counter", + MetricKind::Absolute, + MetricValue::Counter { value: 1.0 }, + ))]; + let stream = map_event_batch_stream(stream::iter(events.clone()), Some(batch)); + + sink.run(stream).await.unwrap(); + assert_eq!(receiver.await, BatchStatus::Rejected); + }) + .await; +} diff --git a/src/sinks/appsignal/mod.rs b/src/sinks/appsignal/mod.rs new file mode 100644 index 0000000000000..aff1fdb5093e3 --- /dev/null +++ b/src/sinks/appsignal/mod.rs @@ -0,0 +1,273 @@ +//! The AppSignal sink +//! +//! This sink provides downstream support for `AppSignal` to collect logs and a subset of Vector +//! metric types. These events are sent to the `appsignal-endpoint.net` domain, which is part of +//! 
the `appsignal.com` infrastructure. +//! +//! Logs and metrics are stored on a per app basis and require an app-level Push API key. + +#[cfg(all(test, feature = "appsignal-integration-tests"))] +mod integration_tests; + +use bytes::Bytes; +use futures::{FutureExt, SinkExt}; +use http::{header::AUTHORIZATION, Request, Uri}; +use hyper::Body; +use serde_json::json; +use snafu::{ResultExt, Snafu}; +use vector_common::sensitive_string::SensitiveString; +use vector_config::configurable_component; + +use crate::{ + codecs::Transformer, + config::{AcknowledgementsConfig, DataType, Input, SinkConfig, SinkContext}, + event::Event, + http::HttpClient, + sinks::{ + util::{ + encoding::write_all, + http::{BatchedHttpSink, HttpEventEncoder, HttpSink}, + BatchConfig, BoxedRawValue, Compression, Compressor, JsonArrayBuffer, + SinkBatchSettings, TowerRequestConfig, + }, + BuildError, + }, + tls::TlsSettings, +}; + +#[derive(Debug, Snafu)] +enum FinishError { + #[snafu(display( + "Failure occurred during writing to or finalizing the compressor: {}", + source + ))] + CompressionFailed { source: std::io::Error }, +} + +/// Configuration for the `appsignal` sink. +#[configurable_component(sink("appsignal"))] +#[derive(Clone, Debug, Default)] +pub struct AppsignalSinkConfig { + /// The URI for the AppSignal API to send data to. + #[configurable(validation(format = "uri"))] + #[configurable(metadata(docs::examples = "https://appsignal-endpoint.net"))] + #[serde(default = "default_endpoint")] + endpoint: String, + + /// A valid app-level AppSignal Push API key.
+ #[configurable(metadata(docs::examples = "00000000-0000-0000-0000-000000000000"))] + #[configurable(metadata(docs::examples = "${APPSIGNAL_PUSH_API_KEY}"))] + push_api_key: SensitiveString, + + #[configurable(derived)] + #[serde(default = "Compression::gzip_default")] + compression: Compression, + + #[configurable(derived)] + #[serde(default)] + batch: BatchConfig, + + #[configurable(derived)] + #[serde(default)] + request: TowerRequestConfig, + + #[configurable(derived)] + #[serde( + default, + skip_serializing_if = "crate::serde::skip_serializing_if_default" + )] + encoding: Transformer, + + #[configurable(derived)] + #[serde( + default, + deserialize_with = "crate::serde::bool_or_struct", + skip_serializing_if = "crate::serde::skip_serializing_if_default" + )] + acknowledgements: AcknowledgementsConfig, +} + +fn default_endpoint() -> String { + "https://appsignal-endpoint.net".to_string() +} + +#[derive(Clone, Copy, Debug, Default)] +struct AppsignalDefaultBatchSettings; + +impl SinkBatchSettings for AppsignalDefaultBatchSettings { + const MAX_EVENTS: Option = Some(100); + const MAX_BYTES: Option = Some(450_000); + const TIMEOUT_SECS: f64 = 1.0; +} + +impl_generate_config_from_default!(AppsignalSinkConfig); + +#[async_trait::async_trait] +impl SinkConfig for AppsignalSinkConfig { + async fn build( + &self, + cx: SinkContext, + ) -> crate::Result<(super::VectorSink, super::Healthcheck)> { + let push_api_key = self.push_api_key.inner().to_string(); + let request_settings = self.request.unwrap_with(&TowerRequestConfig::default()); + let batch_settings = self.batch.into_batch_settings()?; + + let buffer = JsonArrayBuffer::new(batch_settings.size); + + let tls_settings = TlsSettings::from_options(&None)?; + let client = HttpClient::new(tls_settings, cx.proxy())?; + + let sink = BatchedHttpSink::new( + self.clone(), + buffer, + request_settings, + batch_settings.timeout, + client.clone(), + ) + .sink_map_err(|error| error!(message = "Fatal AppSignal sink error.", 
%error)); + + let healthcheck = healthcheck( + endpoint_uri(&self.endpoint, "vector/healthcheck")?, + push_api_key, + client, + ) + .boxed(); + + Ok((super::VectorSink::from_event_sink(sink), healthcheck)) + } + + fn input(&self) -> Input { + Input::new(DataType::Metric | DataType::Log) + } + + fn acknowledgements(&self) -> &AcknowledgementsConfig { + &self.acknowledgements + } +} + +/// Encode logs and metrics for requests to the AppSignal API. +/// It will use a JSON format wrapping events in either "log" or "metric", based on the type of event. +pub struct AppsignalEventEncoder { + transformer: Transformer, +} + +impl HttpEventEncoder for AppsignalEventEncoder { + fn encode_event(&mut self, mut event: Event) -> Option { + self.transformer.transform(&mut event); + + match event { + Event::Log(log) => Some(json!({ "log": log })), + Event::Metric(metric) => Some(json!({ "metric": metric })), + _ => panic!("The AppSignal sink does not support this type of event: {event:?}"), + } + } +} + +#[async_trait::async_trait] +impl HttpSink for AppsignalSinkConfig { + type Input = serde_json::Value; + type Output = Vec; + type Encoder = AppsignalEventEncoder; + + fn build_encoder(&self) -> Self::Encoder { + AppsignalEventEncoder { + transformer: self.encoding.clone(), + } + } + + async fn build_request(&self, events: Self::Output) -> crate::Result> { + let uri = endpoint_uri(&self.endpoint, "vector/events")?; + let mut request = Request::post(uri).header( + AUTHORIZATION, + format!("Bearer {}", self.push_api_key.inner()), + ); + + let mut body = crate::serde::json::to_bytes(&events)?.freeze(); + if let Some(ce) = self.compression.content_encoding() { + request = request.header("Content-Encoding", ce); + } + let mut compressor = Compressor::from(self.compression); + write_all(&mut compressor, 0, &body)?; + body = compressor.finish().context(CompressionFailedSnafu)?.into(); + request.body(body).map_err(Into::into) + } +} + +async fn healthcheck(uri: Uri, push_api_key: String, 
client: HttpClient) -> crate::Result<()> { + let request = Request::get(uri).header(AUTHORIZATION, format!("Bearer {}", push_api_key)); + let response = client.send(request.body(Body::empty()).unwrap()).await?; + + match response.status() { + status if status.is_success() => Ok(()), + other => Err(super::HealthcheckError::UnexpectedStatus { status: other }.into()), + } +} + +fn endpoint_uri(endpoint: &str, path: &str) -> crate::Result { + let uri = if endpoint.ends_with('/') { + format!("{endpoint}{path}") + } else { + format!("{endpoint}/{path}") + }; + match uri.parse::() { + Ok(u) => Ok(u), + Err(e) => Err(Box::new(BuildError::UriParseError { source: e })), + } +} + +#[cfg(test)] +mod test { + use futures::{future::ready, stream}; + use serde::Deserialize; + use vector_core::event::{Event, LogEvent}; + + use crate::{ + config::{GenerateConfig, SinkConfig, SinkContext}, + test_util::{ + components::{run_and_assert_sink_compliance, HTTP_SINK_TAGS}, + http::{always_200_response, spawn_blackhole_http_server}, + }, + }; + + use super::{endpoint_uri, AppsignalSinkConfig}; + + #[test] + fn generate_config() { + crate::test_util::test_generate_config::(); + } + + #[tokio::test] + async fn component_spec_compliance() { + let mock_endpoint = spawn_blackhole_http_server(always_200_response).await; + + let config = AppsignalSinkConfig::generate_config().to_string(); + let mut config = + AppsignalSinkConfig::deserialize(toml::de::ValueDeserializer::new(&config)) + .expect("config should be valid"); + config.endpoint = mock_endpoint.to_string(); + + let context = SinkContext::new_test(); + let (sink, _healthcheck) = config.build(context).await.unwrap(); + + let event = Event::Log(LogEvent::from("simple message")); + run_and_assert_sink_compliance(sink, stream::once(ready(event)), &HTTP_SINK_TAGS).await; + } + + #[test] + fn endpoint_uri_with_path() { + let uri = endpoint_uri("https://appsignal-endpoint.net", "vector/events"); + assert_eq!( + uri.expect("Not a valid 
URI").to_string(), + "https://appsignal-endpoint.net/vector/events" + ); + } + + #[test] + fn endpoint_uri_with_trailing_slash() { + let uri = endpoint_uri("https://appsignal-endpoint.net/", "vector/events"); + assert_eq!( + uri.expect("Not a valid URI").to_string(), + "https://appsignal-endpoint.net/vector/events" + ); + } +} diff --git a/src/sinks/mod.rs b/src/sinks/mod.rs index d78b6ade94fda..b9d6955642e85 100644 --- a/src/sinks/mod.rs +++ b/src/sinks/mod.rs @@ -7,6 +7,8 @@ pub mod util; #[cfg(feature = "sinks-amqp")] pub mod amqp; +#[cfg(feature = "sinks-appsignal")] +pub mod appsignal; #[cfg(feature = "sinks-aws_cloudwatch_logs")] pub mod aws_cloudwatch_logs; #[cfg(feature = "sinks-aws_cloudwatch_metrics")] @@ -143,6 +145,11 @@ pub enum Sinks { #[configurable(metadata(docs::label = "AMQP"))] Amqp(amqp::AmqpSinkConfig), + /// Send events to AppSignal. + #[cfg(feature = "sinks-appsignal")] + #[configurable(metadata(docs::label = "AppSignal"))] + Appsignal(appsignal::AppsignalSinkConfig), + /// Publish log events to AWS CloudWatch Logs. 
#[cfg(feature = "sinks-aws_cloudwatch_logs")] #[configurable(metadata(docs::label = "AWS CloudWatch Logs"))] @@ -434,6 +441,8 @@ impl NamedComponent for Sinks { match self { #[cfg(feature = "sinks-amqp")] Self::Amqp(config) => config.get_component_name(), + #[cfg(feature = "sinks-appsignal")] + Self::Appsignal(config) => config.get_component_name(), #[cfg(feature = "sinks-aws_cloudwatch_logs")] Self::AwsCloudwatchLogs(config) => config.get_component_name(), #[cfg(feature = "sinks-aws_cloudwatch_metrics")] diff --git a/website/content/en/docs/reference/configuration/sinks/appsignal.md b/website/content/en/docs/reference/configuration/sinks/appsignal.md new file mode 100644 index 0000000000000..b6167a2de4f0f --- /dev/null +++ b/website/content/en/docs/reference/configuration/sinks/appsignal.md @@ -0,0 +1,15 @@ +--- +title: AppSignal +description: Deliver events to [AppSignal](https://www.appsignal.com/) +kind: sink +layout: component +tags: ["appsignal", "component", "sink", "logs", "metrics"] +aliases: ["/docs/reference/configuration/sinks/appsignal"] +--- + +{{/* +This doc is generated using: + +1. The template in layouts/docs/component.html +2. The relevant CUE data in cue/reference/components/... 
+ */}} diff --git a/website/cue/reference/components/sinks/appsignal.cue b/website/cue/reference/components/sinks/appsignal.cue new file mode 100644 index 0000000000000..4a6c77728a0e2 --- /dev/null +++ b/website/cue/reference/components/sinks/appsignal.cue @@ -0,0 +1,80 @@ +package metadata + +components: sinks: appsignal: { + title: "AppSignal" + + classes: { + commonly_used: false + delivery: "at_least_once" + development: "beta" + egress_method: "batch" + service_providers: ["AppSignal"] + stateful: false + } + + features: { + auto_generated: true + acknowledgements: true + healthcheck: enabled: true + send: { + batch: { + enabled: true + common: false + max_events: 100 + max_bytes: 450_000 + timeout_secs: 1.0 + } + compression: { + enabled: true + default: "gzip" + algorithms: ["gzip"] + levels: [6] + } + encoding: { + enabled: true + codec: enabled: false + } + proxy: enabled: true + request: { + enabled: true + concurrency: 100 + headers: false + } + tls: enabled: false + to: { + service: services.appsignal + + interface: { + socket: { + direction: "outgoing" + protocols: ["http"] + ssl: "required" + } + } + } + } + } + + support: { + requirements: [] + warnings: [] + notices: [] + } + + configuration: base.components.sinks.appsignal.configuration + + input: { + logs: true + metrics: { + counter: true + distribution: true + gauge: true + histogram: false + set: false + summary: false + } + traces: false + } + + telemetry: components.sinks.http.telemetry +} diff --git a/website/cue/reference/components/sinks/base/appsignal.cue b/website/cue/reference/components/sinks/base/appsignal.cue new file mode 100644 index 0000000000000..61bf184f1cae8 --- /dev/null +++ b/website/cue/reference/components/sinks/base/appsignal.cue @@ -0,0 +1,269 @@ +package metadata + +base: components: sinks: appsignal: configuration: { + acknowledgements: { + description: """ + Controls how acknowledgements are handled for this sink.
+ + See [End-to-end Acknowledgements][e2e_acks] for more information on how event acknowledgement is handled. + + [e2e_acks]: https://vector.dev/docs/about/under-the-hood/architecture/end-to-end-acknowledgements/ + """ + required: false + type: object: options: enabled: { + description: """ + Whether or not end-to-end acknowledgements are enabled. + + When enabled for a sink, any source connected to that sink, where the source supports + end-to-end acknowledgements as well, will wait for events to be acknowledged by the sink + before acknowledging them at the source. + + Enabling or disabling acknowledgements at the sink level takes precedence over any global + [`acknowledgements`][global_acks] configuration. + + [global_acks]: https://vector.dev/docs/reference/configuration/global-options/#acknowledgements + """ + required: false + type: bool: {} + } + } + batch: { + description: "Event batching behavior." + required: false + type: object: options: { + max_bytes: { + description: """ + The maximum size of a batch that will be processed by a sink. + + This is based on the uncompressed size of the batched events, before they are + serialized / compressed. + """ + required: false + type: uint: { + default: 450000 + unit: "bytes" + } + } + max_events: { + description: "The maximum size of a batch before it is flushed." + required: false + type: uint: { + default: 100 + unit: "events" + } + } + timeout_secs: { + description: "The maximum age of a batch before it is flushed." + required: false + type: float: { + default: 1.0 + unit: "seconds" + } + } + } + } + compression: { + description: """ + Compression configuration. + + All compression algorithms use the default compression level unless otherwise specified. + """ + required: false + type: string: { + default: "gzip" + enum: { + gzip: """ + [Gzip][gzip] compression. + + [gzip]: https://www.gzip.org/ + """ + none: "No compression." + zlib: """ + [Zlib][zlib] compression. 
+ + [zlib]: https://zlib.net/ + """ + } + } + } + encoding: { + description: "Transformations to prepare an event for serialization." + required: false + type: object: options: { + except_fields: { + description: "List of fields that will be excluded from the encoded event." + required: false + type: array: items: type: string: {} + } + only_fields: { + description: "List of fields that will be included in the encoded event." + required: false + type: array: items: type: string: {} + } + timestamp_format: { + description: "Format used for timestamp fields." + required: false + type: string: enum: { + rfc3339: "Represent the timestamp as a RFC 3339 timestamp." + unix: "Represent the timestamp as a Unix timestamp." + } + } + } + } + endpoint: { + description: "The URI for the AppSignal API to send data to." + required: false + type: string: { + default: "https://appsignal-endpoint.net" + examples: ["https://appsignal-endpoint.net"] + } + } + push_api_key: { + description: "A valid app-level AppSignal Push API key." + required: true + type: string: examples: ["00000000-0000-0000-0000-000000000000", "${APPSIGNAL_PUSH_API_KEY}"] + } + request: { + description: """ + Middleware settings for outbound requests. + + Various settings can be configured, such as concurrency and rate limits, timeouts, etc. + """ + required: false + type: object: options: { + adaptive_concurrency: { + description: """ + Configuration of adaptive concurrency parameters. + + These parameters typically do not require changes from the default, and incorrect values can lead to meta-stable or + unstable performance and sink behavior. Proceed with caution. + """ + required: false + type: object: options: { + decrease_ratio: { + description: """ + The fraction of the current value to set the new concurrency limit when decreasing the limit. + + Valid values are greater than `0` and less than `1`. Smaller values cause the algorithm to scale back rapidly + when latency increases. 
+ + Note that the new limit is rounded down after applying this ratio. + """ + required: false + type: float: default: 0.9 + } + ewma_alpha: { + description: """ + The weighting of new measurements compared to older measurements. + + Valid values are greater than `0` and less than `1`. + + ARC uses an exponentially weighted moving average (EWMA) of past RTT measurements as a reference to compare with + the current RTT. Smaller values cause this reference to adjust more slowly, which may be useful if a service has + unusually high response variability. + """ + required: false + type: float: default: 0.4 + } + rtt_deviation_scale: { + description: """ + Scale of RTT deviations which are not considered anomalous. + + Valid values are greater than or equal to `0`, and we expect reasonable values to range from `1.0` to `3.0`. + + When calculating the past RTT average, we also compute a secondary “deviation” value that indicates how variable + those values are. We use that deviation when comparing the past RTT average to the current measurements, so we + can ignore increases in RTT that are within an expected range. This factor is used to scale up the deviation to + an appropriate range. Larger values cause the algorithm to ignore larger increases in the RTT. + """ + required: false + type: float: default: 2.5 + } + } + } + concurrency: { + description: "Configuration for outbound request concurrency." + required: false + type: { + string: { + default: "none" + enum: { + adaptive: """ + Concurrency will be managed by Vector's [Adaptive Request Concurrency][arc] feature. + + [arc]: https://vector.dev/docs/about/under-the-hood/networking/arc/ + """ + none: """ + A fixed concurrency of 1. + + Only one request can be outstanding at any given time. + """ + } + } + uint: {} + } + } + rate_limit_duration_secs: { + description: "The time window used for the `rate_limit_num` option." 
+ required: false + type: uint: { + default: 1 + unit: "seconds" + } + } + rate_limit_num: { + description: "The maximum number of requests allowed within the `rate_limit_duration_secs` time window." + required: false + type: uint: { + default: 9223372036854775807 + unit: "requests" + } + } + retry_attempts: { + description: """ + The maximum number of retries to make for failed requests. + + The default, for all intents and purposes, represents an infinite number of retries. + """ + required: false + type: uint: { + default: 9223372036854775807 + unit: "retries" + } + } + retry_initial_backoff_secs: { + description: """ + The amount of time to wait before attempting the first retry for a failed request. + + After the first retry has failed, the fibonacci sequence will be used to select future backoffs. + """ + required: false + type: uint: { + default: 1 + unit: "seconds" + } + } + retry_max_duration_secs: { + description: "The maximum amount of time to wait between retries." + required: false + type: uint: { + default: 3600 + unit: "seconds" + } + } + timeout_secs: { + description: """ + The time a request can take before being aborted. + + It is highly recommended that you do not lower this value below the service’s internal timeout, as this could + create orphaned requests, pile on retries, and result in duplicate data downstream. + """ + required: false + type: uint: { + default: 60 + unit: "seconds" + } + } + } + } +} diff --git a/website/cue/reference/services/appsignal.cue b/website/cue/reference/services/appsignal.cue new file mode 100644 index 0000000000000..f9d8543c97a15 --- /dev/null +++ b/website/cue/reference/services/appsignal.cue @@ -0,0 +1,10 @@ +package metadata + +services: appsignal: { + name: "AppSignal" + thing: "an \(name) app" + url: urls.appsignal + versions: null + + description: "[AppSignal](\(urls.appsignal)) is an all-in-one application monitoring tool. We help developers monitor their applications from A-Z with ease." 
+} diff --git a/website/cue/reference/urls.cue b/website/cue/reference/urls.cue index 849d9abb9b81e..0344812ffa7bf 100644 --- a/website/cue/reference/urls.cue +++ b/website/cue/reference/urls.cue @@ -1,6 +1,7 @@ package metadata urls: { + appsignal: "https://www.appsignal.com/" azure_blob_storage: "https://azure.microsoft.com/en-us/services/storage/blobs/" azure_event_hubs: "https://learn.microsoft.com/en-us/azure/event-hubs/" azure_event_hubs_kafka: "https://learn.microsoft.com/en-us/azure/event-hubs/event-hubs-for-kafka-ecosystem-overview"