Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -1280,7 +1280,9 @@
"@opentelemetry/api": "1.9.0",
"@opentelemetry/api-logs": "0.213.0",
"@opentelemetry/context-async-hooks": "2.6.0",
"@opentelemetry/exporter-logs-otlp-grpc": "0.213.0",
"@opentelemetry/exporter-logs-otlp-http": "0.213.0",
"@opentelemetry/exporter-logs-otlp-proto": "0.213.0",
"@opentelemetry/exporter-metrics-otlp-grpc": "0.213.0",
"@opentelemetry/exporter-metrics-otlp-http": "0.213.0",
"@opentelemetry/exporter-metrics-otlp-proto": "0.213.0",
Expand Down
2 changes: 2 additions & 0 deletions renovate.json
Original file line number Diff line number Diff line change
Expand Up @@ -2348,7 +2348,9 @@
"@opentelemetry/api",
"@opentelemetry/api-logs",
"@opentelemetry/context-async-hooks",
"@opentelemetry/exporter-logs-otlp-grpc",
"@opentelemetry/exporter-logs-otlp-http",
"@opentelemetry/exporter-logs-otlp-proto",
"@opentelemetry/exporter-metrics-otlp-http",
"@opentelemetry/exporter-metrics-otlp-grpc",
"@opentelemetry/exporter-metrics-otlp-proto",
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,18 +26,20 @@ jest.mock('@opentelemetry/sdk-logs', () => ({
jest.mock('@opentelemetry/exporter-logs-otlp-http', () => ({
OTLPLogExporter: jest.fn(),
}));
jest.mock('@opentelemetry/resources', () => {
jest.mock('@elastic/opentelemetry-node/sdk', () => {
interface MockResource {
merge: jest.Mock<MockResource>;
}
const makeMergeableResource = (): MockResource => ({ merge: jest.fn(makeMergeableResource) });
return {
detectResources: jest.fn(makeMergeableResource),
resourceFromAttributes: jest.fn(makeMergeableResource),
envDetector: 'envDetector',
hostDetector: 'hostDetector',
osDetector: 'osDetector',
processDetector: 'processDetector',
resources: {
detectResources: jest.fn(makeMergeableResource),
resourceFromAttributes: jest.fn(makeMergeableResource),
envDetector: 'envDetector',
hostDetector: 'hostDetector',
osDetector: 'osDetector',
processDetector: 'processDetector',
},
};
});
jest.mock('@opentelemetry/api', () => ({
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ test('`configSchema` creates correct schema.', () => {

const validConfig3 = {
type: 'otel',
protocol: 'http',
url: 'http://collector:4318/v1/logs',
headers: { Authorization: 'Bearer token' },
attributes: { 'service.name': 'kibana' },
Expand Down Expand Up @@ -91,6 +92,7 @@ test('`create()` creates correct appender.', () => {

const otelAppender = Appenders.create({
type: 'otel',
protocol: 'http',
url: 'http://collector:4318/v1/logs',
headers: {},
attributes: {},
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,23 @@ jest.mock('@opentelemetry/exporter-logs-otlp-http', () => ({
OTLPLogExporter: mockOTLPLogExporter,
}));

jest.mock('@opentelemetry/resources', () => ({
detectResources: mockDetectResources,
resourceFromAttributes: mockResourceFromAttributes,
envDetector: 'envDetector',
hostDetector: 'hostDetector',
osDetector: 'osDetector',
processDetector: 'processDetector',
jest.mock('@opentelemetry/exporter-logs-otlp-grpc', () => ({
OTLPLogExporter: mockOTLPLogExporter,
}));

jest.mock('@opentelemetry/exporter-logs-otlp-proto', () => ({
OTLPLogExporter: mockOTLPLogExporter,
}));

jest.mock('@elastic/opentelemetry-node/sdk', () => ({
resources: {
detectResources: mockDetectResources,
resourceFromAttributes: mockResourceFromAttributes,
envDetector: 'envDetector',
hostDetector: 'hostDetector',
osDetector: 'osDetector',
processDetector: 'processDetector',
},
}));

jest.mock('@opentelemetry/api', () => ({
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ import { OtelAppender } from './otel_appender';

const validConfig = {
type: 'otel' as const,
protocol: 'http' as const,
url: 'http://collector:4318/v1/logs',
headers: { Authorization: 'Bearer token' },
};
Expand Down Expand Up @@ -112,7 +113,7 @@ describe('OtelAppender constructor', () => {
});

it('defaults to empty headers when none are provided', () => {
new OtelAppender({ type: 'otel', url: validConfig.url });
new OtelAppender({ type: 'otel', url: validConfig.url, protocol: 'http' });

expect(mockOTLPLogExporter).toHaveBeenCalledWith({
url: validConfig.url,
Expand Down Expand Up @@ -168,7 +169,7 @@ describe('OtelAppender constructor', () => {
'telemetry.sdk.language': 'nodejs',
'service.name': 'kibana',
'service.version': '9.4.0',
'deployment.environment': 'production',
'deployment.environment.name': 'production',
});
});

Expand Down Expand Up @@ -298,11 +299,13 @@ describe('OtelAppender.append() — severity mapping', () => {
it.each([
['off', LogLevel.Off],
['all', LogLevel.All],
])('silently drops records with filter-only level %s', (_name, level) => {
])('emits records with filter-only level %s as severityNumber UNSPECIFIED', (_name, level) => {
const appender = new OtelAppender(validConfig);
appender.append(makeRecord({ level }));

expect(mockEmit).not.toHaveBeenCalled();
expect(mockEmit).toHaveBeenCalledWith(
expect.objectContaining({ severityNumber: SeverityNumber.UNSPECIFIED })
);
});
});

Expand Down Expand Up @@ -361,8 +364,7 @@ describe('OtelAppender.append() — trace context', () => {
expect(trace.setSpanContext).toHaveBeenCalledWith('root-context', {
traceId: 'abc123',
spanId: 'def456',
traceFlags: 1, // TraceFlags.SAMPLED
isRemote: false,
traceFlags: undefined, // TraceFlags.NONE
});
const emittedContext = mockEmit.mock.calls[0][0].context;
expect(emittedContext).toBeDefined();
Expand Down Expand Up @@ -453,7 +455,10 @@ describe('OtelAppender.append() — attributes', () => {

expect(mockEmit).toHaveBeenCalledWith(
expect.objectContaining({
attributes: expect.objectContaining({ 'log.meta': JSON.stringify(meta) }),
attributes: expect.objectContaining({
'kibana.log.meta.http.method': 'GET',
'kibana.log.meta.tags': ['api'],
}),
})
);
});
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -7,70 +7,36 @@
* License v3.0 only", or the "Server Side Public License, v 1".
*/

import type { OTLPLogExporter as OTLPLogExporterHTTP } from '@opentelemetry/exporter-logs-otlp-http';
import type { OTLPLogExporter as OTLPLogExporterGRPC } from '@opentelemetry/exporter-logs-otlp-grpc';
import type { OTLPLogExporter as OTLPLogExporterPROTO } from '@opentelemetry/exporter-logs-otlp-proto';
import { schema } from '@kbn/config-schema';
import type { DisposableAppender, Layout, LogLevel, LogRecord } from '@kbn/logging';
import { ROOT_CONTEXT, TraceFlags, trace, type Context } from '@opentelemetry/api';
import {
ROOT_CONTEXT,
TraceFlags,
trace,
type Context,
type Attributes,
type AttributeValue,
} from '@opentelemetry/api';
import type { AnyValueMap } from '@opentelemetry/api-logs';
import { SeverityNumber, type Logger } from '@opentelemetry/api-logs';
import { OTLPLogExporter } from '@opentelemetry/exporter-logs-otlp-http';
import {
detectResources,
envDetector,
hostDetector,
osDetector,
processDetector,
resourceFromAttributes,
} from '@opentelemetry/resources';
import { resources } from '@elastic/opentelemetry-node/sdk';
import { BatchLogRecordProcessor, LoggerProvider } from '@opentelemetry/sdk-logs';
import type { OtelAppenderConfig, LayoutConfigType } from '@kbn/core-logging-server';
// getConfiguration returns the already-initialised APM config singleton (loaded at bootstrap).
// Both packages are platform/private so this import is within the allowed visibility boundary.
import { getConfiguration } from '@kbn/apm-config-loader';
import { buildOtelResources } from '@kbn/telemetry/src/build_otel_resources';
import { getFlattenedObject } from '@kbn/std';
import { Layouts } from '../../layouts/layouts';

const DISPOSE_TIMEOUT_MS = 5_000;

/**
 * Derives OTel service resource attributes from the APM configuration that was
 * already loaded at bootstrap time. This mirrors the approach used by
 * `initTelemetry` for traces so that all signals share a consistent service
 * identity without requiring the user to re-declare it in `kibana.yml`.
 */
const deriveServiceAttributes = (): Record<string, string> => {
  // telemetry.sdk.language is always known for this Node.js process.
  const attributes: Record<string, string> = { 'telemetry.sdk.language': 'nodejs' };

  const apmConfig = getConfiguration('kibana');
  if (!apmConfig) {
    return attributes;
  }

  const { serviceName, serviceVersion, environment, globalLabels } = apmConfig;
  if (serviceName) {
    attributes['service.name'] = String(serviceName);
  }
  if (serviceVersion) {
    attributes['service.version'] = String(serviceVersion);
  }
  if (environment) {
    attributes['deployment.environment'] = String(environment);
  }

  // The Kibana node UUID is stored in globalLabels.kibana_uuid by ApmConfiguration.
  // We surface it as service.instance.id per the OTel Resource semantic conventions.
  if (globalLabels && typeof globalLabels === 'object' && !Array.isArray(globalLabels)) {
    const kibanaUuid = globalLabels.kibana_uuid;
    if (kibanaUuid) {
      attributes['service.instance.id'] = String(kibanaUuid);
    }
  }

  return attributes;
};

/**
* Maps a Kibana log level to the corresponding OTel SeverityNumber.
 * Returns `SeverityNumber.UNSPECIFIED` for filter-only levels ('all', 'off')
 * that should never appear on an actual log record.
*/
const toSeverityNumber = (level: LogLevel): SeverityNumber | undefined => {
const toSeverityNumber = (level: LogLevel): SeverityNumber => {
switch (level.id) {
case 'trace':
return SeverityNumber.TRACE;
Expand All @@ -86,7 +52,7 @@ const toSeverityNumber = (level: LogLevel): SeverityNumber | undefined => {
return SeverityNumber.FATAL;
default:
// 'all' and 'off' are filter thresholds, not record severities.
return undefined;
return SeverityNumber.UNSPECIFIED;
}
};

Expand All @@ -100,12 +66,11 @@ const toTraceContext = (record: LogRecord): Context | undefined => {
if (!record.traceId || !record.spanId) {
return undefined;
}

return trace.setSpanContext(ROOT_CONTEXT, {
traceId: record.traceId,
spanId: record.spanId,
// Kibana only propagates IDs for sampled traces.
traceFlags: TraceFlags.SAMPLED,
isRemote: false,
traceFlags: TraceFlags.NONE,
});
};

Expand Down Expand Up @@ -160,11 +125,8 @@ const resolveLayoutConfig = (config?: LayoutConfigType): LayoutConfigType => {
* - When using the JSON layout, `meta` is already part of the structured body,
* so `log.meta` is omitted from attributes to avoid duplication.
*/
const toAttributes = (
record: LogRecord,
includeLogMeta: boolean
): Record<string, string | number | boolean> => {
const attrs: Record<string, string | number | boolean> = {
const toAttributes = (record: LogRecord, includeLogMeta: boolean): Attributes => {
const attrs: Attributes = {
'log.logger': record.context,
};

Expand All @@ -182,10 +144,30 @@ const toAttributes = (
}
}

if (includeLogMeta && record.meta) {
if (
includeLogMeta &&
record.meta &&
typeof record.meta === 'object' &&
!Array.isArray(record.meta)
) {
// Extract the service object because we know that it always exists. Mapping it directly avoids calling the more expensive getFlattenedObject function for every log entry.
const {
service: { version, type, state, node: { roles } = {}, id, ...serviceRest } = {},
...rest
} = record.meta;
attrs['service.version'] = version;
attrs['service.type'] = type;
attrs['service.state'] = state;
attrs['service.node.roles'] = roles;
attrs['service.id'] = id;
// Flatten anything that we don't know about into the service object (ideally, nothing).
Object.entries(getFlattenedObject(serviceRest)).forEach(([key, value]) => {
attrs[key] = value;
});

// Only included for pattern layout: with JSON layout the meta is part of
// the structured body and repeating it here would be redundant.
attrs['log.meta'] = JSON.stringify(record.meta);
attrs['kibana.log.meta'] = rest as unknown as AttributeValue; // Force-casting because "objects" are actually allowed (and indexed as "flattened" types) in ES.
}

return attrs;
Expand All @@ -200,6 +182,10 @@ const toAttributes = (
export class OtelAppender implements DisposableAppender {
public static configSchema = schema.object({
type: schema.literal('otel'),
protocol: schema.oneOf(
[schema.literal('http'), schema.literal('proto'), schema.literal('grpc')],
{ defaultValue: 'proto' }
),
url: schema.string(),
headers: schema.recordOf(schema.string(), schema.string(), { defaultValue: {} }),
/**
Expand All @@ -219,17 +205,15 @@ export class OtelAppender implements DisposableAppender {
private readonly useStructuredBody: boolean;

constructor(config: OtelAppenderConfig) {
const exporter = new OTLPLogExporter({ url: config.url, headers: config.headers ?? {} });
const exporter = createExporter(config);
// Layer the resource from three sources (each overriding the previous):
// 1. Auto-detected: host, OS, process, env-var OTel attributes
// 2. Derived: service.name / service.version / deployment.environment from the
// APM config singleton (mirrors how initTelemetry builds trace resources)
// 3. User overrides: explicit attributes from kibana.yml (optional)
const resource = detectResources({
detectors: [envDetector, hostDetector, osDetector, processDetector],
})
.merge(resourceFromAttributes(deriveServiceAttributes()))
.merge(resourceFromAttributes(config.attributes ?? {}));
const resource = buildOtelResources().merge(
resources.resourceFromAttributes(config.attributes ?? {})
);
this.loggerProvider = new LoggerProvider({
processors: [new BatchLogRecordProcessor(exporter)],
resource,
Expand Down Expand Up @@ -281,3 +265,35 @@ export class OtelAppender implements DisposableAppender {
]);
}
}

const createExporter = (
config: OtelAppenderConfig
): OTLPLogExporterHTTP | OTLPLogExporterGRPC | OTLPLogExporterPROTO => {
switch (config.protocol) {
case 'http': {
// No need to import the module at the top if not used in the switch statement.
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { OTLPLogExporter } = require('@opentelemetry/exporter-logs-otlp-http');
Copy link
Copy Markdown
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Love it! 👏🏼

return new OTLPLogExporter({ url: config.url, headers: config.headers ?? {} });
}
case 'proto': {
// No need to import the module at the top if not used in the switch statement.
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { OTLPLogExporter } = require('@opentelemetry/exporter-logs-otlp-proto');
return new OTLPLogExporter({ url: config.url, headers: config.headers ?? {} });
}
case 'grpc': {
// No need to import the module at the top if not used in the switch statement.
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { Metadata } = require('@grpc/grpc-js');
const metadata = new Metadata();
Object.entries(config.headers ?? {}).forEach(([key, value]) => {
metadata.add(key, value);
});
// No need to import the module at the top if not used in the switch statement.
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { OTLPLogExporter } = require('@opentelemetry/exporter-logs-otlp-grpc');
return new OTLPLogExporter({ url: config.url, metadata });
}
}
};
5 changes: 5 additions & 0 deletions src/core/packages/logging/server/src/appenders/otel.ts
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,11 @@ import type { LayoutConfigType } from '../layout';
export interface OtelAppenderConfig {
/** Discriminator for this appender type. */
type: 'otel';
/**
* The protocol to use for the OTLP exporter.
 * Defaults to 'proto'.
*/
protocol: 'http' | 'proto' | 'grpc';
/** OTLP endpoint URL, e.g. https://collector:4318/v1/logs (HTTP/proto) or the collector address for gRPC */
url: string;
/**
Expand Down
Loading
Loading