Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ You can use the following arguments with `database_observability.mysql`:
| `targets` | `list(map(string))` | List of targets to scrape. | | yes |
| `disable_collectors` | `list(string)` | A list of collectors to disable from the default set. | | no |
| `enable_collectors` | `list(string)` | A list of collectors to enable on top of the default set. | | no |
| `exclude_schemas` | `list(string)` | A list of schemas to exclude from monitoring. | | no |
| `allow_update_performance_schema_settings` | `boolean` | Whether to allow updates to `performance_schema` settings in any collector. Enable this in conjunction with other collector-specific settings where required. | `false` | no |

The following collectors are configurable:
Expand Down Expand Up @@ -133,7 +134,6 @@ The `azure` block supplies the identifying information for the database being mo
| Name | Type | Description | Default | Required |
| ------------------------------ | -------------- | ------------------------------------------------------------------------------- | ------- | -------- |
| `collect_interval` | `duration` | How frequently to collect information from the database. | `"1m"` | no |
| `explain_plan_exclude_schemas` | `list(string)` | List of schemas to exclude from explain plan collection. | | no |
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can be deleted as it was not wired

| `initial_lookback` | `duration` | How far back to look for explain plan queries on the first collection interval. | `"24h"` | no |
| `per_collect_ratio` | `float` | Ratio of explain plan queries to collect per collect interval. | `1.0` | no |

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ You can use the following arguments with `database_observability.postgres`:
| `targets` | `list(map(string))` | List of targets to scrape. | | yes |
| `disable_collectors` | `list(string)` | A list of collectors to disable from the default set. | | no |
| `enable_collectors` | `list(string)` | A list of collectors to enable on top of the default set. | | no |
| `exclude_databases` | `list(string)` | A list of databases to exclude from monitoring. | | no |

The following collectors are configurable:

Expand Down Expand Up @@ -122,11 +123,10 @@ The `azure` block supplies the identifying information for the database being mo

### `explain_plans`

| Name | Type | Description | Default | Required |
|--------------------------------|----------------|------------------------------------------------------|---------|----------|
| `collect_interval` | `duration` | How frequently to collect information from the database. | `"1m"` | no |
| `per_collect_ratio` | `float64` | The ratio of queries to collect explain plans for. | `1.0` | no |
| `explain_plan_exclude_schemas` | `list(string)` | Schemas to exclude from explain plans. | `[]` | no |
| Name | Type | Description | Default | Required |
|---------------------|----------------|------------------------------------------------------|---------|----------|
| `collect_interval`  | `duration`     | How frequently to collect information from the database. | `"1m"`  | no       |
| `per_collect_ratio` | `float64` | The ratio of queries to collect explain plans for. | `1.0` | no |

### `health_check`

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -397,9 +397,9 @@ type ExplainPlansArguments struct {
DB *sql.DB
ScrapeInterval time.Duration
PerScrapeRatio float64
InitialLookback time.Time
ExcludeSchemas []string
EntryHandler loki.EntryHandler
InitialLookback time.Time
DBVersion string

Logger log.Logger
Expand Down Expand Up @@ -427,12 +427,12 @@ func NewExplainPlans(args ExplainPlansArguments) (*ExplainPlans, error) {
dbConnection: args.DB,
dbVersion: args.DBVersion,
scrapeInterval: args.ScrapeInterval,
perScrapeRatio: args.PerScrapeRatio,
excludeSchemas: args.ExcludeSchemas,
lastSeen: args.InitialLookback,
queryCache: make(map[string]*queryInfo),
queryDenylist: make(map[string]*queryInfo),
excludeSchemas: args.ExcludeSchemas,
perScrapeRatio: args.PerScrapeRatio,
entryHandler: args.EntryHandler,
lastSeen: args.InitialLookback,
logger: log.With(args.Logger, "collector", ExplainPlansCollector),
running: atomic.NewBool(false),
}, nil
Expand Down
12 changes: 7 additions & 5 deletions internal/component/database_observability/mysql/component.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ type Arguments struct {
Targets []discovery.Target `alloy:"targets,attr"`
EnableCollectors []string `alloy:"enable_collectors,attr,optional"`
DisableCollectors []string `alloy:"disable_collectors,attr,optional"`
ExcludeSchemas []string `alloy:"exclude_schemas,attr,optional"`
AllowUpdatePerfSchemaSettings bool `alloy:"allow_update_performance_schema_settings,attr,optional"`

CloudProvider *CloudProvider `alloy:"cloud_provider,block,optional"`
Expand Down Expand Up @@ -110,10 +111,9 @@ type SetupActorsArguments struct {
}

type ExplainPlansArguments struct {
CollectInterval time.Duration `alloy:"collect_interval,attr,optional"`
PerCollectRatio float64 `alloy:"per_collect_ratio,attr,optional"`
InitialLookback time.Duration `alloy:"initial_lookback,attr,optional"`
ExplainPlanExcludeSchemas []string `alloy:"explain_plan_exclude_schemas,attr,optional"`
CollectInterval time.Duration `alloy:"collect_interval,attr,optional"`
PerCollectRatio float64 `alloy:"per_collect_ratio,attr,optional"`
InitialLookback time.Duration `alloy:"initial_lookback,attr,optional"`
}

type LocksArguments struct {
Expand All @@ -133,6 +133,7 @@ type HealthCheckArguments struct {
}

var DefaultArguments = Arguments{
ExcludeSchemas: []string{},
AllowUpdatePerfSchemaSettings: false,

QueryDetailsArguments: QueryDetailsArguments{
Expand Down Expand Up @@ -568,10 +569,11 @@ func (c *Component) startCollectors(serverID string, engineVersion string, parse
DB: c.dbConnection,
ScrapeInterval: c.args.ExplainPlansArguments.CollectInterval,
PerScrapeRatio: c.args.ExplainPlansArguments.PerCollectRatio,
ExcludeSchemas: c.args.ExcludeSchemas,
InitialLookback: time.Now().Add(-c.args.ExplainPlansArguments.InitialLookback),
Logger: c.opts.Logger,
DBVersion: engineVersion,
EntryHandler: entryHandler,
InitialLookback: time.Now().Add(-c.args.ExplainPlansArguments.InitialLookback),
})
if err != nil {
logStartError(collector.ExplainPlansCollector, "create", err)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -210,14 +210,13 @@ func newQueryInfo(datname, queryId, queryText string, calls int64, callsReset ti
}

type ExplainPlansArguments struct {
DB *sql.DB
DSN string
ScrapeInterval time.Duration
PerScrapeRatio float64
ExcludeSchemas []string
EntryHandler loki.EntryHandler
InitialLookback time.Time
DBVersion string
DB *sql.DB
DSN string
ScrapeInterval time.Duration
PerScrapeRatio float64
ExcludeDatabases []string
EntryHandler loki.EntryHandler
DBVersion string

Logger log.Logger
}
Expand All @@ -231,7 +230,7 @@ type ExplainPlans struct {
queryCache map[string]*queryInfo
queryDenylist map[string]*queryInfo
finishedQueryCache map[string]*queryInfo
excludeSchemas []string
excludeDatabases []string
perScrapeRatio float64
currentBatchSize int
entryHandler loki.EntryHandler
Expand All @@ -247,11 +246,11 @@ func NewExplainPlan(args ExplainPlansArguments) (*ExplainPlans, error) {
dbDSN: args.DSN,
dbConnectionFactory: defaultDbConnectionFactory,
scrapeInterval: args.ScrapeInterval,
perScrapeRatio: args.PerScrapeRatio,
excludeDatabases: args.ExcludeDatabases,
queryCache: make(map[string]*queryInfo),
queryDenylist: make(map[string]*queryInfo),
finishedQueryCache: make(map[string]*queryInfo),
excludeSchemas: args.ExcludeSchemas,
perScrapeRatio: args.PerScrapeRatio,
entryHandler: args.EntryHandler,
logger: log.With(args.Logger, "collector", ExplainPlanCollector),
running: atomic.NewBool(false),
Expand Down Expand Up @@ -380,7 +379,7 @@ func (c *ExplainPlans) populateQueryCache(ctx context.Context) error {
return fmt.Errorf("failed to scan query for explain plan: %w", err)
}

if slices.ContainsFunc(c.excludeSchemas, func(schema string) bool {
if slices.ContainsFunc(c.excludeDatabases, func(schema string) bool {
return strings.EqualFold(schema, datname)
}) {

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2261,15 +2261,14 @@ func TestNewExplainPlan(t *testing.T) {
require.NoError(t, err)

args := ExplainPlansArguments{
DB: db,
DSN: "postgres://user:pass@localhost:5432/testdb",
ScrapeInterval: time.Minute,
PerScrapeRatio: 0.1,
ExcludeSchemas: []string{"information_schema", "pg_catalog"},
EntryHandler: entryHandler,
InitialLookback: time.Now().Add(-time.Hour),
DBVersion: pre17ver,
Logger: logger,
DB: db,
DSN: "postgres://user:pass@localhost:5432/testdb",
ScrapeInterval: time.Minute,
PerScrapeRatio: 0.1,
ExcludeDatabases: []string{"information_schema", "pg_catalog"},
EntryHandler: entryHandler,
DBVersion: pre17ver,
Logger: logger,
}

explainPlan, err := NewExplainPlan(args)
Expand All @@ -2281,7 +2280,7 @@ func TestNewExplainPlan(t *testing.T) {
assert.Equal(t, pre17semver, explainPlan.dbVersion)
assert.Equal(t, args.ScrapeInterval, explainPlan.scrapeInterval)
assert.Equal(t, args.PerScrapeRatio, explainPlan.perScrapeRatio)
assert.Equal(t, args.ExcludeSchemas, explainPlan.excludeSchemas)
assert.Equal(t, args.ExcludeDatabases, explainPlan.excludeDatabases)
assert.Equal(t, entryHandler, explainPlan.entryHandler)
assert.NotNil(t, explainPlan.queryCache)
assert.NotNil(t, explainPlan.queryDenylist)
Expand Down Expand Up @@ -2520,7 +2519,7 @@ func TestExplainPlan_PopulateQueryCache(t *testing.T) {
queryCache: make(map[string]*queryInfo),
queryDenylist: make(map[string]*queryInfo),
finishedQueryCache: make(map[string]*queryInfo),
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: logger,
entryHandler: lokiClient,
Expand Down Expand Up @@ -2560,7 +2559,7 @@ func TestExplainPlan_PopulateQueryCache(t *testing.T) {
queryCache: make(map[string]*queryInfo),
queryDenylist: make(map[string]*queryInfo),
finishedQueryCache: make(map[string]*queryInfo),
excludeSchemas: []string{"information_schema"},
excludeDatabases: []string{"information_schema"},
perScrapeRatio: 0.5,
logger: logger,
entryHandler: lokiClient,
Expand Down Expand Up @@ -2748,7 +2747,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
queryCache: make(map[string]*queryInfo),
queryDenylist: make(map[string]*queryInfo),
finishedQueryCache: make(map[string]*queryInfo),
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: logger,
}
Expand Down Expand Up @@ -2794,7 +2793,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
},
queryDenylist: map[string]*queryInfo{},
finishedQueryCache: map[string]*queryInfo{},
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: log.NewLogfmtLogger(log.NewSyncWriter(&logBuffer)),
entryHandler: lokiClient,
Expand Down Expand Up @@ -2849,7 +2848,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
},
queryDenylist: map[string]*queryInfo{},
finishedQueryCache: map[string]*queryInfo{},
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: log.NewLogfmtLogger(log.NewSyncWriter(&logBuffer)),
entryHandler: lokiClient,
Expand Down Expand Up @@ -2902,7 +2901,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
},
queryDenylist: map[string]*queryInfo{},
finishedQueryCache: map[string]*queryInfo{},
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: log.NewLogfmtLogger(log.NewSyncWriter(&logBuffer)),
currentBatchSize: 1,
Expand Down Expand Up @@ -2969,7 +2968,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
},
queryDenylist: map[string]*queryInfo{},
finishedQueryCache: map[string]*queryInfo{},
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: log.NewLogfmtLogger(log.NewSyncWriter(&logBuffer)),
currentBatchSize: 1,
Expand Down Expand Up @@ -3036,7 +3035,7 @@ func TestExplainPlanFetchExplainPlans(t *testing.T) {
},
queryDenylist: map[string]*queryInfo{},
finishedQueryCache: map[string]*queryInfo{},
excludeSchemas: []string{},
excludeDatabases: []string{},
perScrapeRatio: 1.0,
logger: log.NewLogfmtLogger(log.NewSyncWriter(&logBuffer)),
currentBatchSize: 1,
Expand Down
28 changes: 15 additions & 13 deletions internal/component/database_observability/postgres/component.go
Original file line number Diff line number Diff line change
Expand Up @@ -65,12 +65,13 @@ type Arguments struct {
Targets []discovery.Target `alloy:"targets,attr"`
EnableCollectors []string `alloy:"enable_collectors,attr,optional"`
DisableCollectors []string `alloy:"disable_collectors,attr,optional"`
ExcludeDatabases []string `alloy:"exclude_databases,attr,optional"`

CloudProvider *CloudProvider `alloy:"cloud_provider,block,optional"`
QuerySampleArguments QuerySampleArguments `alloy:"query_samples,block,optional"`
QueryTablesArguments QueryTablesArguments `alloy:"query_details,block,optional"`
SchemaDetailsArguments SchemaDetailsArguments `alloy:"schema_details,block,optional"`
ExplainPlanArguments ExplainPlanArguments `alloy:"explain_plans,block,optional"`
ExplainPlansArguments ExplainPlansArguments `alloy:"explain_plans,block,optional"`
HealthCheckArguments HealthCheckArguments `alloy:"health_check,block,optional"`
}

Expand Down Expand Up @@ -107,6 +108,7 @@ type SchemaDetailsArguments struct {
}

var DefaultArguments = Arguments{
ExcludeDatabases: []string{},
QuerySampleArguments: QuerySampleArguments{
CollectInterval: 15 * time.Second,
DisableQueryRedaction: false,
Expand All @@ -121,7 +123,7 @@ var DefaultArguments = Arguments{
CacheSize: 256,
CacheTTL: 10 * time.Minute,
},
ExplainPlanArguments: ExplainPlanArguments{
ExplainPlansArguments: ExplainPlansArguments{
CollectInterval: 1 * time.Minute,
PerCollectRatio: 1.0,
},
Expand All @@ -130,10 +132,9 @@ var DefaultArguments = Arguments{
},
}

type ExplainPlanArguments struct {
CollectInterval time.Duration `alloy:"collect_interval,attr,optional"`
PerCollectRatio float64 `alloy:"per_collect_ratio,attr,optional"`
ExplainPlanExcludeSchemas []string `alloy:"explain_plan_exclude_schemas,attr,optional"`
type ExplainPlansArguments struct {
CollectInterval time.Duration `alloy:"collect_interval,attr,optional"`
PerCollectRatio float64 `alloy:"per_collect_ratio,attr,optional"`
}

type HealthCheckArguments struct {
Expand Down Expand Up @@ -462,13 +463,14 @@ func (c *Component) startCollectors(systemID string, engineVersion string, cloud

if collectors[collector.ExplainPlanCollector] {
epCollector, err := collector.NewExplainPlan(collector.ExplainPlansArguments{
DB: c.dbConnection,
DSN: string(c.args.DataSourceName),
ScrapeInterval: c.args.ExplainPlanArguments.CollectInterval,
PerScrapeRatio: c.args.ExplainPlanArguments.PerCollectRatio,
Logger: c.opts.Logger,
DBVersion: engineVersion,
EntryHandler: entryHandler,
DB: c.dbConnection,
DSN: string(c.args.DataSourceName),
ScrapeInterval: c.args.ExplainPlansArguments.CollectInterval,
PerScrapeRatio: c.args.ExplainPlansArguments.PerCollectRatio,
ExcludeDatabases: c.args.ExcludeDatabases,
Logger: c.opts.Logger,
DBVersion: engineVersion,
EntryHandler: entryHandler,
})
if err != nil {
logStartError(collector.ExplainPlanCollector, "create", err)
Expand Down
Loading