diff --git a/mysql.go b/mysql.go index d18d9cdef..ebc7b15fa 100644 --- a/mysql.go +++ b/mysql.go @@ -50,6 +50,374 @@ type MySQLDatabase struct { UsedDiskSizeGB int `json:"used_disk_size_gb"` TotalDiskSizeGB int `json:"total_disk_size_gb"` Port int `json:"port"` + + EngineConfig MySQLDatabaseEngineConfig `json:"engine_config"` +} + +type MySQLDatabaseEngineConfig struct { + MySQL *MySQLDatabaseEngineConfigMySQL `json:"mysql,omitempty"` + BinlogRetentionPeriod *int `json:"binlog_retention_period,omitempty"` + ServiceLog *bool `json:"service_log,omitempty"` +} + +type MySQLDatabaseEngineConfigMySQL struct { + ConnectTimeout *int `json:"connect_timeout,omitempty"` + DefaultTimeZone *string `json:"default_time_zone,omitempty"` + GroupConcatMaxLen *float64 `json:"group_concat_max_len,omitempty"` + InformationSchemaStatsExpiry *int `json:"information_schema_stats_expiry,omitempty"` + InnoDBChangeBufferMaxSize *int `json:"innodb_change_buffer_max_size,omitempty"` + InnoDBFlushNeighbors *int `json:"innodb_flush_neighbors,omitempty"` + InnoDBFTMinTokenSize *int `json:"innodb_ft_min_token_size,omitempty"` + InnoDBFTServerStopwordTable *string `json:"innodb_ft_server_stopword_table,omitempty"` + InnoDBLockWaitTimeout *int `json:"innodb_lock_wait_timeout,omitempty"` + InnoDBLogBufferSize *int `json:"innodb_log_buffer_size,omitempty"` + InnoDBOnlineAlterLogMaxSize *int `json:"innodb_online_alter_log_max_size,omitempty"` + InnoDBPrintAllDeadlocks *bool `json:"innodb_print_all_deadlocks,omitempty"` + InnoDBReadIOThreads *int `json:"innodb_read_io_threads,omitempty"` + InnoDBRollbackOnTimeout *bool `json:"innodb_rollback_on_timeout,omitempty"` + InnoDBThreadConcurrency *int `json:"innodb_thread_concurrency,omitempty"` + InnoDBWriteIOThreads *int `json:"innodb_write_io_threads,omitempty"` + InteractiveTimeout *int `json:"interactive_timeout,omitempty"` + InternalTmpMemStorageEngine *string `json:"internal_tmp_mem_storage_engine,omitempty"` + LogOutput *string `json:"log_output,omitempty"` + LongQueryTime *float64 `json:"long_query_time,omitempty"` + MaxAllowedPacket *int `json:"max_allowed_packet,omitempty"` + MaxHeapTableSize *int `json:"max_heap_table_size,omitempty"` + NetBufferLength *int `json:"net_buffer_length,omitempty"` + NetReadTimeout *int `json:"net_read_timeout,omitempty"` + NetWriteTimeout *int `json:"net_write_timeout,omitempty"` + SlowQueryLog *bool `json:"slow_query_log,omitempty"` + SortBufferSize *int `json:"sort_buffer_size,omitempty"` + SQLMode *string `json:"sql_mode,omitempty"` + SQLRequirePrimaryKey *bool `json:"sql_require_primary_key,omitempty"` + TmpTableSize *int `json:"tmp_table_size,omitempty"` + WaitTimeout *int `json:"wait_timeout,omitempty"` +} + +type MySQLDatabaseConfigInfo struct { + MySQL MySQLDatabaseConfigInfoMySQL `json:"mysql"` + BinlogRetentionPeriod MySQLDatabaseConfigInfoBinlogRetentionPeriod `json:"binlog_retention_period"` + ServiceLog MySQLDatabaseConfigInfoServiceLog `json:"service_log"` +} + +type MySQLDatabaseConfigInfoMySQL struct { + ConnectTimeout ConnectTimeout `json:"connect_timeout"` + DefaultTimeZone DefaultTimeZone `json:"default_time_zone"` + GroupConcatMaxLen GroupConcatMaxLen `json:"group_concat_max_len"` + InformationSchemaStatsExpiry InformationSchemaStatsExpiry `json:"information_schema_stats_expiry"` + InnoDBChangeBufferMaxSize InnoDBChangeBufferMaxSize `json:"innodb_change_buffer_max_size"` + InnoDBFlushNeighbors InnoDBFlushNeighbors `json:"innodb_flush_neighbors"` + InnoDBFTMinTokenSize InnoDBFTMinTokenSize `json:"innodb_ft_min_token_size"` 
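// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch above): a minimal sketch of how a
// caller might populate the new MySQLDatabaseEngineConfig. Only the struct and
// field names come from the definitions added in this diff; the generic ptr
// helper is defined here purely for the example, and the values are arbitrary.

func ptr[T any](v T) *T { return &v }

func exampleMySQLEngineConfig() linodego.MySQLDatabaseEngineConfig {
	return linodego.MySQLDatabaseEngineConfig{
		// Top-level engine settings.
		BinlogRetentionPeriod: ptr(600),
		ServiceLog:            ptr(true),
		// Engine-specific (mysql) settings; every field is an optional pointer,
		// so unset fields are omitted from the request body.
		MySQL: &linodego.MySQLDatabaseEngineConfigMySQL{
			ConnectTimeout: ptr(20),
			LongQueryTime:  ptr(10.0),
			SlowQueryLog:   ptr(true),
			SQLMode:        ptr("ANSI,TRADITIONAL"),
		},
	}
}
// ---------------------------------------------------------------------------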
+ InnoDBFTServerStopwordTable InnoDBFTServerStopwordTable `json:"innodb_ft_server_stopword_table"` + InnoDBLockWaitTimeout InnoDBLockWaitTimeout `json:"innodb_lock_wait_timeout"` + InnoDBLogBufferSize InnoDBLogBufferSize `json:"innodb_log_buffer_size"` + InnoDBOnlineAlterLogMaxSize InnoDBOnlineAlterLogMaxSize `json:"innodb_online_alter_log_max_size"` + InnoDBPrintAllDeadlocks InnoDBPrintAllDeadlocks `json:"innodb_print_all_deadlocks"` + InnoDBReadIOThreads InnoDBReadIOThreads `json:"innodb_read_io_threads"` + InnoDBRollbackOnTimeout InnoDBRollbackOnTimeout `json:"innodb_rollback_on_timeout"` + InnoDBThreadConcurrency InnoDBThreadConcurrency `json:"innodb_thread_concurrency"` + InnoDBWriteIOThreads InnoDBWriteIOThreads `json:"innodb_write_io_threads"` + InteractiveTimeout InteractiveTimeout `json:"interactive_timeout"` + InternalTmpMemStorageEngine InternalTmpMemStorageEngine `json:"internal_tmp_mem_storage_engine"` + LogOutput LogOutput `json:"log_output"` + LongQueryTime LongQueryTime `json:"long_query_time"` + MaxAllowedPacket MaxAllowedPacket `json:"max_allowed_packet"` + MaxHeapTableSize MaxHeapTableSize `json:"max_heap_table_size"` + NetBufferLength NetBufferLength `json:"net_buffer_length"` + NetReadTimeout NetReadTimeout `json:"net_read_timeout"` + NetWriteTimeout NetWriteTimeout `json:"net_write_timeout"` + SlowQueryLog SlowQueryLog `json:"slow_query_log"` + SortBufferSize SortBufferSize `json:"sort_buffer_size"` + SQLMode SQLMode `json:"sql_mode"` + SQLRequirePrimaryKey SQLRequirePrimaryKey `json:"sql_require_primary_key"` + TmpTableSize TmpTableSize `json:"tmp_table_size"` + WaitTimeout WaitTimeout `json:"wait_timeout"` +} + +type ConnectTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type DefaultTimeZone struct { + Description string `json:"description"` + Example string `json:"example"` + MaxLength int `json:"maxLength"` + MinLength int `json:"minLength"` + Pattern string `json:"pattern"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type GroupConcatMaxLen struct { + Description string `json:"description"` + Example float64 `json:"example"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InformationSchemaStatsExpiry struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBChangeBufferMaxSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBFlushNeighbors struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBFTMinTokenSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBFTServerStopwordTable struct { + Description string 
`json:"description"` + Example string `json:"example"` + MaxLength int `json:"maxLength"` + Pattern string `json:"pattern"` + RequiresRestart bool `json:"requires_restart"` + Type []string `json:"type"` +} + +type InnoDBLockWaitTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBLogBufferSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBOnlineAlterLogMaxSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBPrintAllDeadlocks struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBReadIOThreads struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBRollbackOnTimeout struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBThreadConcurrency struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InnoDBWriteIOThreads struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InteractiveTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type InternalTmpMemStorageEngine struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogOutput struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LongQueryTime struct { + Description string `json:"description"` + Example float64 `json:"example"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxAllowedPacket struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxHeapTableSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type 
string `json:"type"` +} + +type NetBufferLength struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type NetReadTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type NetWriteTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type SlowQueryLog struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type SortBufferSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type SQLMode struct { + Description string `json:"description"` + Example string `json:"example"` + MaxLength int `json:"maxLength"` + Pattern string `json:"pattern"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type SQLRequirePrimaryKey struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type TmpTableSize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type WaitTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MySQLDatabaseConfigInfoBinlogRetentionPeriod struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MySQLDatabaseConfigInfoServiceLog struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type []string `json:"type"` } func (d *MySQLDatabase) UnmarshalJSON(b []byte) error { @@ -90,17 +458,19 @@ type MySQLCreateOptions struct { // Deprecated: SSLConnection is a deprecated property, as it is no longer supported in DBaaS V2. 
SSLConnection bool `json:"ssl_connection,omitempty"` - Fork *DatabaseFork `json:"fork,omitempty"` + Fork *DatabaseFork `json:"fork,omitempty"` + EngineConfig *MySQLDatabaseEngineConfig `json:"engine_config,omitempty"` } // MySQLUpdateOptions fields are used when altering the existing MySQL Database type MySQLUpdateOptions struct { - Label string `json:"label,omitempty"` - AllowList *[]string `json:"allow_list,omitempty"` - Updates *DatabaseMaintenanceWindow `json:"updates,omitempty"` - Type string `json:"type,omitempty"` - ClusterSize int `json:"cluster_size,omitempty"` - Version string `json:"version,omitempty"` + Label string `json:"label,omitempty"` + AllowList *[]string `json:"allow_list,omitempty"` + Updates *DatabaseMaintenanceWindow `json:"updates,omitempty"` + Type string `json:"type,omitempty"` + ClusterSize int `json:"cluster_size,omitempty"` + Version string `json:"version,omitempty"` + EngineConfig *MySQLDatabaseEngineConfig `json:"engine_config,omitempty"` } // MySQLDatabaseBackup is information for interacting with a backup for the existing MySQL Database @@ -245,3 +615,8 @@ func (c *Client) ResumeMySQLDatabase(ctx context.Context, databaseID int) error e := formatAPIPath("databases/mysql/instances/%d/resume", databaseID) return doPOSTRequestNoRequestResponseBody(ctx, c, e) } + +// GetMySQLDatabaseConfig returns a detailed list of all the configuration options for MySQL Databases +func (c *Client) GetMySQLDatabaseConfig(ctx context.Context) (*MySQLDatabaseConfigInfo, error) { + return doGETRequest[MySQLDatabaseConfigInfo](ctx, c, "databases/mysql/config") +} diff --git a/postgres.go b/postgres.go index ef89d4cf6..e9e03c8f0 100644 --- a/postgres.go +++ b/postgres.go @@ -60,14 +60,602 @@ type PostgresDatabase struct { // Deprecated: Encrypted is a deprecated property, as it is no longer supported in DBaaS V2. 
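// ---------------------------------------------------------------------------
// Illustrative aside (not part of the patch): a hedged sketch of using the new
// EngineConfig field on MySQLCreateOptions together with the new
// GetMySQLDatabaseConfig method. It assumes an already-configured
// *linodego.Client, the existing CreateMySQLDatabase helper, and the ptr /
// exampleMySQLEngineConfig sketches from the earlier aside; the label, region,
// type, and engine values are placeholders. Imports (context, fmt) omitted.

func createTunedMySQL(ctx context.Context, client *linodego.Client) error {
	cfg := exampleMySQLEngineConfig()

	db, err := client.CreateMySQLDatabase(ctx, linodego.MySQLCreateOptions{
		Label:        "example-db",
		Region:       "us-east",
		Type:         "g6-dedicated-2",
		Engine:       "mysql/8.0.26",
		EngineConfig: &cfg,
	})
	if err != nil {
		return err
	}

	// The config metadata endpoint describes each tunable (type, min/max, and
	// whether changing it restarts the service), which callers can use to
	// validate values before sending an update.
	info, err := client.GetMySQLDatabaseConfig(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("db %d created; connect_timeout allows %d-%d (restart: %t)\n",
		db.ID,
		info.MySQL.ConnectTimeout.Minimum,
		info.MySQL.ConnectTimeout.Maximum,
		info.MySQL.ConnectTimeout.RequiresRestart,
	)
	return nil
}
// ---------------------------------------------------------------------------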
Encrypted bool `json:"encrypted"` - Hosts DatabaseHost `json:"hosts"` - Updates DatabaseMaintenanceWindow `json:"updates"` - Created *time.Time `json:"-"` - Updated *time.Time `json:"-"` - Fork *DatabaseFork `json:"fork"` - OldestRestoreTime *time.Time `json:"-"` - UsedDiskSizeGB int `json:"used_disk_size_gb"` - TotalDiskSizeGB int `json:"total_disk_size_gb"` + Hosts DatabaseHost `json:"hosts"` + Updates DatabaseMaintenanceWindow `json:"updates"` + Created *time.Time `json:"-"` + Updated *time.Time `json:"-"` + Fork *DatabaseFork `json:"fork"` + OldestRestoreTime *time.Time `json:"-"` + UsedDiskSizeGB int `json:"used_disk_size_gb"` + TotalDiskSizeGB int `json:"total_disk_size_gb"` + EngineConfig PostgresDatabaseEngineConfig `json:"engine_config"` +} + +type PostgresDatabaseEngineConfig struct { + PG *PostgresDatabaseEngineConfigPG `json:"pg,omitempty"` + PGStatMonitorEnable *bool `json:"pg_stat_monitor_enable,omitempty"` + PGLookout *PostgresDatabaseEngineConfigPGLookout `json:"pglookout,omitempty"` + ServiceLog *bool `json:"service_log,omitempty"` + SharedBuffersPercentage *float64 `json:"shared_buffers_percentage,omitempty"` + SynchronousReplication *string `json:"synchronous_replication,omitempty"` + WorkMem *int `json:"work_mem,omitempty"` +} + +type PostgresDatabaseEngineConfigPG struct { + AutovacuumAnalyzeScaleFactor *float64 `json:"autovacuum_analyze_scale_factor,omitempty"` + AutovacuumAnalyzeThreshold *int32 `json:"autovacuum_analyze_threshold,omitempty"` + AutovacuumFreezeMaxAge *int `json:"autovacuum_freeze_max_age,omitempty"` + AutovacuumMaxWorkers *int `json:"autovacuum_max_workers,omitempty"` + AutovacuumNaptime *int `json:"autovacuum_naptime,omitempty"` + AutovacuumVacuumCostDelay *int `json:"autovacuum_vacuum_cost_delay,omitempty"` + AutovacuumVacuumCostLimit *int `json:"autovacuum_vacuum_cost_limit,omitempty"` + AutovacuumVacuumScaleFactor *float64 `json:"autovacuum_vacuum_scale_factor,omitempty"` + AutovacuumVacuumThreshold *int32 `json:"autovacuum_vacuum_threshold,omitempty"` + BGWriterDelay *int `json:"bgwriter_delay,omitempty"` + BGWriterFlushAfter *int `json:"bgwriter_flush_after,omitempty"` + BGWriterLRUMaxPages *int `json:"bgwriter_lru_maxpages,omitempty"` + BGWriterLRUMultiplier *float64 `json:"bgwriter_lru_multiplier,omitempty"` + DeadlockTimeout *int `json:"deadlock_timeout,omitempty"` + DefaultToastCompression *string `json:"default_toast_compression,omitempty"` + IdleInTransactionSessionTimeout *int `json:"idle_in_transaction_session_timeout,omitempty"` + JIT *bool `json:"jit,omitempty"` + LogAutovacuumMinDuration *int32 `json:"log_autovacuum_min_duration,omitempty"` + LogErrorVerbosity *string `json:"log_error_verbosity,omitempty"` + LogLinePrefix *string `json:"log_line_prefix,omitempty"` + LogMinDurationStatement *int `json:"log_min_duration_statement,omitempty"` + LogTempFiles *int32 `json:"log_temp_files,omitempty"` + MaxFilesPerProcess *int `json:"max_files_per_process,omitempty"` + MaxLocksPerTransaction *int `json:"max_locks_per_transaction,omitempty"` + MaxLogicalReplicationWorkers *int `json:"max_logical_replication_workers,omitempty"` + MaxParallelWorkers *int `json:"max_parallel_workers,omitempty"` + MaxParallelWorkersPerGather *int `json:"max_parallel_workers_per_gather,omitempty"` + MaxPredLocksPerTransaction *int `json:"max_pred_locks_per_transaction,omitempty"` + MaxPreparedTransactions *int `json:"max_prepared_transactions,omitempty"` + MaxReplicationSlots *int `json:"max_replication_slots,omitempty"` + MaxSlotWALKeepSize *int32 
`json:"max_slot_wal_keep_size,omitempty"` + MaxStackDepth *int `json:"max_stack_depth,omitempty"` + MaxStandbyArchiveDelay *int `json:"max_standby_archive_delay,omitempty"` + MaxStandbyStreamingDelay *int `json:"max_standby_streaming_delay,omitempty"` + MaxWALSenders *int `json:"max_wal_senders,omitempty"` + MaxWorkerProcesses *int `json:"max_worker_processes,omitempty"` + PasswordEncryption *string `json:"password_encryption,omitempty"` + PGPartmanBGWInterval *int `json:"pg_partman_bgw.interval,omitempty"` + PGPartmanBGWRole *string `json:"pg_partman_bgw.role,omitempty"` + PGStatMonitorPGSMEnableQueryPlan *bool `json:"pg_stat_monitor.pgsm_enable_query_plan,omitempty"` + PGStatMonitorPGSMMaxBuckets *int `json:"pg_stat_monitor.pgsm_max_buckets,omitempty"` + PGStatStatementsTrack *string `json:"pg_stat_statements.track,omitempty"` + TempFileLimit *int32 `json:"temp_file_limit,omitempty"` + Timezone *string `json:"timezone,omitempty"` + TrackActivityQuerySize *int `json:"track_activity_query_size,omitempty"` + TrackCommitTimestamp *string `json:"track_commit_timestamp,omitempty"` + TrackFunctions *string `json:"track_functions,omitempty"` + TrackIOTiming *string `json:"track_io_timing,omitempty"` + WALSenderTimeout *int `json:"wal_sender_timeout,omitempty"` + WALWriterDelay *int `json:"wal_writer_delay,omitempty"` +} + +type PostgresDatabaseEngineConfigPGLookout struct { + MaxFailoverReplicationTimeLag *int64 `json:"max_failover_replication_time_lag,omitempty"` +} + +type PostgresDatabaseConfigInfo struct { + PG PostgresDatabaseConfigInfoPG `json:"pg"` + PGStatMonitorEnable PostgresDatabaseConfigInfoPGStatMonitorEnable `json:"pg_stat_monitor_enable"` + PGLookout PostgresDatabaseConfigInfoPGLookout `json:"pglookout"` + ServiceLog PostgresDatabaseConfigInfoServiceLog `json:"service_log"` + SharedBuffersPercentage PostgresDatabaseConfigInfoSharedBuffersPercentage `json:"shared_buffers_percentage"` + SynchronousReplication PostgresDatabaseConfigInfoSynchronousReplication `json:"synchronous_replication"` + WorkMem PostgresDatabaseConfigInfoWorkMem `json:"work_mem"` +} + +type PostgresDatabaseConfigInfoPG struct { + AutovacuumAnalyzeScaleFactor AutovacuumAnalyzeScaleFactor `json:"autovacuum_analyze_scale_factor"` + AutovacuumAnalyzeThreshold AutovacuumAnalyzeThreshold `json:"autovacuum_analyze_threshold"` + AutovacuumFreezeMaxAge AutovacuumFreezeMaxAge `json:"autovacuum_freeze_max_age"` + AutovacuumMaxWorkers AutovacuumMaxWorkers `json:"autovacuum_max_workers"` + AutovacuumNaptime AutovacuumNaptime `json:"autovacuum_naptime"` + AutovacuumVacuumCostDelay AutovacuumVacuumCostDelay `json:"autovacuum_vacuum_cost_delay"` + AutovacuumVacuumCostLimit AutovacuumVacuumCostLimit `json:"autovacuum_vacuum_cost_limit"` + AutovacuumVacuumScaleFactor AutovacuumVacuumScaleFactor `json:"autovacuum_vacuum_scale_factor"` + AutovacuumVacuumThreshold AutovacuumVacuumThreshold `json:"autovacuum_vacuum_threshold"` + BGWriterDelay BGWriterDelay `json:"bgwriter_delay"` + BGWriterFlushAfter BGWriterFlushAfter `json:"bgwriter_flush_after"` + BGWriterLRUMaxPages BGWriterLRUMaxPages `json:"bgwriter_lru_maxpages"` + BGWriterLRUMultiplier BGWriterLRUMultiplier `json:"bgwriter_lru_multiplier"` + DeadlockTimeout DeadlockTimeout `json:"deadlock_timeout"` + DefaultToastCompression DefaultToastCompression `json:"default_toast_compression"` + IdleInTransactionSessionTimeout IdleInTransactionSessionTimeout `json:"idle_in_transaction_session_timeout"` + JIT JIT `json:"jit"` + LogAutovacuumMinDuration LogAutovacuumMinDuration 
`json:"log_autovacuum_min_duration"` + LogErrorVerbosity LogErrorVerbosity `json:"log_error_verbosity"` + LogLinePrefix LogLinePrefix `json:"log_line_prefix"` + LogMinDurationStatement LogMinDurationStatement `json:"log_min_duration_statement"` + LogTempFiles LogTempFiles `json:"log_temp_files"` + MaxFilesPerProcess MaxFilesPerProcess `json:"max_files_per_process"` + MaxLocksPerTransaction MaxLocksPerTransaction `json:"max_locks_per_transaction"` + MaxLogicalReplicationWorkers MaxLogicalReplicationWorkers `json:"max_logical_replication_workers"` + MaxParallelWorkers MaxParallelWorkers `json:"max_parallel_workers"` + MaxParallelWorkersPerGather MaxParallelWorkersPerGather `json:"max_parallel_workers_per_gather"` + MaxPredLocksPerTransaction MaxPredLocksPerTransaction `json:"max_pred_locks_per_transaction"` + MaxPreparedTransactions MaxPreparedTransactions `json:"max_prepared_transactions"` + MaxReplicationSlots MaxReplicationSlots `json:"max_replication_slots"` + MaxSlotWALKeepSize MaxSlotWALKeepSize `json:"max_slot_wal_keep_size"` + MaxStackDepth MaxStackDepth `json:"max_stack_depth"` + MaxStandbyArchiveDelay MaxStandbyArchiveDelay `json:"max_standby_archive_delay"` + MaxStandbyStreamingDelay MaxStandbyStreamingDelay `json:"max_standby_streaming_delay"` + MaxWALSenders MaxWALSenders `json:"max_wal_senders"` + MaxWorkerProcesses MaxWorkerProcesses `json:"max_worker_processes"` + PasswordEncryption PasswordEncryption `json:"password_encryption"` + PGPartmanBGWInterval PGPartmanBGWInterval `json:"pg_partman_bgw.interval"` + PGPartmanBGWRole PGPartmanBGWRole `json:"pg_partman_bgw.role"` + PGStatMonitorPGSMEnableQueryPlan PGStatMonitorPGSMEnableQueryPlan `json:"pg_stat_monitor.pgsm_enable_query_plan"` + PGStatMonitorPGSMMaxBuckets PGStatMonitorPGSMMaxBuckets `json:"pg_stat_monitor.pgsm_max_buckets"` + PGStatStatementsTrack PGStatStatementsTrack `json:"pg_stat_statements.track"` + TempFileLimit TempFileLimit `json:"temp_file_limit"` + Timezone Timezone `json:"timezone"` + TrackActivityQuerySize TrackActivityQuerySize `json:"track_activity_query_size"` + TrackCommitTimestamp TrackCommitTimestamp `json:"track_commit_timestamp"` + TrackFunctions TrackFunctions `json:"track_functions"` + TrackIOTiming TrackIOTiming `json:"track_io_timing"` + WALSenderTimeout WALSenderTimeout `json:"wal_sender_timeout"` + WALWriterDelay WALWriterDelay `json:"wal_writer_delay"` +} + +type AutovacuumAnalyzeScaleFactor struct { + Description string `json:"description"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumAnalyzeThreshold struct { + Description string `json:"description"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumFreezeMaxAge struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumMaxWorkers struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumNaptime struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string 
`json:"type"` +} + +type AutovacuumVacuumCostDelay struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumVacuumCostLimit struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumVacuumScaleFactor struct { + Description string `json:"description"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type AutovacuumVacuumThreshold struct { + Description string `json:"description"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type BGWriterDelay struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type BGWriterFlushAfter struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type BGWriterLRUMaxPages struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type BGWriterLRUMultiplier struct { + Description string `json:"description"` + Example float64 `json:"example"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type DeadlockTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type DefaultToastCompression struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type IdleInTransactionSessionTimeout struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type JIT struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogAutovacuumMinDuration struct { + Description string `json:"description"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogErrorVerbosity struct { + Description string `json:"description"` + Enum []string `json:"enum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogLinePrefix struct { + Description string `json:"description"` + Enum []string `json:"enum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogMinDurationStatement struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + 
Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type LogTempFiles struct { + Description string `json:"description"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxFilesPerProcess struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxLocksPerTransaction struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxLogicalReplicationWorkers struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxParallelWorkers struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxParallelWorkersPerGather struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxPredLocksPerTransaction struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxPreparedTransactions struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxReplicationSlots struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxSlotWALKeepSize struct { + Description string `json:"description"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxStackDepth struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxStandbyArchiveDelay struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxStandbyStreamingDelay struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxWALSenders struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type MaxWorkerProcesses struct { + Description string `json:"description"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PasswordEncryption struct { + Description string `json:"description"` + Enum []string 
`json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type []string `json:"type"` +} + +type PGPartmanBGWInterval struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PGPartmanBGWRole struct { + Description string `json:"description"` + Example string `json:"example"` + MaxLength int `json:"maxLength"` + Pattern string `json:"pattern"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PGStatMonitorPGSMEnableQueryPlan struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PGStatMonitorPGSMMaxBuckets struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PGStatStatementsTrack struct { + Description string `json:"description"` + Enum []string `json:"enum"` + RequiresRestart bool `json:"requires_restart"` + Type []string `json:"type"` +} + +type TempFileLimit struct { + Description string `json:"description"` + Example int32 `json:"example"` + Maximum int32 `json:"maximum"` + Minimum int32 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type Timezone struct { + Description string `json:"description"` + Example string `json:"example"` + MaxLength int `json:"maxLength"` + Pattern string `json:"pattern"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type TrackActivityQuerySize struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type TrackCommitTimestamp struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type TrackFunctions struct { + Description string `json:"description"` + Enum []string `json:"enum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type TrackIOTiming struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type WALSenderTimeout struct { + Description string `json:"description"` + Example int `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type WALWriterDelay struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PostgresDatabaseConfigInfoPGStatMonitorEnable struct { + Description string `json:"description"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PostgresDatabaseConfigInfoPGLookout struct { + PGLookoutMaxFailoverReplicationTimeLag PGLookoutMaxFailoverReplicationTimeLag `json:"max_failover_replication_time_lag"` +} + +type PGLookoutMaxFailoverReplicationTimeLag struct { + Description 
string `json:"description"` + Maximum int64 `json:"maximum"` + Minimum int64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PostgresDatabaseConfigInfoServiceLog struct { + Description string `json:"description"` + Example bool `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type []string `json:"type"` +} + +type PostgresDatabaseConfigInfoSharedBuffersPercentage struct { + Description string `json:"description"` + Example float64 `json:"example"` + Maximum float64 `json:"maximum"` + Minimum float64 `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PostgresDatabaseConfigInfoSynchronousReplication struct { + Description string `json:"description"` + Enum []string `json:"enum"` + Example string `json:"example"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` +} + +type PostgresDatabaseConfigInfoWorkMem struct { + Description string `json:"description"` + Example int `json:"example"` + Maximum int `json:"maximum"` + Minimum int `json:"minimum"` + RequiresRestart bool `json:"requires_restart"` + Type string `json:"type"` } func (d *PostgresDatabase) UnmarshalJSON(b []byte) error { @@ -111,16 +699,19 @@ type PostgresCreateOptions struct { ReplicationCommitType PostgresCommitType `json:"replication_commit_type,omitempty"` Fork *DatabaseFork `json:"fork,omitempty"` + + EngineConfig *PostgresDatabaseEngineConfig `json:"engine_config,omitempty"` } // PostgresUpdateOptions fields are used when altering the existing Postgres Database type PostgresUpdateOptions struct { - Label string `json:"label,omitempty"` - AllowList *[]string `json:"allow_list,omitempty"` - Updates *DatabaseMaintenanceWindow `json:"updates,omitempty"` - Type string `json:"type,omitempty"` - ClusterSize int `json:"cluster_size,omitempty"` - Version string `json:"version,omitempty"` + Label string `json:"label,omitempty"` + AllowList *[]string `json:"allow_list,omitempty"` + Updates *DatabaseMaintenanceWindow `json:"updates,omitempty"` + Type string `json:"type,omitempty"` + ClusterSize int `json:"cluster_size,omitempty"` + Version string `json:"version,omitempty"` + EngineConfig *PostgresDatabaseEngineConfig `json:"engine_config,omitempty"` } // PostgresDatabaseSSL is the SSL Certificate to access the Linode Managed Postgres Database @@ -265,3 +856,8 @@ func (c *Client) ResumePostgresDatabase(ctx context.Context, databaseID int) err e := formatAPIPath("databases/postgresql/instances/%d/resume", databaseID) return doPOSTRequestNoRequestResponseBody(ctx, c, e) } + +// GetPostgresDatabaseConfig returns a detailed list of all the configuration options for PostgreSQL Databases +func (c *Client) GetPostgresDatabaseConfig(ctx context.Context) (*PostgresDatabaseConfigInfo, error) { + return doGETRequest[PostgresDatabaseConfigInfo](ctx, c, "databases/postgresql/config") +} diff --git a/test/unit/fixtures/mysql_database_config_get.json b/test/unit/fixtures/mysql_database_config_get.json new file mode 100644 index 000000000..ce6cb6b47 --- /dev/null +++ b/test/unit/fixtures/mysql_database_config_get.json @@ -0,0 +1,271 @@ +{ + "mysql": { + "connect_timeout": { + "description": "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + "example": 10, + "maximum": 3600, + "minimum": 2, + "requires_restart": false, + "type": "integer" + }, + "default_time_zone": { + "description": "Default server time zone as an offset from UTC (from 
-12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + "example": "+03:00", + "maxLength": 100, + "minLength": 2, + "pattern": "^([-+][\\d:]*|[\\w/]*)$", + "requires_restart": false, + "type": "string" + }, + "group_concat_max_len": { + "description": "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + "example": 1024, + "maximum": 18446744073709551600, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "information_schema_stats_expiry": { + "description": "The time, in seconds, before cached statistics expire", + "example": 86400, + "maximum": 31536000, + "minimum": 900, + "requires_restart": false, + "type": "integer" + }, + "innodb_change_buffer_max_size": { + "description": "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + "example": 30, + "maximum": 50, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_flush_neighbors": { + "description": "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + "example": 0, + "maximum": 2, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_ft_min_token_size": { + "description": "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + "example": 3, + "maximum": 16, + "minimum": 0, + "requires_restart": true, + "type": "integer" + }, + "innodb_ft_server_stopword_table": { + "description": "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + "example": "db_name/table_name", + "maxLength": 1024, + "pattern": "^.+/.+$", + "requires_restart": false, + "type": [ + "null", + "string" + ] + }, + "innodb_lock_wait_timeout": { + "description": "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. Default is 120.", + "example": 50, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "innodb_log_buffer_size": { + "description": "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + "example": 16777216, + "maximum": 4294967295, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "innodb_online_alter_log_max_size": { + "description": "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + "example": 134217728, + "maximum": 1099511627776, + "minimum": 65536, + "requires_restart": false, + "type": "integer" + }, + "innodb_print_all_deadlocks": { + "description": "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "innodb_read_io_threads": { + "description": "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "innodb_rollback_on_timeout": { + "description": "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. 
Changing this parameter will lead to a restart of the MySQL service.", + "example": true, + "requires_restart": true, + "type": "boolean" + }, + "innodb_thread_concurrency": { + "description": "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + "example": 10, + "maximum": 1000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "innodb_write_io_threads": { + "description": "The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + "example": 10, + "maximum": 64, + "minimum": 1, + "requires_restart": true, + "type": "integer" + }, + "interactive_timeout": { + "description": "The number of seconds the server waits for activity on an interactive connection before closing it.", + "example": 3600, + "maximum": 604800, + "minimum": 30, + "requires_restart": false, + "type": "integer" + }, + "internal_tmp_mem_storage_engine": { + "description": "The storage engine for in-memory internal temporary tables.", + "enum": [ + "TempTable", + "MEMORY" + ], + "example": "TempTable", + "requires_restart": false, + "type": "string" + }, + "log_output": { + "description": "The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", + "enum": [ + "INSIGHTS", + "NONE", + "TABLE", + "INSIGHTS,TABLE" + ], + "example": "INSIGHTS", + "requires_restart": false, + "type": "string" + }, + "long_query_time": { + "description": "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute.", + "example": 10, + "maximum": 3600, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "max_allowed_packet": { + "description": "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + "example": 67108864, + "maximum": 1073741824, + "minimum": 102400, + "requires_restart": false, + "type": "integer" + }, + "max_heap_table_size": { + "description": "Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "net_buffer_length": { + "description": "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", + "example": 16384, + "maximum": 1048576, + "minimum": 1024, + "requires_restart": true, + "type": "integer" + }, + "net_read_timeout": { + "description": "The number of seconds to wait for more data from a connection before aborting the read.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "net_write_timeout": { + "description": "The number of seconds to wait for a block to be written to a connection before aborting the write.", + "example": 30, + "maximum": 3600, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "slow_query_log": { + "description": "Slow query log enables capturing of slow queries. 
Setting slow_query_log to false also truncates the mysql.slow_log table.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "sort_buffer_size": { + "description": "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + "example": 262144, + "maximum": 1073741824, + "minimum": 32768, + "requires_restart": false, + "type": "integer" + }, + "sql_mode": { + "description": "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + "example": "ANSI,TRADITIONAL", + "maxLength": 1024, + "pattern": "^[A-Z_]*(,[A-Z_]+)*$", + "requires_restart": false, + "type": "string" + }, + "sql_require_primary_key": { + "description": "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "tmp_table_size": { + "description": "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)", + "example": 16777216, + "maximum": 1073741824, + "minimum": 1048576, + "requires_restart": false, + "type": "integer" + }, + "wait_timeout": { + "description": "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + "example": 28800, + "maximum": 2147483, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } + }, + "binlog_retention_period": { + "description": "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + "example": 600, + "maximum": 86400, + "minimum": 600, + "requires_restart": false, + "type": "integer" + }, + "service_log": { + "description": "Store logs for the service so that they are available in the HTTP API and console.", + "example": true, + "requires_restart": false, + "type": [ + "boolean", + "null" + ] + } +} \ No newline at end of file diff --git a/test/unit/fixtures/mysql_database_create.json b/test/unit/fixtures/mysql_database_create.json index b814d8763..cd4c4e545 100644 --- a/test/unit/fixtures/mysql_database_create.json +++ b/test/unit/fixtures/mysql_database_create.json @@ -38,5 +38,42 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "8.0.26" + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "service_log": false, + "mysql": { + "connect_timeout": 20, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + "innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_print_all_deadlocks": true, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "log_output": "INSIGHTS", + "long_query_time": 10, + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + 
"net_read_timeout": 30, + "net_write_timeout": 30, + "slow_query_log": true, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + } } \ No newline at end of file diff --git a/test/unit/fixtures/mysql_database_get.json b/test/unit/fixtures/mysql_database_get.json index 14d37030f..ea3a08ca5 100644 --- a/test/unit/fixtures/mysql_database_get.json +++ b/test/unit/fixtures/mysql_database_get.json @@ -38,5 +38,42 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "8.0.26" + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "service_log": true, + "mysql": { + "connect_timeout": 10, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + "innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_print_all_deadlocks": true, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "log_output": "INSIGHTS", + "long_query_time": 10, + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + "net_read_timeout": 30, + "net_write_timeout": 30, + "slow_query_log": true, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + } } \ No newline at end of file diff --git a/test/unit/fixtures/mysql_database_update.json b/test/unit/fixtures/mysql_database_update.json index c51faf945..1710e13fa 100644 --- a/test/unit/fixtures/mysql_database_update.json +++ b/test/unit/fixtures/mysql_database_update.json @@ -38,5 +38,42 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "8.0.26" + "version": "8.0.26", + "engine_config": { + "binlog_retention_period": 600, + "service_log": false, + "mysql": { + "connect_timeout": 20, + "default_time_zone": "+03:00", + "group_concat_max_len": 1024, + "information_schema_stats_expiry": 86400, + "innodb_change_buffer_max_size": 30, + "innodb_flush_neighbors": 0, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "db_name/table_name", + "innodb_lock_wait_timeout": 50, + "innodb_log_buffer_size": 16777216, + "innodb_online_alter_log_max_size": 134217728, + "innodb_print_all_deadlocks": true, + "innodb_read_io_threads": 10, + "innodb_rollback_on_timeout": true, + "innodb_thread_concurrency": 10, + "innodb_write_io_threads": 10, + "interactive_timeout": 3600, + "internal_tmp_mem_storage_engine": "TempTable", + "log_output": "INSIGHTS", + "long_query_time": 10, + "max_allowed_packet": 67108864, + "max_heap_table_size": 16777216, + "net_buffer_length": 16384, + "net_read_timeout": 30, + "net_write_timeout": 30, + "slow_query_log": true, + "sort_buffer_size": 262144, + "sql_mode": "ANSI,TRADITIONAL", + "sql_require_primary_key": true, + "tmp_table_size": 16777216, + "wait_timeout": 28800 + } + } } \ No newline at end of file diff --git a/test/unit/fixtures/postgresql_database_config_get.json b/test/unit/fixtures/postgresql_database_config_get.json new file mode 100644 index 000000000..5f3e8b43b --- /dev/null +++ 
b/test/unit/fixtures/postgresql_database_config_get.json @@ -0,0 +1,443 @@ +{ + "pg": { + "autovacuum_analyze_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_analyze_threshold": { + "description": "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_freeze_max_age": { + "description": "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.", + "example": 200000000, + "maximum": 1500000000, + "minimum": 200000000, + "requires_restart": true, + "type": "integer" + }, + "autovacuum_max_workers": { + "description": "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.", + "maximum": 20, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_naptime": { + "description": "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute", + "maximum": 86400, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_delay": { + "description": "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds", + "maximum": 100, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_cost_limit": { + "description": "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.", + "maximum": 10000, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "autovacuum_vacuum_scale_factor": { + "description": "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)", + "maximum": 1.0, + "minimum": 0.0, + "requires_restart": false, + "type": "number" + }, + "autovacuum_vacuum_threshold": { + "description": "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples", + "maximum": 2147483647, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_delay": { + "description": "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.", + "example": 200, + "maximum": 10000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_flush_after": { + "description": "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. 
Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.", + "example": 512, + "maximum": 2048, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_maxpages": { + "description": "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.", + "example": 100, + "maximum": 1073741823, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "bgwriter_lru_multiplier": { + "description": "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a \u201cjust in time\u201d policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.", + "example": 2.0, + "maximum": 10, + "minimum": 0, + "requires_restart": false, + "type": "number" + }, + "deadlock_timeout": { + "description": "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.", + "example": 1000, + "maximum": 1800000, + "minimum": 500, + "requires_restart": false, + "type": "integer" + }, + "default_toast_compression": { + "description": "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + "enum": [ + "lz4", + "pglz" + ], + "example": "lz4", + "requires_restart": false, + "type": "string" + }, + "idle_in_transaction_session_timeout": { + "description": "Time out sessions with open transactions after this number of milliseconds", + "maximum": 604800000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "jit": { + "description": "Controls system-wide use of Just-in-Time Compilation (JIT).", + "example": true, + "requires_restart": false, + "type": "boolean" + }, + "log_autovacuum_min_duration": { + "description": "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. 
Minus-one (the default) disables logging autovacuum actions.", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "log_error_verbosity": { + "description": "Controls the amount of detail written in the server log for each message that is logged.", + "enum": [ + "TERSE", + "DEFAULT", + "VERBOSE" + ], + "requires_restart": false, + "type": "string" + }, + "log_line_prefix": { + "description": "Choose from one of the available log formats.", + "enum": [ + "'pid=%p,user=%u,db=%d,app=%a,client=%h '", + "'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '", + "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", + "'%m [%p] %q[user=%u,db=%d,app=%a] '" + ], + "requires_restart": false, + "type": "string" + }, + "log_min_duration_statement": { + "description": "Log statements that take more than this number of milliseconds to run, -1 disables", + "maximum": 86400000, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "log_temp_files": { + "description": "Log statements for each temporary file created larger than this number of kilobytes, -1 disables", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "max_files_per_process": { + "description": "PostgreSQL maximum number of files that can be open per process", + "maximum": 4096, + "minimum": 1000, + "requires_restart": false, + "type": "integer" + }, + "max_locks_per_transaction": { + "description": "PostgreSQL maximum locks per transaction", + "maximum": 6400, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_logical_replication_workers": { + "description": "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)", + "maximum": 64, + "minimum": 4, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers": { + "description": "Sets the maximum number of workers that the system can support for parallel queries", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_parallel_workers_per_gather": { + "description": "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node", + "maximum": 96, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_pred_locks_per_transaction": { + "description": "PostgreSQL maximum predicate locks per transaction", + "maximum": 5120, + "minimum": 64, + "requires_restart": false, + "type": "integer" + }, + "max_prepared_transactions": { + "description": "PostgreSQL maximum prepared transactions", + "maximum": 10000, + "minimum": 0, + "requires_restart": false, + "type": "integer" + }, + "max_replication_slots": { + "description": "PostgreSQL maximum replication slots", + "maximum": 64, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "max_slot_wal_keep_size": { + "description": "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). 
wal_keep_size minimum WAL size setting takes precedence over this.", + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "max_stack_depth": { + "description": "Maximum depth of the stack in bytes", + "maximum": 6291456, + "minimum": 2097152, + "requires_restart": false, + "type": "integer" + }, + "max_standby_archive_delay": { + "description": "Max standby archive delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_standby_streaming_delay": { + "description": "Max standby streaming delay in milliseconds", + "maximum": 43200000, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "max_wal_senders": { + "description": "PostgreSQL maximum WAL senders", + "maximum": 64, + "minimum": 20, + "requires_restart": false, + "type": "integer" + }, + "max_worker_processes": { + "description": "Sets the maximum number of background processes that the system can support", + "maximum": 96, + "minimum": 8, + "requires_restart": false, + "type": "integer" + }, + "password_encryption": { + "description": "Chooses the algorithm for encrypting passwords.", + "enum": [ + "md5", + "scram-sha-256" + ], + "example": "scram-sha-256", + "requires_restart": false, + "type": [ + "string", + "null" + ] + }, + "pg_partman_bgw.interval": { + "description": "Sets the time interval to run pg_partman's scheduled tasks", + "example": 3600, + "maximum": 604800, + "minimum": 3600, + "requires_restart": false, + "type": "integer" + }, + "pg_partman_bgw.role": { + "description": "Controls which role to use for pg_partman's scheduled background tasks.", + "example": "myrolename", + "maxLength": 64, + "pattern": "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", + "requires_restart": false, + "type": "string" + }, + "pg_stat_monitor.pgsm_enable_query_plan": { + "description": "Enables or disables query plan monitoring", + "example": false, + "requires_restart": false, + "type": "boolean" + }, + "pg_stat_monitor.pgsm_max_buckets": { + "description": "Sets the maximum number of buckets", + "example": 10, + "maximum": 10, + "minimum": 1, + "requires_restart": false, + "type": "integer" + }, + "pg_stat_statements.track": { + "description": "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. 
The default value is top.", + "enum": [ + "all", + "top", + "none" + ], + "requires_restart": false, + "type": [ + "string" + ] + }, + "temp_file_limit": { + "description": "PostgreSQL temporary file limit in KiB, -1 for unlimited", + "example": 5000000, + "maximum": 2147483647, + "minimum": -1, + "requires_restart": false, + "type": "integer" + }, + "timezone": { + "description": "PostgreSQL service timezone", + "example": "Europe/Helsinki", + "maxLength": 64, + "pattern": "^[\\w/]*$", + "requires_restart": false, + "type": "string" + }, + "track_activity_query_size": { + "description": "Specifies the number of bytes reserved to track the currently executing command for each active session.", + "example": 1024, + "maximum": 10240, + "minimum": 1024, + "requires_restart": false, + "type": "integer" + }, + "track_commit_timestamp": { + "description": "Record commit time of transactions.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "track_functions": { + "description": "Enables tracking of function call counts and time used.", + "enum": [ + "all", + "pl", + "none" + ], + "requires_restart": false, + "type": "string" + }, + "track_io_timing": { + "description": "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + "enum": [ + "off", + "on" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "wal_sender_timeout": { + "description": "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.", + "example": 60000, + "requires_restart": false, + "type": "integer" + }, + "wal_writer_delay": { + "description": "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance", + "example": 50, + "maximum": 200, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "pg_stat_monitor_enable": { + "description": "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable", + "requires_restart": true, + "type": "boolean" + }, + "pglookout": { + "max_failover_replication_time_lag": { + "description": "Number of seconds of master unavailability before triggering database failover to standby", + "maximum": 9223372036854775000, + "minimum": 10, + "requires_restart": false, + "type": "integer" + } + }, + "service_log": { + "description": "Store logs for the service so that they are available in the HTTP API and console.", + "example": true, + "requires_restart": false, + "type": [ + "boolean", + "null" + ] + }, + "shared_buffers_percentage": { + "description": "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.", + "example": 41.5, + "maximum": 60.0, + "minimum": 20.0, + "requires_restart": false, + "type": "number" + }, + "synchronous_replication": { + "description": "Synchronous replication type. 
Note that the service plan also needs to support synchronous replication.", + "enum": [ + "quorum", + "off" + ], + "example": "off", + "requires_restart": false, + "type": "string" + }, + "work_mem": { + "description": "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).", + "example": 4, + "maximum": 1024, + "minimum": 1, + "requires_restart": false, + "type": "integer" + } +} \ No newline at end of file diff --git a/test/unit/fixtures/postgresql_database_create.json b/test/unit/fixtures/postgresql_database_create.json index 16458e60b..e6181fa87 100644 --- a/test/unit/fixtures/postgresql_database_create.json +++ b/test/unit/fixtures/postgresql_database_create.json @@ -37,5 +37,67 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "13.2" + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_freeze_max_age": 400000000, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + "autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "log_autovacuum_min_duration": 100, + "log_error_verbosity": "DEFAULT", + "log_line_prefix": "'pid=%p,user=%u,db=%d,app=%a,client=%h '", + "log_min_duration_statement": 100, + "log_temp_files": 100, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + "max_prepared_transactions": 5000, + "max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + "pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + "track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "service_log": false, + "shared_buffers_percentage": 41.5, + "synchronous_replication": "off", + "work_mem": 4 + } } \ No newline at end of file diff --git a/test/unit/fixtures/postgresql_database_get.json b/test/unit/fixtures/postgresql_database_get.json index 4ea070c61..be4c084be 100644 --- a/test/unit/fixtures/postgresql_database_get.json +++ b/test/unit/fixtures/postgresql_database_get.json @@ -37,5 +37,67 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "13.2" + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_freeze_max_age": 200000000, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + 
"autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "log_autovacuum_min_duration": 100, + "log_error_verbosity": "DEFAULT", + "log_line_prefix": "'pid=%p,user=%u,db=%d,app=%a,client=%h '", + "log_min_duration_statement": 100, + "log_temp_files": 100, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + "max_prepared_transactions": 5000, + "max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + "pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + "track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "service_log": true, + "shared_buffers_percentage": 41.5, + "synchronous_replication": "off", + "work_mem": 4 + } } \ No newline at end of file diff --git a/test/unit/fixtures/postgresql_database_update.json b/test/unit/fixtures/postgresql_database_update.json index f7c579fbe..c93e4cc56 100644 --- a/test/unit/fixtures/postgresql_database_update.json +++ b/test/unit/fixtures/postgresql_database_update.json @@ -37,5 +37,67 @@ "pending": [] }, "used_disk_size_gb": 2, - "version": "13.2" + "version": "13.2", + "engine_config": { + "pg": { + "autovacuum_analyze_scale_factor": 0.5, + "autovacuum_analyze_threshold": 100, + "autovacuum_freeze_max_age": 400000000, + "autovacuum_max_workers": 10, + "autovacuum_naptime": 100, + "autovacuum_vacuum_cost_delay": 50, + "autovacuum_vacuum_cost_limit": 100, + "autovacuum_vacuum_scale_factor": 0.5, + "autovacuum_vacuum_threshold": 100, + "bgwriter_delay": 200, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2.0, + "deadlock_timeout": 1000, + "default_toast_compression": "lz4", + "idle_in_transaction_session_timeout": 100, + "jit": true, + "log_autovacuum_min_duration": 100, + "log_error_verbosity": "DEFAULT", + "log_line_prefix": "'pid=%p,user=%u,db=%d,app=%a,client=%h '", + "log_min_duration_statement": 100, + "log_temp_files": 100, + "max_files_per_process": 100, + "max_locks_per_transaction": 100, + "max_logical_replication_workers": 32, + "max_parallel_workers": 64, + "max_parallel_workers_per_gather": 64, + "max_pred_locks_per_transaction": 1000, + "max_prepared_transactions": 5000, + "max_replication_slots": 32, + "max_slot_wal_keep_size": 100, + "max_stack_depth": 3507152, + "max_standby_archive_delay": 1000, + "max_standby_streaming_delay": 1000, + "max_wal_senders": 32, + "max_worker_processes": 64, + "password_encryption": "scram-sha-256", + 
"pg_partman_bgw.interval": 3600, + "pg_partman_bgw.role": "myrolename", + "pg_stat_monitor.pgsm_enable_query_plan": false, + "pg_stat_monitor.pgsm_max_buckets": 10, + "pg_stat_statements.track": "top", + "temp_file_limit": 5000000, + "timezone": "Europe/Helsinki", + "track_activity_query_size": 1024, + "track_commit_timestamp": "off", + "track_functions": "all", + "track_io_timing": "off", + "wal_sender_timeout": 60000, + "wal_writer_delay": 50 + }, + "pg_stat_monitor_enable": true, + "pglookout": { + "max_failover_replication_time_lag": 1000 + }, + "service_log": false, + "shared_buffers_percentage": 41.5, + "synchronous_replication": "off", + "work_mem": 4 + } } \ No newline at end of file diff --git a/test/unit/mysql_test.go b/test/unit/mysql_test.go index abe394f46..d42f46d19 100644 --- a/test/unit/mysql_test.go +++ b/test/unit/mysql_test.go @@ -53,6 +53,40 @@ func TestDatabaseMySQL_Get(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "8.0.26", db.Version) + + assert.Equal(t, 600, *db.EngineConfig.BinlogRetentionPeriod) + assert.Equal(t, true, *db.EngineConfig.ServiceLog) + assert.Equal(t, 10, *db.EngineConfig.MySQL.ConnectTimeout) + assert.Equal(t, "+03:00", *db.EngineConfig.MySQL.DefaultTimeZone) + assert.Equal(t, float64(1024), *db.EngineConfig.MySQL.GroupConcatMaxLen) + assert.Equal(t, 86400, *db.EngineConfig.MySQL.InformationSchemaStatsExpiry) + assert.Equal(t, 30, *db.EngineConfig.MySQL.InnoDBChangeBufferMaxSize) + assert.Equal(t, 0, *db.EngineConfig.MySQL.InnoDBFlushNeighbors) + assert.Equal(t, 3, *db.EngineConfig.MySQL.InnoDBFTMinTokenSize) + assert.Equal(t, "db_name/table_name", *db.EngineConfig.MySQL.InnoDBFTServerStopwordTable) + assert.Equal(t, 50, *db.EngineConfig.MySQL.InnoDBLockWaitTimeout) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.InnoDBLogBufferSize) + assert.Equal(t, 134217728, *db.EngineConfig.MySQL.InnoDBOnlineAlterLogMaxSize) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBPrintAllDeadlocks) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBReadIOThreads) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBRollbackOnTimeout) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBThreadConcurrency) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBWriteIOThreads) + assert.Equal(t, 3600, *db.EngineConfig.MySQL.InteractiveTimeout) + assert.Equal(t, "TempTable", *db.EngineConfig.MySQL.InternalTmpMemStorageEngine) + assert.Equal(t, "INSIGHTS", *db.EngineConfig.MySQL.LogOutput) + assert.Equal(t, float64(10), *db.EngineConfig.MySQL.LongQueryTime) + assert.Equal(t, 67108864, *db.EngineConfig.MySQL.MaxAllowedPacket) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.MaxHeapTableSize) + assert.Equal(t, 16384, *db.EngineConfig.MySQL.NetBufferLength) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetReadTimeout) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetWriteTimeout) + assert.Equal(t, true, *db.EngineConfig.MySQL.SlowQueryLog) + assert.Equal(t, 262144, *db.EngineConfig.MySQL.SortBufferSize) + assert.Equal(t, "ANSI,TRADITIONAL", *db.EngineConfig.MySQL.SQLMode) + assert.Equal(t, true, *db.EngineConfig.MySQL.SQLRequirePrimaryKey) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.TmpTableSize) + assert.Equal(t, 28800, *db.EngineConfig.MySQL.WaitTimeout) } func TestDatabaseMySQL_Update(t *testing.T) { @@ -65,6 +99,12 @@ func TestDatabaseMySQL_Update(t *testing.T) { requestData := linodego.MySQLUpdateOptions{ Label: "example-db-updated", + EngineConfig: &linodego.MySQLDatabaseEngineConfig{ + MySQL: 
&linodego.MySQLDatabaseEngineConfigMySQL{ + ConnectTimeout: linodego.Pointer(20), + }, + ServiceLog: linodego.Pointer(false), + }, } base.MockPut("databases/mysql/instances/123", fixtureData) @@ -88,6 +128,40 @@ func TestDatabaseMySQL_Update(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "8.0.26", db.Version) + + assert.Equal(t, 600, *db.EngineConfig.BinlogRetentionPeriod) + assert.Equal(t, false, *db.EngineConfig.ServiceLog) + assert.Equal(t, 20, *db.EngineConfig.MySQL.ConnectTimeout) + assert.Equal(t, "+03:00", *db.EngineConfig.MySQL.DefaultTimeZone) + assert.Equal(t, float64(1024), *db.EngineConfig.MySQL.GroupConcatMaxLen) + assert.Equal(t, 86400, *db.EngineConfig.MySQL.InformationSchemaStatsExpiry) + assert.Equal(t, 30, *db.EngineConfig.MySQL.InnoDBChangeBufferMaxSize) + assert.Equal(t, 0, *db.EngineConfig.MySQL.InnoDBFlushNeighbors) + assert.Equal(t, 3, *db.EngineConfig.MySQL.InnoDBFTMinTokenSize) + assert.Equal(t, "db_name/table_name", *db.EngineConfig.MySQL.InnoDBFTServerStopwordTable) + assert.Equal(t, 50, *db.EngineConfig.MySQL.InnoDBLockWaitTimeout) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.InnoDBLogBufferSize) + assert.Equal(t, 134217728, *db.EngineConfig.MySQL.InnoDBOnlineAlterLogMaxSize) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBPrintAllDeadlocks) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBReadIOThreads) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBRollbackOnTimeout) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBThreadConcurrency) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBWriteIOThreads) + assert.Equal(t, 3600, *db.EngineConfig.MySQL.InteractiveTimeout) + assert.Equal(t, "TempTable", *db.EngineConfig.MySQL.InternalTmpMemStorageEngine) + assert.Equal(t, "INSIGHTS", *db.EngineConfig.MySQL.LogOutput) + assert.Equal(t, float64(10), *db.EngineConfig.MySQL.LongQueryTime) + assert.Equal(t, 67108864, *db.EngineConfig.MySQL.MaxAllowedPacket) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.MaxHeapTableSize) + assert.Equal(t, 16384, *db.EngineConfig.MySQL.NetBufferLength) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetReadTimeout) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetWriteTimeout) + assert.Equal(t, true, *db.EngineConfig.MySQL.SlowQueryLog) + assert.Equal(t, 262144, *db.EngineConfig.MySQL.SortBufferSize) + assert.Equal(t, "ANSI,TRADITIONAL", *db.EngineConfig.MySQL.SQLMode) + assert.Equal(t, true, *db.EngineConfig.MySQL.SQLRequirePrimaryKey) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.TmpTableSize) + assert.Equal(t, 28800, *db.EngineConfig.MySQL.WaitTimeout) } func TestDatabaseMySQL_Create(t *testing.T) { @@ -103,6 +177,12 @@ func TestDatabaseMySQL_Create(t *testing.T) { Region: "us-east", Type: "g6-dedicated-2", Engine: "mysql", + EngineConfig: &linodego.MySQLDatabaseEngineConfig{ + MySQL: &linodego.MySQLDatabaseEngineConfigMySQL{ + ConnectTimeout: linodego.Pointer(20), + }, + ServiceLog: linodego.Pointer(false), + }, } base.MockPost("databases/mysql/instances", fixtureData) @@ -126,6 +206,40 @@ func TestDatabaseMySQL_Create(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "8.0.26", db.Version) + + assert.Equal(t, 600, *db.EngineConfig.BinlogRetentionPeriod) + assert.Equal(t, false, *db.EngineConfig.ServiceLog) + assert.Equal(t, 20, *db.EngineConfig.MySQL.ConnectTimeout) + assert.Equal(t, "+03:00", *db.EngineConfig.MySQL.DefaultTimeZone) + assert.Equal(t, float64(1024), 
*db.EngineConfig.MySQL.GroupConcatMaxLen) + assert.Equal(t, 86400, *db.EngineConfig.MySQL.InformationSchemaStatsExpiry) + assert.Equal(t, 30, *db.EngineConfig.MySQL.InnoDBChangeBufferMaxSize) + assert.Equal(t, 0, *db.EngineConfig.MySQL.InnoDBFlushNeighbors) + assert.Equal(t, 3, *db.EngineConfig.MySQL.InnoDBFTMinTokenSize) + assert.Equal(t, "db_name/table_name", *db.EngineConfig.MySQL.InnoDBFTServerStopwordTable) + assert.Equal(t, 50, *db.EngineConfig.MySQL.InnoDBLockWaitTimeout) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.InnoDBLogBufferSize) + assert.Equal(t, 134217728, *db.EngineConfig.MySQL.InnoDBOnlineAlterLogMaxSize) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBPrintAllDeadlocks) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBReadIOThreads) + assert.Equal(t, true, *db.EngineConfig.MySQL.InnoDBRollbackOnTimeout) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBThreadConcurrency) + assert.Equal(t, 10, *db.EngineConfig.MySQL.InnoDBWriteIOThreads) + assert.Equal(t, 3600, *db.EngineConfig.MySQL.InteractiveTimeout) + assert.Equal(t, "TempTable", *db.EngineConfig.MySQL.InternalTmpMemStorageEngine) + assert.Equal(t, "INSIGHTS", *db.EngineConfig.MySQL.LogOutput) + assert.Equal(t, float64(10), *db.EngineConfig.MySQL.LongQueryTime) + assert.Equal(t, 67108864, *db.EngineConfig.MySQL.MaxAllowedPacket) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.MaxHeapTableSize) + assert.Equal(t, 16384, *db.EngineConfig.MySQL.NetBufferLength) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetReadTimeout) + assert.Equal(t, 30, *db.EngineConfig.MySQL.NetWriteTimeout) + assert.Equal(t, true, *db.EngineConfig.MySQL.SlowQueryLog) + assert.Equal(t, 262144, *db.EngineConfig.MySQL.SortBufferSize) + assert.Equal(t, "ANSI,TRADITIONAL", *db.EngineConfig.MySQL.SQLMode) + assert.Equal(t, true, *db.EngineConfig.MySQL.SQLRequirePrimaryKey) + assert.Equal(t, 16777216, *db.EngineConfig.MySQL.TmpTableSize) + assert.Equal(t, 28800, *db.EngineConfig.MySQL.WaitTimeout) } func TestDatabaseMySQL_Delete(t *testing.T) { @@ -212,3 +326,275 @@ func TestDatabaseMySQL_Resume(t *testing.T) { t.Fatal(err) } } + +func TestDatabaseMySQLConfig_Get(t *testing.T) { + fixtureData, err := fixtures.GetFixture("mysql_database_config_get") + assert.NoError(t, err) + + var base ClientBaseCase + base.SetUp(t) + defer base.TearDown(t) + + base.MockGet("databases/mysql/config", fixtureData) + + config, err := base.Client.GetMySQLDatabaseConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "The number of seconds that the mysqld server waits for a connect packet before responding with Bad handshake", + config.MySQL.ConnectTimeout.Description) + assert.Equal(t, 10, config.MySQL.ConnectTimeout.Example) + assert.Equal(t, 3600, config.MySQL.ConnectTimeout.Maximum) + assert.Equal(t, 2, config.MySQL.ConnectTimeout.Minimum) + assert.False(t, config.MySQL.ConnectTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.ConnectTimeout.Type) + + assert.Equal(t, "Default server time zone as an offset from UTC (from -12:00 to +12:00), a time zone name, or 'SYSTEM' to use the MySQL server default.", + config.MySQL.DefaultTimeZone.Description) + assert.Equal(t, "+03:00", config.MySQL.DefaultTimeZone.Example) + assert.Equal(t, 100, config.MySQL.DefaultTimeZone.MaxLength) + assert.Equal(t, 2, config.MySQL.DefaultTimeZone.MinLength) + assert.Equal(t, "^([-+][\\d:]*|[\\w/]*)$", config.MySQL.DefaultTimeZone.Pattern) + assert.False(t, config.MySQL.DefaultTimeZone.RequiresRestart) + assert.Equal(t, "string", 
config.MySQL.DefaultTimeZone.Type) + + assert.Equal(t, "The maximum permitted result length in bytes for the GROUP_CONCAT() function.", + config.MySQL.GroupConcatMaxLen.Description) + assert.Equal(t, float64(1024), config.MySQL.GroupConcatMaxLen.Example) + assert.Equal(t, float64(18446744073709551600), config.MySQL.GroupConcatMaxLen.Maximum) + assert.Equal(t, float64(4), config.MySQL.GroupConcatMaxLen.Minimum) + assert.False(t, config.MySQL.GroupConcatMaxLen.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.GroupConcatMaxLen.Type) + + assert.Equal(t, "The time, in seconds, before cached statistics expire", + config.MySQL.InformationSchemaStatsExpiry.Description) + assert.Equal(t, 86400, config.MySQL.InformationSchemaStatsExpiry.Example) + assert.Equal(t, 31536000, config.MySQL.InformationSchemaStatsExpiry.Maximum) + assert.Equal(t, 900, config.MySQL.InformationSchemaStatsExpiry.Minimum) + assert.False(t, config.MySQL.InformationSchemaStatsExpiry.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InformationSchemaStatsExpiry.Type) + + assert.Equal(t, "Maximum size for the InnoDB change buffer, as a percentage of the total size of the buffer pool. Default is 25", + config.MySQL.InnoDBChangeBufferMaxSize.Description) + assert.Equal(t, 30, config.MySQL.InnoDBChangeBufferMaxSize.Example) + assert.Equal(t, 50, config.MySQL.InnoDBChangeBufferMaxSize.Maximum) + assert.Equal(t, 0, config.MySQL.InnoDBChangeBufferMaxSize.Minimum) + assert.False(t, config.MySQL.InnoDBChangeBufferMaxSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBChangeBufferMaxSize.Type) + + assert.Equal(t, "Specifies whether flushing a page from the InnoDB buffer pool also flushes other dirty pages in the same extent (default is 1): 0 - dirty pages in the same extent are not flushed, 1 - flush contiguous dirty pages in the same extent, 2 - flush dirty pages in the same extent", + config.MySQL.InnoDBFlushNeighbors.Description) + assert.Equal(t, 0, config.MySQL.InnoDBFlushNeighbors.Example) + assert.Equal(t, 2, config.MySQL.InnoDBFlushNeighbors.Maximum) + assert.Equal(t, 0, config.MySQL.InnoDBFlushNeighbors.Minimum) + assert.False(t, config.MySQL.InnoDBFlushNeighbors.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBFlushNeighbors.Type) + + assert.Equal(t, "Minimum length of words that are stored in an InnoDB FULLTEXT index. Changing this parameter will lead to a restart of the MySQL service.", + config.MySQL.InnoDBFTMinTokenSize.Description) + assert.Equal(t, 3, config.MySQL.InnoDBFTMinTokenSize.Example) + assert.Equal(t, 16, config.MySQL.InnoDBFTMinTokenSize.Maximum) + assert.Equal(t, 0, config.MySQL.InnoDBFTMinTokenSize.Minimum) + assert.True(t, config.MySQL.InnoDBFTMinTokenSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBFTMinTokenSize.Type) + + assert.Equal(t, "This option is used to specify your own InnoDB FULLTEXT index stopword list for all InnoDB tables.", + config.MySQL.InnoDBFTServerStopwordTable.Description) + assert.Equal(t, "db_name/table_name", config.MySQL.InnoDBFTServerStopwordTable.Example) + assert.Equal(t, 1024, config.MySQL.InnoDBFTServerStopwordTable.MaxLength) + assert.Equal(t, "^.+/.+$", config.MySQL.InnoDBFTServerStopwordTable.Pattern) + assert.False(t, config.MySQL.InnoDBFTServerStopwordTable.RequiresRestart) + assert.Equal(t, []string{"null", "string"}, config.MySQL.InnoDBFTServerStopwordTable.Type) + + assert.Equal(t, "The length of time in seconds an InnoDB transaction waits for a row lock before giving up. 
Default is 120.", + config.MySQL.InnoDBLockWaitTimeout.Description) + assert.Equal(t, 50, config.MySQL.InnoDBLockWaitTimeout.Example) + assert.Equal(t, 3600, config.MySQL.InnoDBLockWaitTimeout.Maximum) + assert.Equal(t, 1, config.MySQL.InnoDBLockWaitTimeout.Minimum) + assert.False(t, config.MySQL.InnoDBLockWaitTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBLockWaitTimeout.Type) + + assert.Equal(t, "The size in bytes of the buffer that InnoDB uses to write to the log files on disk.", + config.MySQL.InnoDBLogBufferSize.Description) + assert.Equal(t, 16777216, config.MySQL.InnoDBLogBufferSize.Example) + assert.Equal(t, 4294967295, config.MySQL.InnoDBLogBufferSize.Maximum) + assert.Equal(t, 1048576, config.MySQL.InnoDBLogBufferSize.Minimum) + assert.False(t, config.MySQL.InnoDBLogBufferSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBLogBufferSize.Type) + + assert.Equal(t, "The upper limit in bytes on the size of the temporary log files used during online DDL operations for InnoDB tables.", + config.MySQL.InnoDBOnlineAlterLogMaxSize.Description) + assert.Equal(t, 134217728, config.MySQL.InnoDBOnlineAlterLogMaxSize.Example) + assert.Equal(t, 1099511627776, config.MySQL.InnoDBOnlineAlterLogMaxSize.Maximum) + assert.Equal(t, 65536, config.MySQL.InnoDBOnlineAlterLogMaxSize.Minimum) + assert.False(t, config.MySQL.InnoDBOnlineAlterLogMaxSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBOnlineAlterLogMaxSize.Type) + + assert.Equal(t, "When enabled, information about all deadlocks in InnoDB user transactions is recorded in the error log. Disabled by default.", + config.MySQL.InnoDBPrintAllDeadlocks.Description) + assert.Equal(t, true, config.MySQL.InnoDBPrintAllDeadlocks.Example) + assert.False(t, config.MySQL.InnoDBPrintAllDeadlocks.RequiresRestart) + assert.Equal(t, "boolean", config.MySQL.InnoDBPrintAllDeadlocks.Type) + + assert.Equal(t, "The number of I/O threads for read operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.", + config.MySQL.InnoDBReadIOThreads.Description) + assert.Equal(t, 10, config.MySQL.InnoDBReadIOThreads.Example) + assert.Equal(t, 64, config.MySQL.InnoDBReadIOThreads.Maximum) + assert.Equal(t, 1, config.MySQL.InnoDBReadIOThreads.Minimum) + assert.True(t, config.MySQL.InnoDBReadIOThreads.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBReadIOThreads.Type) + + assert.Equal(t, "When enabled a transaction timeout causes InnoDB to abort and roll back the entire transaction. Changing this parameter will lead to a restart of the MySQL service.", + config.MySQL.InnoDBRollbackOnTimeout.Description) + assert.Equal(t, true, config.MySQL.InnoDBRollbackOnTimeout.Example) + assert.True(t, config.MySQL.InnoDBRollbackOnTimeout.RequiresRestart) + assert.Equal(t, "boolean", config.MySQL.InnoDBRollbackOnTimeout.Type) + + assert.Equal(t, "Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit)", + config.MySQL.InnoDBThreadConcurrency.Description) + assert.Equal(t, 10, config.MySQL.InnoDBThreadConcurrency.Example) + assert.Equal(t, 1000, config.MySQL.InnoDBThreadConcurrency.Maximum) + assert.Equal(t, 0, config.MySQL.InnoDBThreadConcurrency.Minimum) + assert.False(t, config.MySQL.InnoDBThreadConcurrency.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBThreadConcurrency.Type) + + assert.Equal(t, "The number of I/O threads for write operations in InnoDB. Default is 4. 
Changing this parameter will lead to a restart of the MySQL service.", + config.MySQL.InnoDBWriteIOThreads.Description) + assert.Equal(t, 10, config.MySQL.InnoDBWriteIOThreads.Example) + assert.Equal(t, 64, config.MySQL.InnoDBWriteIOThreads.Maximum) + assert.Equal(t, 1, config.MySQL.InnoDBWriteIOThreads.Minimum) + assert.True(t, config.MySQL.InnoDBWriteIOThreads.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InnoDBWriteIOThreads.Type) + + assert.Equal(t, "The number of seconds the server waits for activity on an interactive connection before closing it.", + config.MySQL.InteractiveTimeout.Description) + assert.Equal(t, 3600, config.MySQL.InteractiveTimeout.Example) + assert.Equal(t, 604800, config.MySQL.InteractiveTimeout.Maximum) + assert.Equal(t, 30, config.MySQL.InteractiveTimeout.Minimum) + assert.False(t, config.MySQL.InteractiveTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.InteractiveTimeout.Type) + + assert.Equal(t, "The storage engine for in-memory internal temporary tables.", + config.MySQL.InternalTmpMemStorageEngine.Description) + assert.Equal(t, "TempTable", config.MySQL.InternalTmpMemStorageEngine.Example) + assert.Equal(t, []string{"TempTable", "MEMORY"}, config.MySQL.InternalTmpMemStorageEngine.Enum) + assert.False(t, config.MySQL.InternalTmpMemStorageEngine.RequiresRestart) + assert.Equal(t, "string", config.MySQL.InternalTmpMemStorageEngine.Type) + + assert.Equal(t, "The slow log output destination when slow_query_log is ON. To enable MySQL AI Insights, choose INSIGHTS. To use MySQL AI Insights and the mysql.slow_log table at the same time, choose INSIGHTS,TABLE. To only use the mysql.slow_log table, choose TABLE. To silence slow logs, choose NONE.", + config.MySQL.LogOutput.Description) + assert.Equal(t, "INSIGHTS", config.MySQL.LogOutput.Example) + assert.Equal(t, []string{ + "INSIGHTS", + "NONE", + "TABLE", + "INSIGHTS,TABLE", + }, config.MySQL.LogOutput.Enum) + assert.False(t, config.MySQL.LogOutput.RequiresRestart) + assert.Equal(t, "string", config.MySQL.LogOutput.Type) + + assert.Equal(t, "The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute.", + config.MySQL.LongQueryTime.Description) + assert.Equal(t, float64(10), config.MySQL.LongQueryTime.Example) + assert.Equal(t, float64(3600), config.MySQL.LongQueryTime.Maximum) + assert.Equal(t, float64(0.0), config.MySQL.LongQueryTime.Minimum) + assert.False(t, config.MySQL.LongQueryTime.RequiresRestart) + assert.Equal(t, "number", config.MySQL.LongQueryTime.Type) + + assert.Equal(t, "Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M)", + config.MySQL.MaxAllowedPacket.Description) + assert.Equal(t, 67108864, config.MySQL.MaxAllowedPacket.Example) + assert.Equal(t, 1073741824, config.MySQL.MaxAllowedPacket.Maximum) + assert.Equal(t, 102400, config.MySQL.MaxAllowedPacket.Minimum) + assert.False(t, config.MySQL.MaxAllowedPacket.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.MaxAllowedPacket.Type) + + assert.Equal(t, "Limits the size of internal in-memory tables. Also set tmp_table_size. 
Default is 16777216 (16M)", + config.MySQL.MaxHeapTableSize.Description) + assert.Equal(t, 16777216, config.MySQL.MaxHeapTableSize.Example) + assert.Equal(t, 1073741824, config.MySQL.MaxHeapTableSize.Maximum) + assert.Equal(t, 1048576, config.MySQL.MaxHeapTableSize.Minimum) + assert.False(t, config.MySQL.MaxHeapTableSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.MaxHeapTableSize.Type) + + assert.Equal(t, "Start sizes of connection buffer and result buffer. Default is 16384 (16K). Changing this parameter will lead to a restart of the MySQL service.", + config.MySQL.NetBufferLength.Description) + assert.Equal(t, 16384, config.MySQL.NetBufferLength.Example) + assert.Equal(t, 1048576, config.MySQL.NetBufferLength.Maximum) + assert.Equal(t, 1024, config.MySQL.NetBufferLength.Minimum) + assert.True(t, config.MySQL.NetBufferLength.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.NetBufferLength.Type) + + assert.Equal(t, "The number of seconds to wait for more data from a connection before aborting the read.", + config.MySQL.NetReadTimeout.Description) + assert.Equal(t, 30, config.MySQL.NetReadTimeout.Example) + assert.Equal(t, 3600, config.MySQL.NetReadTimeout.Maximum) + assert.Equal(t, 1, config.MySQL.NetReadTimeout.Minimum) + assert.False(t, config.MySQL.NetReadTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.NetReadTimeout.Type) + + assert.Equal(t, "The number of seconds to wait for a block to be written to a connection before aborting the write.", + config.MySQL.NetWriteTimeout.Description) + assert.Equal(t, 30, config.MySQL.NetWriteTimeout.Example) + assert.Equal(t, 3600, config.MySQL.NetWriteTimeout.Maximum) + assert.Equal(t, 1, config.MySQL.NetWriteTimeout.Minimum) + assert.False(t, config.MySQL.NetWriteTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.NetWriteTimeout.Type) + + assert.Equal(t, "Slow query log enables capturing of slow queries. Setting slow_query_log to false also truncates the mysql.slow_log table.", + config.MySQL.SlowQueryLog.Description) + assert.Equal(t, true, config.MySQL.SlowQueryLog.Example) + assert.False(t, config.MySQL.SlowQueryLog.RequiresRestart) + assert.Equal(t, "boolean", config.MySQL.SlowQueryLog.Type) + + assert.Equal(t, "Sort buffer size in bytes for ORDER BY optimization. Default is 262144 (256K)", + config.MySQL.SortBufferSize.Description) + assert.Equal(t, 262144, config.MySQL.SortBufferSize.Example) + assert.Equal(t, 1073741824, config.MySQL.SortBufferSize.Maximum) + assert.Equal(t, 32768, config.MySQL.SortBufferSize.Minimum) + assert.False(t, config.MySQL.SortBufferSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.SortBufferSize.Type) + + assert.Equal(t, "Global SQL mode. Set to empty to use MySQL server defaults. When creating a new service and not setting this field Akamai default SQL mode (strict, SQL standard compliant) will be assigned.", + config.MySQL.SQLMode.Description) + assert.Equal(t, "ANSI,TRADITIONAL", config.MySQL.SQLMode.Example) + assert.Equal(t, 1024, config.MySQL.SQLMode.MaxLength) + assert.Equal(t, "^[A-Z_]*(,[A-Z_]+)*$", config.MySQL.SQLMode.Pattern) + assert.False(t, config.MySQL.SQLMode.RequiresRestart) + assert.Equal(t, "string", config.MySQL.SQLMode.Type) + + assert.Equal(t, "Require primary key to be defined for new tables or old tables modified with ALTER TABLE and fail if missing. 
It is recommended to always have primary keys because various functionality may break if any large table is missing them.", + config.MySQL.SQLRequirePrimaryKey.Description) + assert.Equal(t, true, config.MySQL.SQLRequirePrimaryKey.Example) + assert.False(t, config.MySQL.SQLRequirePrimaryKey.RequiresRestart) + assert.Equal(t, "boolean", config.MySQL.SQLRequirePrimaryKey.Type) + + assert.Equal(t, "Limits the size of internal in-memory tables. Also set max_heap_table_size. Default is 16777216 (16M)", + config.MySQL.TmpTableSize.Description) + assert.Equal(t, 16777216, config.MySQL.TmpTableSize.Example) + assert.Equal(t, 1073741824, config.MySQL.TmpTableSize.Maximum) + assert.Equal(t, 1048576, config.MySQL.TmpTableSize.Minimum) + assert.False(t, config.MySQL.TmpTableSize.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.TmpTableSize.Type) + + assert.Equal(t, "The number of seconds the server waits for activity on a noninteractive connection before closing it.", + config.MySQL.WaitTimeout.Description) + assert.Equal(t, 28800, config.MySQL.WaitTimeout.Example) + assert.Equal(t, 2147483, config.MySQL.WaitTimeout.Maximum) + assert.Equal(t, 1, config.MySQL.WaitTimeout.Minimum) + assert.False(t, config.MySQL.WaitTimeout.RequiresRestart) + assert.Equal(t, "integer", config.MySQL.WaitTimeout.Type) + + assert.Equal(t, "The minimum amount of time in seconds to keep binlog entries before deletion. This may be extended for services that require binlog entries for longer than the default for example if using the MySQL Debezium Kafka connector.", + config.BinlogRetentionPeriod.Description) + assert.Equal(t, 600, config.BinlogRetentionPeriod.Example) + assert.Equal(t, 86400, config.BinlogRetentionPeriod.Maximum) + assert.Equal(t, 600, config.BinlogRetentionPeriod.Minimum) + assert.False(t, config.BinlogRetentionPeriod.RequiresRestart) + assert.Equal(t, "integer", config.BinlogRetentionPeriod.Type) + + assert.Equal(t, "Store logs for the service so that they are available in the HTTP API and console.", + config.ServiceLog.Description) + assert.Equal(t, true, config.ServiceLog.Example) + assert.False(t, config.ServiceLog.RequiresRestart) + assert.Equal(t, []string{"boolean", "null"}, config.ServiceLog.Type) +} diff --git a/test/unit/postgres_test.go b/test/unit/postgres_test.go index 31c23c13a..11e5a3c65 100644 --- a/test/unit/postgres_test.go +++ b/test/unit/postgres_test.go @@ -52,6 +52,63 @@ func TestDatabasePostgreSQL_Get(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "13.2", db.Version) + + assert.Equal(t, true, *db.EngineConfig.PGStatMonitorEnable) + assert.Equal(t, int64(1000), *db.EngineConfig.PGLookout.MaxFailoverReplicationTimeLag) + assert.Equal(t, true, *db.EngineConfig.ServiceLog) + assert.Equal(t, 41.5, *db.EngineConfig.SharedBuffersPercentage) + assert.Equal(t, "off", *db.EngineConfig.SynchronousReplication) + assert.Equal(t, 4, *db.EngineConfig.WorkMem) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumAnalyzeScaleFactor) + assert.Equal(t, int32(100), *db.EngineConfig.PG.AutovacuumAnalyzeThreshold) + assert.Equal(t, 200000000, *db.EngineConfig.PG.AutovacuumFreezeMaxAge) + assert.Equal(t, 10, *db.EngineConfig.PG.AutovacuumMaxWorkers) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumNaptime) + assert.Equal(t, 50, *db.EngineConfig.PG.AutovacuumVacuumCostDelay) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumVacuumCostLimit) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumVacuumScaleFactor) + assert.Equal(t, 
int32(100), *db.EngineConfig.PG.AutovacuumVacuumThreshold) + assert.Equal(t, 200, *db.EngineConfig.PG.BGWriterDelay) + assert.Equal(t, 512, *db.EngineConfig.PG.BGWriterFlushAfter) + assert.Equal(t, 100, *db.EngineConfig.PG.BGWriterLRUMaxPages) + assert.Equal(t, 2.0, *db.EngineConfig.PG.BGWriterLRUMultiplier) + assert.Equal(t, 1000, *db.EngineConfig.PG.DeadlockTimeout) + assert.Equal(t, "lz4", *db.EngineConfig.PG.DefaultToastCompression) + assert.Equal(t, 100, *db.EngineConfig.PG.IdleInTransactionSessionTimeout) + assert.Equal(t, true, *db.EngineConfig.PG.JIT) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogAutovacuumMinDuration) + assert.Equal(t, "DEFAULT", *db.EngineConfig.PG.LogErrorVerbosity) + assert.Equal(t, "'pid=%p,user=%u,db=%d,app=%a,client=%h '", *db.EngineConfig.PG.LogLinePrefix) + assert.Equal(t, 100, *db.EngineConfig.PG.LogMinDurationStatement) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogTempFiles) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxFilesPerProcess) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxLocksPerTransaction) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxLogicalReplicationWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkersPerGather) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxPredLocksPerTransaction) + assert.Equal(t, 5000, *db.EngineConfig.PG.MaxPreparedTransactions) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxReplicationSlots) + assert.Equal(t, int32(100), *db.EngineConfig.PG.MaxSlotWALKeepSize) + assert.Equal(t, 3507152, *db.EngineConfig.PG.MaxStackDepth) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyArchiveDelay) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyStreamingDelay) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxWALSenders) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxWorkerProcesses) + assert.Equal(t, "scram-sha-256", *db.EngineConfig.PG.PasswordEncryption) + assert.Equal(t, 3600, *db.EngineConfig.PG.PGPartmanBGWInterval) + assert.Equal(t, "myrolename", *db.EngineConfig.PG.PGPartmanBGWRole) + assert.Equal(t, false, *db.EngineConfig.PG.PGStatMonitorPGSMEnableQueryPlan) + assert.Equal(t, 10, *db.EngineConfig.PG.PGStatMonitorPGSMMaxBuckets) + assert.Equal(t, "top", *db.EngineConfig.PG.PGStatStatementsTrack) + assert.Equal(t, int32(5000000), *db.EngineConfig.PG.TempFileLimit) + assert.Equal(t, "Europe/Helsinki", *db.EngineConfig.PG.Timezone) + assert.Equal(t, 1024, *db.EngineConfig.PG.TrackActivityQuerySize) + assert.Equal(t, "off", *db.EngineConfig.PG.TrackCommitTimestamp) + assert.Equal(t, "all", *db.EngineConfig.PG.TrackFunctions) + assert.Equal(t, "off", *db.EngineConfig.PG.TrackIOTiming) + assert.Equal(t, 60000, *db.EngineConfig.PG.WALSenderTimeout) + assert.Equal(t, 50, *db.EngineConfig.PG.WALWriterDelay) } func TestDatabasePostgreSQL_Update(t *testing.T) { @@ -64,6 +121,12 @@ func TestDatabasePostgreSQL_Update(t *testing.T) { requestData := linodego.PostgresUpdateOptions{ Label: "example-db-updated", + EngineConfig: &linodego.PostgresDatabaseEngineConfig{ + PG: &linodego.PostgresDatabaseEngineConfigPG{ + AutovacuumFreezeMaxAge: linodego.Pointer(400000000), + }, + ServiceLog: linodego.Pointer(false), + }, } base.MockPut("databases/postgresql/instances/123", fixtureData) @@ -87,6 +150,63 @@ func TestDatabasePostgreSQL_Update(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "13.2", db.Version) + + assert.Equal(t, true, *db.EngineConfig.PGStatMonitorEnable) + assert.Equal(t, 
int64(1000), *db.EngineConfig.PGLookout.MaxFailoverReplicationTimeLag) + assert.Equal(t, false, *db.EngineConfig.ServiceLog) + assert.Equal(t, 41.5, *db.EngineConfig.SharedBuffersPercentage) + assert.Equal(t, "off", *db.EngineConfig.SynchronousReplication) + assert.Equal(t, 4, *db.EngineConfig.WorkMem) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumAnalyzeScaleFactor) + assert.Equal(t, int32(100), *db.EngineConfig.PG.AutovacuumAnalyzeThreshold) + assert.Equal(t, 400000000, *db.EngineConfig.PG.AutovacuumFreezeMaxAge) + assert.Equal(t, 10, *db.EngineConfig.PG.AutovacuumMaxWorkers) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumNaptime) + assert.Equal(t, 50, *db.EngineConfig.PG.AutovacuumVacuumCostDelay) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumVacuumCostLimit) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumVacuumScaleFactor) + assert.Equal(t, int32(100), *db.EngineConfig.PG.AutovacuumVacuumThreshold) + assert.Equal(t, 200, *db.EngineConfig.PG.BGWriterDelay) + assert.Equal(t, 512, *db.EngineConfig.PG.BGWriterFlushAfter) + assert.Equal(t, 100, *db.EngineConfig.PG.BGWriterLRUMaxPages) + assert.Equal(t, 2.0, *db.EngineConfig.PG.BGWriterLRUMultiplier) + assert.Equal(t, 1000, *db.EngineConfig.PG.DeadlockTimeout) + assert.Equal(t, "lz4", *db.EngineConfig.PG.DefaultToastCompression) + assert.Equal(t, 100, *db.EngineConfig.PG.IdleInTransactionSessionTimeout) + assert.Equal(t, true, *db.EngineConfig.PG.JIT) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogAutovacuumMinDuration) + assert.Equal(t, "DEFAULT", *db.EngineConfig.PG.LogErrorVerbosity) + assert.Equal(t, "'pid=%p,user=%u,db=%d,app=%a,client=%h '", *db.EngineConfig.PG.LogLinePrefix) + assert.Equal(t, 100, *db.EngineConfig.PG.LogMinDurationStatement) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogTempFiles) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxFilesPerProcess) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxLocksPerTransaction) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxLogicalReplicationWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkersPerGather) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxPredLocksPerTransaction) + assert.Equal(t, 5000, *db.EngineConfig.PG.MaxPreparedTransactions) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxReplicationSlots) + assert.Equal(t, int32(100), *db.EngineConfig.PG.MaxSlotWALKeepSize) + assert.Equal(t, 3507152, *db.EngineConfig.PG.MaxStackDepth) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyArchiveDelay) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyStreamingDelay) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxWALSenders) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxWorkerProcesses) + assert.Equal(t, "scram-sha-256", *db.EngineConfig.PG.PasswordEncryption) + assert.Equal(t, 3600, *db.EngineConfig.PG.PGPartmanBGWInterval) + assert.Equal(t, "myrolename", *db.EngineConfig.PG.PGPartmanBGWRole) + assert.Equal(t, false, *db.EngineConfig.PG.PGStatMonitorPGSMEnableQueryPlan) + assert.Equal(t, 10, *db.EngineConfig.PG.PGStatMonitorPGSMMaxBuckets) + assert.Equal(t, "top", *db.EngineConfig.PG.PGStatStatementsTrack) + assert.Equal(t, int32(5000000), *db.EngineConfig.PG.TempFileLimit) + assert.Equal(t, "Europe/Helsinki", *db.EngineConfig.PG.Timezone) + assert.Equal(t, 1024, *db.EngineConfig.PG.TrackActivityQuerySize) + assert.Equal(t, "off", *db.EngineConfig.PG.TrackCommitTimestamp) + assert.Equal(t, "all", *db.EngineConfig.PG.TrackFunctions) + assert.Equal(t, "off", 
*db.EngineConfig.PG.TrackIOTiming) + assert.Equal(t, 60000, *db.EngineConfig.PG.WALSenderTimeout) + assert.Equal(t, 50, *db.EngineConfig.PG.WALWriterDelay) } func TestDatabasePostgreSQL_Create(t *testing.T) { @@ -102,6 +222,12 @@ func TestDatabasePostgreSQL_Create(t *testing.T) { Region: "us-east", Type: "g6-dedicated-2", Engine: "postgresql", + EngineConfig: &linodego.PostgresDatabaseEngineConfig{ + PG: &linodego.PostgresDatabaseEngineConfigPG{ + AutovacuumFreezeMaxAge: linodego.Pointer(400000000), + }, + ServiceLog: linodego.Pointer(false), + }, } base.MockPost("databases/postgresql/instances", fixtureData) @@ -125,6 +251,63 @@ func TestDatabasePostgreSQL_Create(t *testing.T) { assert.Equal(t, 0, db.Updates.HourOfDay) assert.Equal(t, 2, db.UsedDiskSizeGB) assert.Equal(t, "13.2", db.Version) + + assert.Equal(t, true, *db.EngineConfig.PGStatMonitorEnable) + assert.Equal(t, int64(1000), *db.EngineConfig.PGLookout.MaxFailoverReplicationTimeLag) + assert.Equal(t, false, *db.EngineConfig.ServiceLog) + assert.Equal(t, 41.5, *db.EngineConfig.SharedBuffersPercentage) + assert.Equal(t, "off", *db.EngineConfig.SynchronousReplication) + assert.Equal(t, 4, *db.EngineConfig.WorkMem) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumAnalyzeScaleFactor) + assert.Equal(t, int32(100), *db.EngineConfig.PG.AutovacuumAnalyzeThreshold) + assert.Equal(t, 400000000, *db.EngineConfig.PG.AutovacuumFreezeMaxAge) + assert.Equal(t, 10, *db.EngineConfig.PG.AutovacuumMaxWorkers) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumNaptime) + assert.Equal(t, 50, *db.EngineConfig.PG.AutovacuumVacuumCostDelay) + assert.Equal(t, 100, *db.EngineConfig.PG.AutovacuumVacuumCostLimit) + assert.Equal(t, 0.5, *db.EngineConfig.PG.AutovacuumVacuumScaleFactor) + assert.Equal(t, int32(100), *db.EngineConfig.PG.AutovacuumVacuumThreshold) + assert.Equal(t, 200, *db.EngineConfig.PG.BGWriterDelay) + assert.Equal(t, 512, *db.EngineConfig.PG.BGWriterFlushAfter) + assert.Equal(t, 100, *db.EngineConfig.PG.BGWriterLRUMaxPages) + assert.Equal(t, 2.0, *db.EngineConfig.PG.BGWriterLRUMultiplier) + assert.Equal(t, 1000, *db.EngineConfig.PG.DeadlockTimeout) + assert.Equal(t, "lz4", *db.EngineConfig.PG.DefaultToastCompression) + assert.Equal(t, 100, *db.EngineConfig.PG.IdleInTransactionSessionTimeout) + assert.Equal(t, true, *db.EngineConfig.PG.JIT) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogAutovacuumMinDuration) + assert.Equal(t, "DEFAULT", *db.EngineConfig.PG.LogErrorVerbosity) + assert.Equal(t, "'pid=%p,user=%u,db=%d,app=%a,client=%h '", *db.EngineConfig.PG.LogLinePrefix) + assert.Equal(t, 100, *db.EngineConfig.PG.LogMinDurationStatement) + assert.Equal(t, int32(100), *db.EngineConfig.PG.LogTempFiles) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxFilesPerProcess) + assert.Equal(t, 100, *db.EngineConfig.PG.MaxLocksPerTransaction) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxLogicalReplicationWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkers) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxParallelWorkersPerGather) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxPredLocksPerTransaction) + assert.Equal(t, 5000, *db.EngineConfig.PG.MaxPreparedTransactions) + assert.Equal(t, 32, *db.EngineConfig.PG.MaxReplicationSlots) + assert.Equal(t, int32(100), *db.EngineConfig.PG.MaxSlotWALKeepSize) + assert.Equal(t, 3507152, *db.EngineConfig.PG.MaxStackDepth) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyArchiveDelay) + assert.Equal(t, 1000, *db.EngineConfig.PG.MaxStandbyStreamingDelay) + assert.Equal(t, 32, 
*db.EngineConfig.PG.MaxWALSenders) + assert.Equal(t, 64, *db.EngineConfig.PG.MaxWorkerProcesses) + assert.Equal(t, "scram-sha-256", *db.EngineConfig.PG.PasswordEncryption) + assert.Equal(t, 3600, *db.EngineConfig.PG.PGPartmanBGWInterval) + assert.Equal(t, "myrolename", *db.EngineConfig.PG.PGPartmanBGWRole) + assert.Equal(t, false, *db.EngineConfig.PG.PGStatMonitorPGSMEnableQueryPlan) + assert.Equal(t, 10, *db.EngineConfig.PG.PGStatMonitorPGSMMaxBuckets) + assert.Equal(t, "top", *db.EngineConfig.PG.PGStatStatementsTrack) + assert.Equal(t, int32(5000000), *db.EngineConfig.PG.TempFileLimit) + assert.Equal(t, "Europe/Helsinki", *db.EngineConfig.PG.Timezone) + assert.Equal(t, 1024, *db.EngineConfig.PG.TrackActivityQuerySize) + assert.Equal(t, "off", *db.EngineConfig.PG.TrackCommitTimestamp) + assert.Equal(t, "all", *db.EngineConfig.PG.TrackFunctions) + assert.Equal(t, "off", *db.EngineConfig.PG.TrackIOTiming) + assert.Equal(t, 60000, *db.EngineConfig.PG.WALSenderTimeout) + assert.Equal(t, 50, *db.EngineConfig.PG.WALWriterDelay) } func TestDatabasePostgreSQL_Delete(t *testing.T) { @@ -211,3 +394,422 @@ func TestDatabasePostgreSQL_Resume(t *testing.T) { t.Fatal(err) } } + +func TestDatabasePostgreSQLConfig_Get(t *testing.T) { + fixtureData, err := fixtures.GetFixture("postgresql_database_config_get") + assert.NoError(t, err) + + var base ClientBaseCase + base.SetUp(t) + defer base.TearDown(t) + + base.MockGet("databases/postgresql/config", fixtureData) + + config, err := base.Client.GetPostgresDatabaseConfig(context.Background()) + assert.NoError(t, err) + + assert.Equal(t, "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when "+ + "deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)", + config.PG.AutovacuumAnalyzeScaleFactor.Description) + assert.Equal(t, 1.0, config.PG.AutovacuumAnalyzeScaleFactor.Maximum) + assert.Equal(t, 0.0, config.PG.AutovacuumAnalyzeScaleFactor.Minimum) + assert.False(t, config.PG.AutovacuumAnalyzeScaleFactor.RequiresRestart) + assert.Equal(t, "number", config.PG.AutovacuumAnalyzeScaleFactor.Type) + + assert.Equal(t, "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.", + config.PG.AutovacuumAnalyzeThreshold.Description) + assert.Equal(t, int32(2147483647), config.PG.AutovacuumAnalyzeThreshold.Maximum) + assert.Equal(t, int32(0), config.PG.AutovacuumAnalyzeThreshold.Minimum) + assert.False(t, config.PG.AutovacuumAnalyzeThreshold.RequiresRestart) + assert.Equal(t, "integer", config.PG.AutovacuumAnalyzeThreshold.Type) + + assert.Equal(t, "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. 
@@ -211,3 +394,422 @@ func TestDatabasePostgreSQL_Resume(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+func TestDatabasePostgreSQLConfig_Get(t *testing.T) {
+	fixtureData, err := fixtures.GetFixture("postgresql_database_config_get")
+	assert.NoError(t, err)
+
+	var base ClientBaseCase
+	base.SetUp(t)
+	defer base.TearDown(t)
+
+	base.MockGet("databases/postgresql/config", fixtureData)
+
+	config, err := base.Client.GetPostgresDatabaseConfig(context.Background())
+	assert.NoError(t, err)
+
+	assert.Equal(t, "Specifies a fraction of the table size to add to autovacuum_analyze_threshold when "+
+		"deciding whether to trigger an ANALYZE. The default is 0.2 (20% of table size)",
+		config.PG.AutovacuumAnalyzeScaleFactor.Description)
+	assert.Equal(t, 1.0, config.PG.AutovacuumAnalyzeScaleFactor.Maximum)
+	assert.Equal(t, 0.0, config.PG.AutovacuumAnalyzeScaleFactor.Minimum)
+	assert.False(t, config.PG.AutovacuumAnalyzeScaleFactor.RequiresRestart)
+	assert.Equal(t, "number", config.PG.AutovacuumAnalyzeScaleFactor.Type)
+
+	assert.Equal(t, "Specifies the minimum number of inserted, updated or deleted tuples needed to trigger an ANALYZE in any one table. The default is 50 tuples.",
+		config.PG.AutovacuumAnalyzeThreshold.Description)
+	assert.Equal(t, int32(2147483647), config.PG.AutovacuumAnalyzeThreshold.Maximum)
+	assert.Equal(t, int32(0), config.PG.AutovacuumAnalyzeThreshold.Minimum)
+	assert.False(t, config.PG.AutovacuumAnalyzeThreshold.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumAnalyzeThreshold.Type)
+
+	assert.Equal(t, "Specifies the maximum age (in transactions) that a table's pg_class.relfrozenxid field can attain before a VACUUM operation is forced to prevent transaction ID wraparound within the table. Note that the system will launch autovacuum processes to prevent wraparound even when autovacuum is otherwise disabled. This parameter will cause the server to be restarted.",
+		config.PG.AutovacuumFreezeMaxAge.Description)
+	assert.Equal(t, 200000000, config.PG.AutovacuumFreezeMaxAge.Example)
+	assert.Equal(t, 1500000000, config.PG.AutovacuumFreezeMaxAge.Maximum)
+	assert.Equal(t, 200000000, config.PG.AutovacuumFreezeMaxAge.Minimum)
+	assert.True(t, config.PG.AutovacuumFreezeMaxAge.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumFreezeMaxAge.Type)
+
+	assert.Equal(t, "Specifies the maximum number of autovacuum processes (other than the autovacuum launcher) that may be running at any one time. The default is three. This parameter can only be set at server start.",
+		config.PG.AutovacuumMaxWorkers.Description)
+	assert.Equal(t, 20, config.PG.AutovacuumMaxWorkers.Maximum)
+	assert.Equal(t, 1, config.PG.AutovacuumMaxWorkers.Minimum)
+	assert.False(t, config.PG.AutovacuumMaxWorkers.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumMaxWorkers.Type)
+
+	assert.Equal(t, "Specifies the minimum delay between autovacuum runs on any given database. The delay is measured in seconds, and the default is one minute",
+		config.PG.AutovacuumNaptime.Description)
+	assert.Equal(t, 86400, config.PG.AutovacuumNaptime.Maximum)
+	assert.Equal(t, 1, config.PG.AutovacuumNaptime.Minimum)
+	assert.False(t, config.PG.AutovacuumNaptime.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumNaptime.Type)
+
+	assert.Equal(t, "Specifies the cost delay value that will be used in automatic VACUUM operations. If -1 is specified, the regular vacuum_cost_delay value will be used. The default value is 20 milliseconds",
+		config.PG.AutovacuumVacuumCostDelay.Description)
+	assert.Equal(t, 100, config.PG.AutovacuumVacuumCostDelay.Maximum)
+	assert.Equal(t, -1, config.PG.AutovacuumVacuumCostDelay.Minimum)
+	assert.False(t, config.PG.AutovacuumVacuumCostDelay.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumVacuumCostDelay.Type)
+
+	assert.Equal(t, "Specifies the cost limit value that will be used in automatic VACUUM operations. If -1 is specified (which is the default), the regular vacuum_cost_limit value will be used.",
+		config.PG.AutovacuumVacuumCostLimit.Description)
+	assert.Equal(t, 10000, config.PG.AutovacuumVacuumCostLimit.Maximum)
+	assert.Equal(t, -1, config.PG.AutovacuumVacuumCostLimit.Minimum)
+	assert.False(t, config.PG.AutovacuumVacuumCostLimit.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumVacuumCostLimit.Type)
+
+	assert.Equal(t, "Specifies a fraction of the table size to add to autovacuum_vacuum_threshold when deciding whether to trigger a VACUUM. The default is 0.2 (20% of table size)",
+		config.PG.AutovacuumVacuumScaleFactor.Description)
+	assert.Equal(t, 1.0, config.PG.AutovacuumVacuumScaleFactor.Maximum)
+	assert.Equal(t, 0.0, config.PG.AutovacuumVacuumScaleFactor.Minimum)
+	assert.False(t, config.PG.AutovacuumVacuumScaleFactor.RequiresRestart)
+	assert.Equal(t, "number", config.PG.AutovacuumVacuumScaleFactor.Type)
+
+	assert.Equal(t, "Specifies the minimum number of updated or deleted tuples needed to trigger a VACUUM in any one table. The default is 50 tuples",
+		config.PG.AutovacuumVacuumThreshold.Description)
+	assert.Equal(t, int32(2147483647), config.PG.AutovacuumVacuumThreshold.Maximum)
+	assert.Equal(t, int32(0), config.PG.AutovacuumVacuumThreshold.Minimum)
+	assert.False(t, config.PG.AutovacuumVacuumThreshold.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.AutovacuumVacuumThreshold.Type)
+
+	assert.Equal(t, "Specifies the delay between activity rounds for the background writer in milliseconds. Default is 200.",
+		config.PG.BGWriterDelay.Description)
+	assert.Equal(t, 200, config.PG.BGWriterDelay.Example)
+	assert.Equal(t, 10000, config.PG.BGWriterDelay.Maximum)
+	assert.Equal(t, 10, config.PG.BGWriterDelay.Minimum)
+	assert.False(t, config.PG.BGWriterDelay.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.BGWriterDelay.Type)
+
+	assert.Equal(t, "Whenever more than bgwriter_flush_after bytes have been written by the background writer, attempt to force the OS to issue these writes to the underlying storage. Specified in kilobytes, default is 512. Setting of 0 disables forced writeback.",
+		config.PG.BGWriterFlushAfter.Description)
+	assert.Equal(t, 512, config.PG.BGWriterFlushAfter.Example)
+	assert.Equal(t, 2048, config.PG.BGWriterFlushAfter.Maximum)
+	assert.Equal(t, 0, config.PG.BGWriterFlushAfter.Minimum)
+	assert.False(t, config.PG.BGWriterFlushAfter.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.BGWriterFlushAfter.Type)
+
+	assert.Equal(t, "In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.",
+		config.PG.BGWriterLRUMaxPages.Description)
+	assert.Equal(t, 100, config.PG.BGWriterLRUMaxPages.Example)
+	assert.Equal(t, 1073741823, config.PG.BGWriterLRUMaxPages.Maximum)
+	assert.Equal(t, 0, config.PG.BGWriterLRUMaxPages.Minimum)
+	assert.False(t, config.PG.BGWriterLRUMaxPages.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.BGWriterLRUMaxPages.Type)
+
+	assert.Equal(t, "The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.",
+		config.PG.BGWriterLRUMultiplier.Description)
+	assert.Equal(t, 2.0, config.PG.BGWriterLRUMultiplier.Example)
+	assert.Equal(t, 10.0, config.PG.BGWriterLRUMultiplier.Maximum)
+	assert.Equal(t, 0.0, config.PG.BGWriterLRUMultiplier.Minimum)
+	assert.False(t, config.PG.BGWriterLRUMultiplier.RequiresRestart)
+	assert.Equal(t, "number", config.PG.BGWriterLRUMultiplier.Type)
+
+	assert.Equal(t, "This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.",
+		config.PG.DeadlockTimeout.Description)
+	assert.Equal(t, 1000, config.PG.DeadlockTimeout.Example)
+	assert.Equal(t, 1800000, config.PG.DeadlockTimeout.Maximum)
+	assert.Equal(t, 500, config.PG.DeadlockTimeout.Minimum)
+	assert.False(t, config.PG.DeadlockTimeout.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.DeadlockTimeout.Type)
+
+	assert.Equal(t, "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).",
+		config.PG.DefaultToastCompression.Description)
+	assert.ElementsMatch(t, []string{"lz4", "pglz"}, config.PG.DefaultToastCompression.Enum)
+	assert.Equal(t, "lz4", config.PG.DefaultToastCompression.Example)
+	assert.False(t, config.PG.DefaultToastCompression.RequiresRestart)
+	assert.Equal(t, "string", config.PG.DefaultToastCompression.Type)
+
+	assert.Equal(t, "Time out sessions with open transactions after this number of milliseconds",
+		config.PG.IdleInTransactionSessionTimeout.Description)
+	assert.Equal(t, 604800000, config.PG.IdleInTransactionSessionTimeout.Maximum)
+	assert.Equal(t, 0, config.PG.IdleInTransactionSessionTimeout.Minimum)
+	assert.False(t, config.PG.IdleInTransactionSessionTimeout.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.IdleInTransactionSessionTimeout.Type)
+
+	assert.Equal(t, "Controls system-wide use of Just-in-Time Compilation (JIT).",
+		config.PG.JIT.Description)
+	assert.Equal(t, true, config.PG.JIT.Example)
+	assert.False(t, config.PG.JIT.RequiresRestart)
+	assert.Equal(t, "boolean", config.PG.JIT.Type)
+
+	assert.Equal(t, "Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.",
+		config.PG.LogAutovacuumMinDuration.Description)
+	assert.Equal(t, int32(2147483647), config.PG.LogAutovacuumMinDuration.Maximum)
+	assert.Equal(t, int32(-1), config.PG.LogAutovacuumMinDuration.Minimum)
+	assert.False(t, config.PG.LogAutovacuumMinDuration.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.LogAutovacuumMinDuration.Type)
+
+	assert.Equal(t, "Controls the amount of detail written in the server log for each message that is logged.",
+		config.PG.LogErrorVerbosity.Description)
+	assert.Equal(t, []string{"TERSE", "DEFAULT", "VERBOSE"},
+		config.PG.LogErrorVerbosity.Enum)
+	assert.False(t, config.PG.LogErrorVerbosity.RequiresRestart)
+	assert.Equal(t, "string", config.PG.LogErrorVerbosity.Type)
+
+	assert.Equal(t, "Choose from one of the available log formats.",
+		config.PG.LogLinePrefix.Description)
+	assert.Equal(t, []string{
+		"'pid=%p,user=%u,db=%d,app=%a,client=%h '",
+		"'pid=%p,user=%u,db=%d,app=%a,client=%h,txid=%x,qid=%Q '",
+		"'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '",
+		"'%m [%p] %q[user=%u,db=%d,app=%a] '",
+	},
+		config.PG.LogLinePrefix.Enum)
+	assert.False(t, config.PG.LogLinePrefix.RequiresRestart)
+	assert.Equal(t, "string", config.PG.LogLinePrefix.Type)
+
+	assert.Equal(t, "Log statements that take more than this number of milliseconds to run, -1 disables",
+		config.PG.LogMinDurationStatement.Description)
+	assert.Equal(t, 86400000, config.PG.LogMinDurationStatement.Maximum)
+	assert.Equal(t, -1, config.PG.LogMinDurationStatement.Minimum)
+	assert.False(t, config.PG.LogMinDurationStatement.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.LogMinDurationStatement.Type)
+
+	assert.Equal(t, "Log statements for each temporary file created larger than this number of kilobytes, -1 disables",
+		config.PG.LogTempFiles.Description)
+	assert.Equal(t, int32(2147483647), config.PG.LogTempFiles.Maximum)
+	assert.Equal(t, int32(-1), config.PG.LogTempFiles.Minimum)
+	assert.False(t, config.PG.LogTempFiles.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.LogTempFiles.Type)
+
+	assert.Equal(t, "PostgreSQL maximum number of files that can be open per process",
+		config.PG.MaxFilesPerProcess.Description)
+	assert.Equal(t, 4096, config.PG.MaxFilesPerProcess.Maximum)
+	assert.Equal(t, 1000, config.PG.MaxFilesPerProcess.Minimum)
+	assert.False(t, config.PG.MaxFilesPerProcess.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxFilesPerProcess.Type)
+
+	assert.Equal(t, "PostgreSQL maximum locks per transaction",
+		config.PG.MaxLocksPerTransaction.Description)
+	assert.Equal(t, 6400, config.PG.MaxLocksPerTransaction.Maximum)
+	assert.Equal(t, 64, config.PG.MaxLocksPerTransaction.Minimum)
+	assert.False(t, config.PG.MaxLocksPerTransaction.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxLocksPerTransaction.Type)
+
+	assert.Equal(t, "PostgreSQL maximum logical replication workers (taken from the pool of max_parallel_workers)",
+		config.PG.MaxLogicalReplicationWorkers.Description)
+	assert.Equal(t, 64, config.PG.MaxLogicalReplicationWorkers.Maximum)
+	assert.Equal(t, 4, config.PG.MaxLogicalReplicationWorkers.Minimum)
+	assert.False(t, config.PG.MaxLogicalReplicationWorkers.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxLogicalReplicationWorkers.Type)
+
+	assert.Equal(t, "Sets the maximum number of workers that the system can support for parallel queries",
+		config.PG.MaxParallelWorkers.Description)
+	assert.Equal(t, 96, config.PG.MaxParallelWorkers.Maximum)
+	assert.Equal(t, 0, config.PG.MaxParallelWorkers.Minimum)
+	assert.False(t, config.PG.MaxParallelWorkers.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxParallelWorkers.Type)
+
+	assert.Equal(t, "Sets the maximum number of workers that can be started by a single Gather or Gather Merge node",
+		config.PG.MaxParallelWorkersPerGather.Description)
+	assert.Equal(t, 96, config.PG.MaxParallelWorkersPerGather.Maximum)
+	assert.Equal(t, 0, config.PG.MaxParallelWorkersPerGather.Minimum)
+	assert.False(t, config.PG.MaxParallelWorkersPerGather.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxParallelWorkersPerGather.Type)
+
+	assert.Equal(t, "PostgreSQL maximum predicate locks per transaction",
+		config.PG.MaxPredLocksPerTransaction.Description)
+	assert.Equal(t, 5120, config.PG.MaxPredLocksPerTransaction.Maximum)
+	assert.Equal(t, 64, config.PG.MaxPredLocksPerTransaction.Minimum)
+	assert.False(t, config.PG.MaxPredLocksPerTransaction.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxPredLocksPerTransaction.Type)
+
+	assert.Equal(t, "PostgreSQL maximum prepared transactions",
+		config.PG.MaxPreparedTransactions.Description)
+	assert.Equal(t, 10000, config.PG.MaxPreparedTransactions.Maximum)
+	assert.Equal(t, 0, config.PG.MaxPreparedTransactions.Minimum)
+	assert.False(t, config.PG.MaxPreparedTransactions.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxPreparedTransactions.Type)
+
+	assert.Equal(t, "PostgreSQL maximum replication slots",
+		config.PG.MaxReplicationSlots.Description)
+	assert.Equal(t, 64, config.PG.MaxReplicationSlots.Maximum)
+	assert.Equal(t, 8, config.PG.MaxReplicationSlots.Minimum)
+	assert.False(t, config.PG.MaxReplicationSlots.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxReplicationSlots.Type)
+
+	assert.Equal(t, "PostgreSQL maximum WAL size (MB) reserved for replication slots. Default is -1 (unlimited). wal_keep_size minimum WAL size setting takes precedence over this.",
+		config.PG.MaxSlotWALKeepSize.Description)
+	assert.Equal(t, int32(2147483647), config.PG.MaxSlotWALKeepSize.Maximum)
+	assert.Equal(t, int32(-1), config.PG.MaxSlotWALKeepSize.Minimum)
+	assert.False(t, config.PG.MaxSlotWALKeepSize.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxSlotWALKeepSize.Type)
+
+	assert.Equal(t, "Maximum depth of the stack in bytes",
+		config.PG.MaxStackDepth.Description)
+	assert.Equal(t, 6291456, config.PG.MaxStackDepth.Maximum)
+	assert.Equal(t, 2097152, config.PG.MaxStackDepth.Minimum)
+	assert.False(t, config.PG.MaxStackDepth.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxStackDepth.Type)
+
+	assert.Equal(t, "Max standby archive delay in milliseconds",
+		config.PG.MaxStandbyArchiveDelay.Description)
+	assert.Equal(t, 43200000, config.PG.MaxStandbyArchiveDelay.Maximum)
+	assert.Equal(t, 1, config.PG.MaxStandbyArchiveDelay.Minimum)
+	assert.False(t, config.PG.MaxStandbyArchiveDelay.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxStandbyArchiveDelay.Type)
+
+	assert.Equal(t, "Max standby streaming delay in milliseconds",
+		config.PG.MaxStandbyStreamingDelay.Description)
+	assert.Equal(t, 43200000, config.PG.MaxStandbyStreamingDelay.Maximum)
+	assert.Equal(t, 1, config.PG.MaxStandbyStreamingDelay.Minimum)
+	assert.False(t, config.PG.MaxStandbyStreamingDelay.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxStandbyStreamingDelay.Type)
+
+	assert.Equal(t, "PostgreSQL maximum WAL senders",
+		config.PG.MaxWALSenders.Description)
+	assert.Equal(t, 64, config.PG.MaxWALSenders.Maximum)
+	assert.Equal(t, 20, config.PG.MaxWALSenders.Minimum)
+	assert.False(t, config.PG.MaxWALSenders.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxWALSenders.Type)
+
+	assert.Equal(t, "Sets the maximum number of background processes that the system can support",
+		config.PG.MaxWorkerProcesses.Description)
+	assert.Equal(t, 96, config.PG.MaxWorkerProcesses.Maximum)
+	assert.Equal(t, 8, config.PG.MaxWorkerProcesses.Minimum)
+	assert.False(t, config.PG.MaxWorkerProcesses.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.MaxWorkerProcesses.Type)
+
+	assert.Equal(t, "Chooses the algorithm for encrypting passwords.",
+		config.PG.PasswordEncryption.Description)
+	assert.Equal(t, []string{"md5", "scram-sha-256"}, config.PG.PasswordEncryption.Enum)
+	assert.Equal(t, "scram-sha-256", config.PG.PasswordEncryption.Example)
+	assert.False(t, config.PG.PasswordEncryption.RequiresRestart)
+	assert.Equal(t, []string{"string", "null"}, config.PG.PasswordEncryption.Type)
+
+	assert.Equal(t, "Sets the time interval to run pg_partman's scheduled tasks",
+		config.PG.PGPartmanBGWInterval.Description)
+	assert.Equal(t, 3600, config.PG.PGPartmanBGWInterval.Example)
+	assert.Equal(t, 604800, config.PG.PGPartmanBGWInterval.Maximum)
+	assert.Equal(t, 3600, config.PG.PGPartmanBGWInterval.Minimum)
+	assert.False(t, config.PG.PGPartmanBGWInterval.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.PGPartmanBGWInterval.Type)
+
+	assert.Equal(t, "Controls which role to use for pg_partman's scheduled background tasks.",
+		config.PG.PGPartmanBGWRole.Description)
+	assert.Equal(t, "myrolename", config.PG.PGPartmanBGWRole.Example)
+	assert.Equal(t, 64, config.PG.PGPartmanBGWRole.MaxLength)
+	assert.Equal(t, "^[_A-Za-z0-9][-._A-Za-z0-9]{0,63}$", config.PG.PGPartmanBGWRole.Pattern)
+	assert.False(t, config.PG.PGPartmanBGWRole.RequiresRestart)
+	assert.Equal(t, "string", config.PG.PGPartmanBGWRole.Type)
+
+	assert.Equal(t, "Enables or disables query plan monitoring",
+		config.PG.PGStatMonitorPGSMEnableQueryPlan.Description)
+	assert.Equal(t, false, config.PG.PGStatMonitorPGSMEnableQueryPlan.Example)
+	assert.False(t, config.PG.PGStatMonitorPGSMEnableQueryPlan.RequiresRestart)
+	assert.Equal(t, "boolean", config.PG.PGStatMonitorPGSMEnableQueryPlan.Type)
+
+	assert.Equal(t, "Sets the maximum number of buckets",
+		config.PG.PGStatMonitorPGSMMaxBuckets.Description)
+	assert.Equal(t, 10, config.PG.PGStatMonitorPGSMMaxBuckets.Example)
+	assert.Equal(t, 10, config.PG.PGStatMonitorPGSMMaxBuckets.Maximum)
+	assert.Equal(t, 1, config.PG.PGStatMonitorPGSMMaxBuckets.Minimum)
+	assert.False(t, config.PG.PGStatMonitorPGSMMaxBuckets.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.PGStatMonitorPGSMMaxBuckets.Type)
+
+	assert.Equal(t, "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.",
+		config.PG.PGStatStatementsTrack.Description)
+	assert.Equal(t, []string{"all", "top", "none"}, config.PG.PGStatStatementsTrack.Enum)
+	assert.False(t, config.PG.PGStatStatementsTrack.RequiresRestart)
+	assert.Equal(t, []string{"string"}, config.PG.PGStatStatementsTrack.Type)
+
+	assert.Equal(t, "PostgreSQL temporary file limit in KiB, -1 for unlimited",
+		config.PG.TempFileLimit.Description)
+	assert.Equal(t, int32(5000000), config.PG.TempFileLimit.Example)
+	assert.Equal(t, int32(2147483647), config.PG.TempFileLimit.Maximum)
+	assert.Equal(t, int32(-1), config.PG.TempFileLimit.Minimum)
+	assert.False(t, config.PG.TempFileLimit.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.TempFileLimit.Type)
+
+	assert.Equal(t, "PostgreSQL service timezone",
+		config.PG.Timezone.Description)
+	assert.Equal(t, "Europe/Helsinki", config.PG.Timezone.Example)
+	assert.Equal(t, 64, config.PG.Timezone.MaxLength)
+	assert.Equal(t, "^[\\w/]*$", config.PG.Timezone.Pattern)
+	assert.False(t, config.PG.Timezone.RequiresRestart)
+	assert.Equal(t, "string", config.PG.Timezone.Type)
+
+	assert.Equal(t, "Specifies the number of bytes reserved to track the currently executing command for each active session.",
+		config.PG.TrackActivityQuerySize.Description)
+	assert.Equal(t, 1024, config.PG.TrackActivityQuerySize.Example)
+	assert.Equal(t, 10240, config.PG.TrackActivityQuerySize.Maximum)
+	assert.Equal(t, 1024, config.PG.TrackActivityQuerySize.Minimum)
+	assert.False(t, config.PG.TrackActivityQuerySize.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.TrackActivityQuerySize.Type)
+
+	assert.Equal(t, "Record commit time of transactions.",
+		config.PG.TrackCommitTimestamp.Description)
+	assert.Equal(t, "off", config.PG.TrackCommitTimestamp.Example)
+	assert.Equal(t, []string{"off", "on"}, config.PG.TrackCommitTimestamp.Enum)
+	assert.False(t, config.PG.TrackCommitTimestamp.RequiresRestart)
+	assert.Equal(t, "string", config.PG.TrackCommitTimestamp.Type)
+
+	assert.Equal(t, "Enables tracking of function call counts and time used.",
+		config.PG.TrackFunctions.Description)
+	assert.Equal(t, []string{"all", "pl", "none"}, config.PG.TrackFunctions.Enum)
+	assert.False(t, config.PG.TrackFunctions.RequiresRestart)
+	assert.Equal(t, "string", config.PG.TrackFunctions.Type)
+
+	assert.Equal(t, "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.",
+		config.PG.TrackIOTiming.Description)
+	assert.Equal(t, "off", config.PG.TrackIOTiming.Example)
+	assert.Equal(t, []string{"off", "on"}, config.PG.TrackIOTiming.Enum)
+	assert.False(t, config.PG.TrackIOTiming.RequiresRestart)
+	assert.Equal(t, "string", config.PG.TrackIOTiming.Type)
+
+	assert.Equal(t, "Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.",
+		config.PG.WALSenderTimeout.Description)
+	assert.Equal(t, 60000, config.PG.WALSenderTimeout.Example)
+	assert.False(t, config.PG.WALSenderTimeout.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.WALSenderTimeout.Type)
+
+	assert.Equal(t, "WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance",
+		config.PG.WALWriterDelay.Description)
+	assert.Equal(t, 50, config.PG.WALWriterDelay.Example)
+	assert.Equal(t, 200, config.PG.WALWriterDelay.Maximum)
+	assert.Equal(t, 10, config.PG.WALWriterDelay.Minimum)
+	assert.False(t, config.PG.WALWriterDelay.RequiresRestart)
+	assert.Equal(t, "integer", config.PG.WALWriterDelay.Type)
+
+	assert.Equal(t, "Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable",
+		config.PGStatMonitorEnable.Description)
+	assert.True(t, config.PGStatMonitorEnable.RequiresRestart)
+	assert.Equal(t, "boolean", config.PGStatMonitorEnable.Type)
+
+	assert.Equal(t, "Number of seconds of master unavailability before triggering database failover to standby",
+		config.PGLookout.PGLookoutMaxFailoverReplicationTimeLag.Description)
+	assert.Equal(t, int64(9223372036854775000), config.PGLookout.PGLookoutMaxFailoverReplicationTimeLag.Maximum)
+	assert.Equal(t, int64(10), config.PGLookout.PGLookoutMaxFailoverReplicationTimeLag.Minimum)
+	assert.False(t, config.PGLookout.PGLookoutMaxFailoverReplicationTimeLag.RequiresRestart)
+	assert.Equal(t, "integer", config.PGLookout.PGLookoutMaxFailoverReplicationTimeLag.Type)
+
+	assert.Equal(t, "Store logs for the service so that they are available in the HTTP API and console.",
+		config.ServiceLog.Description)
+	assert.Equal(t, true, config.ServiceLog.Example)
+	assert.False(t, config.ServiceLog.RequiresRestart)
+	assert.Equal(t, []string{"boolean", "null"}, config.ServiceLog.Type)
+
+	assert.Equal(t, "Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.",
+		config.SharedBuffersPercentage.Description)
+	assert.Equal(t, 41.5, config.SharedBuffersPercentage.Example)
+	assert.Equal(t, 60.0, config.SharedBuffersPercentage.Maximum)
+	assert.Equal(t, 20.0, config.SharedBuffersPercentage.Minimum)
+	assert.False(t, config.SharedBuffersPercentage.RequiresRestart)
+	assert.Equal(t, "number", config.SharedBuffersPercentage.Type)
+
+	assert.Equal(t, "Synchronous replication type. Note that the service plan also needs to support synchronous replication.",
+		config.SynchronousReplication.Description)
+	assert.Equal(t, "off", config.SynchronousReplication.Example)
+	assert.Equal(t, []string{"quorum", "off"}, config.SynchronousReplication.Enum)
+	assert.False(t, config.SynchronousReplication.RequiresRestart)
+	assert.Equal(t, "string", config.SynchronousReplication.Type)
+
+	assert.Equal(t, "Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).",
+		config.WorkMem.Description)
+	assert.Equal(t, 4, config.WorkMem.Example)
+	assert.Equal(t, 1024, config.WorkMem.Maximum)
+	assert.Equal(t, 1, config.WorkMem.Minimum)
+	assert.False(t, config.WorkMem.RequiresRestart)
+	assert.Equal(t, "integer", config.WorkMem.Type)
+}
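A second sketch (also not part of the patch): the metadata returned by the new `GetPostgresDatabaseConfig` method can be consulted before choosing a value. The method and the `config.PG.AutovacuumFreezeMaxAge` fields (`Minimum`, `Maximum`, `RequiresRestart`, `Type`) are exercised by the test above; treating `Minimum`/`Maximum` as plain `int` values is inferred from those assertions, and the client setup and the `desired` value are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"os"

	"github.com/linode/linodego"
)

func main() {
	// Illustrative client setup; the token source is an assumption.
	client := linodego.NewClient(nil)
	client.SetToken(os.Getenv("LINODE_TOKEN"))

	cfg, err := client.GetPostgresDatabaseConfig(context.Background())
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical value a caller wants to apply for autovacuum_freeze_max_age.
	desired := 400000000
	field := cfg.PG.AutovacuumFreezeMaxAge

	// Validate against the documented bounds before sending an update.
	if desired < field.Minimum || desired > field.Maximum {
		log.Fatalf("autovacuum_freeze_max_age %d is outside the allowed range [%d, %d]",
			desired, field.Minimum, field.Maximum)
	}
	// Surface the restart warning the metadata carries.
	if field.RequiresRestart {
		fmt.Println("note: applying autovacuum_freeze_max_age restarts the cluster")
	}
	fmt.Printf("autovacuum_freeze_max_age=%d is within [%d, %d]\n", desired, field.Minimum, field.Maximum)
}
```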