diff --git a/integration/alertmanager_test.go b/integration/alertmanager_test.go
index 0d6313b808e..e1790f005a2 100644
--- a/integration/alertmanager_test.go
+++ b/integration/alertmanager_test.go
@@ -25,8 +25,8 @@ func TestAlertmanager(t *testing.T) {
 	alertmanager := e2ecortex.NewAlertmanager(
 		"alertmanager",
 		mergeFlags(
-			AlertmanagerFlags,
-			AlertmanagerLocalFlags,
+			AlertmanagerFlags(),
+			AlertmanagerLocalFlags(),
 		),
 		"",
 	)
@@ -56,15 +56,14 @@ func TestAlertmanagerStoreAPI(t *testing.T) {
 	require.NoError(t, err)
 	defer s.Close()

-	minio := e2edb.NewMinio(9000, AlertmanagerS3Flags["-alertmanager.storage.s3.buckets"])
+	flags := mergeFlags(AlertmanagerFlags(), AlertmanagerS3Flags())
+
+	minio := e2edb.NewMinio(9000, flags["-alertmanager.storage.s3.buckets"])
 	require.NoError(t, s.StartAndWaitReady(minio))

 	am := e2ecortex.NewAlertmanager(
 		"alertmanager",
-		mergeFlags(
-			AlertmanagerFlags,
-			AlertmanagerS3Flags,
-		),
+		flags,
 		"",
 	)

diff --git a/integration/backward_compatibility_test.go b/integration/backward_compatibility_test.go
index 6c56589282e..52502d302d1 100644
--- a/integration/backward_compatibility_test.go
+++ b/integration/backward_compatibility_test.go
@@ -41,7 +41,7 @@ func preCortex14Flags(flags map[string]string) map[string]string {
 func TestBackwardCompatibilityWithChunksStorage(t *testing.T) {
 	for previousImage, flagsFn := range previousVersionImages {
 		t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) {
-			flags := ChunksStorageFlags
+			flags := ChunksStorageFlags()
 			if flagsFn != nil {
 				flags = flagsFn(flags)
 			}
@@ -54,7 +54,7 @@ func TestBackwardCompatibilityWithChunksStorage(t *testing.T) {
 func TestNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T) {
 	for previousImage, flagsFn := range previousVersionImages {
 		t.Run(fmt.Sprintf("Backward compatibility upgrading from %s", previousImage), func(t *testing.T) {
-			flags := ChunksStorageFlags
+			flags := ChunksStorageFlags()
 			if flagsFn != nil {
 				flags = flagsFn(flags)
 			}
@@ -78,7 +78,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s

 	// Start Cortex table-manager (running on current version since the backward compatibility
 	// test is about testing a rolling update of other services).
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))

 	// Wait until the first table-manager sync has completed, so that we're
@@ -87,7 +87,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s

 	// Start other Cortex components (ingester running on previous version).
 	ingester1 := e2ecortex.NewIngester("ingester-1", consul.NetworkHTTPEndpoint(), flagsForOldImage, previousImage)
-	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "")
+	distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(distributor, ingester1))

 	// Wait until the distributor has updated the ring.
@@ -104,7 +104,7 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 	require.NoError(t, err)
 	require.Equal(t, 200, res.StatusCode)

-	ingester2 := e2ecortex.NewIngester("ingester-2", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags, map[string]string{
+	ingester2 := e2ecortex.NewIngester("ingester-2", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-ingester.join-after": "10s",
 	}), "")
 	// Start ingester-2 on new version, to ensure the transfer is backward compatible.
@@ -117,7 +117,8 @@ func runBackwardCompatibilityTestWithChunksStorage(t *testing.T, previousImage s
 	checkQueries(t, consul,
 		distributor,
 		expectedVector,
 		previousImage,
-		flagsForOldImage, ChunksStorageFlags,
+		flagsForOldImage,
+		ChunksStorageFlags(),
 		now,
 		s,
 		1,
@@ -135,7 +136,7 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(dynamo, consul))

-	flagsForNewImage := mergeFlags(ChunksStorageFlags, map[string]string{
+	flagsForNewImage := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-distributor.replication-factor": "3",
 	})

@@ -143,7 +144,7 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo

 	// Start Cortex table-manager (running on current version since the backward compatibility
 	// test is about testing a rolling update of other services).
-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))

 	// Wait until the first table-manager sync has completed, so that we're
@@ -174,7 +175,8 @@ func runNewDistributorsCanPushToOldIngestersWithReplication(t *testing.T, previo
 	checkQueries(t, consul,
 		distributor,
 		expectedVector,
 		previousImage,
-		flagsForPreviousImage, flagsForNewImage,
+		flagsForPreviousImage,
+		flagsForNewImage,
 		now,
 		s,
 		3,
diff --git a/integration/chunks_delete_series_test.go b/integration/chunks_delete_series_test.go
index 2ed12899979..22e1feff6df 100644
--- a/integration/chunks_delete_series_test.go
+++ b/integration/chunks_delete_series_test.go
@@ -43,7 +43,7 @@ func TestDeleteSeriesAllIndexBackends(t *testing.T) {
 		storeConfigs[i] = storeConfig{From: oldestStoreStartTime.Add(time.Duration(i) * perStoreDuration).UTC().Format("2006-01-02"), IndexStore: store}
 	}

-	flags := mergeFlags(ChunksStorageFlags, map[string]string{
+	flags := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-cassandra.addresses": cassandra.NetworkHTTPEndpoint(),
 		"-cassandra.keyspace": "tests", // keyspace gets created on startup if it does not exist
 		"-cassandra.replication-factor": "1",
diff --git a/integration/chunks_storage_backends_test.go b/integration/chunks_storage_backends_test.go
index c92d034550e..45198ae0b69 100644
--- a/integration/chunks_storage_backends_test.go
+++ b/integration/chunks_storage_backends_test.go
@@ -60,7 +60,7 @@ func TestChunksStorageAllIndexBackends(t *testing.T) {
 		storeConfigs[i] = storeConfig{From: oldestStoreStartTime.Add(time.Duration(i) * perStoreDuration).Format("2006-01-02"), IndexStore: store}
 	}

-	storageFlags := mergeFlags(ChunksStorageFlags, map[string]string{
+	storageFlags := mergeFlags(ChunksStorageFlags(), map[string]string{
 		"-cassandra.addresses": cassandra.NetworkHTTPEndpoint(),
 		"-cassandra.keyspace": "tests", // keyspace gets created on startup if it does not exist
 		"-cassandra.replication-factor": "1",
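The mechanical `()` changes in the test files above and below all serve one fix, made in `integration/configs.go` next: the shared flag maps become factory functions. Tests mutate the flag maps they are handed (see `ingester_sharding_test.go` further down), so a package-level map lets one test's overrides leak into every later test. A minimal self-contained sketch of the hazard and the fix; the names `SharedFlags` and `FreshFlags` are illustrative, not from this patch:

```go
package main

import "fmt"

// Before: a package-level map aliased by every test that uses it.
var SharedFlags = map[string]string{"-store.engine": "chunks"}

// After: a factory that hands each caller its own copy.
func FreshFlags() map[string]string {
	return map[string]string{"-store.engine": "chunks"}
}

func main() {
	a := SharedFlags
	a["-store.engine"] = "blocks" // mutates the shared map itself

	fmt.Println(SharedFlags["-store.engine"]) // "blocks": pollution visible to later tests

	b := FreshFlags()
	b["-store.engine"] = "blocks" // local to this copy only

	fmt.Println(FreshFlags()["-store.engine"]) // "chunks": each call is isolated
}
```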
diff --git a/integration/configs.go b/integration/configs.go
index 64029d535d7..da084871de6 100644
--- a/integration/configs.go
+++ b/integration/configs.go
@@ -90,46 +90,56 @@ receivers:
 var (
 	cortexSchemaConfigYaml = buildSchemaConfigWith([]storeConfig{{From: "2019-03-20", IndexStore: "aws-dynamo"}})

-	AlertmanagerFlags = map[string]string{
-		"-alertmanager.configs.poll-interval": "1s",
-		"-alertmanager.web.external-url": "http://localhost/api/prom",
+	AlertmanagerFlags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.configs.poll-interval": "1s",
+			"-alertmanager.web.external-url": "http://localhost/api/prom",
+		}
 	}

-	AlertmanagerLocalFlags = map[string]string{
-		"-alertmanager.storage.type": "local",
-		"-alertmanager.storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+	AlertmanagerLocalFlags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.storage.type": "local",
+			"-alertmanager.storage.local.path": filepath.Join(e2e.ContainerSharedDir, "alertmanager_configs"),
+		}
 	}

-	AlertmanagerS3Flags = map[string]string{
-		"-alertmanager.storage.type": "s3",
-		"-alertmanager.storage.s3.buckets": "cortex-alerts",
-		"-alertmanager.storage.s3.force-path-style": "true",
-		"-alertmanager.storage.s3.url": fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+	AlertmanagerS3Flags = func() map[string]string {
+		return map[string]string{
+			"-alertmanager.storage.type": "s3",
+			"-alertmanager.storage.s3.buckets": "cortex-alerts",
+			"-alertmanager.storage.s3.force-path-style": "true",
+			"-alertmanager.storage.s3.url": fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+		}
 	}

-	RulerConfigs = map[string]string{
-		"-ruler.enable-sharding": "false",
-		"-ruler.poll-interval": "2s",
-		"-experimental.ruler.enable-api": "true",
-		"-ruler.storage.type": "s3",
-		"-ruler.storage.s3.buckets": "cortex-rules",
-		"-ruler.storage.s3.force-path-style": "true",
-		"-ruler.storage.s3.url": fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+	RulerFlags = func() map[string]string {
+		return map[string]string{
+			"-ruler.enable-sharding": "false",
+			"-ruler.poll-interval": "2s",
+			"-experimental.ruler.enable-api": "true",
+			"-ruler.storage.type": "s3",
+			"-ruler.storage.s3.buckets": "cortex-rules",
+			"-ruler.storage.s3.force-path-style": "true",
+			"-ruler.storage.s3.url": fmt.Sprintf("s3://%s:%s@%s-minio-9000.:9000", e2edb.MinioAccessKey, e2edb.MinioSecretKey, networkName),
+		}
 	}

-	BlocksStorageFlags = map[string]string{
-		"-store.engine": blocksStorageEngine,
-		"-blocks-storage.backend": "s3",
-		"-blocks-storage.tsdb.block-ranges-period": "1m",
-		"-blocks-storage.bucket-store.sync-interval": "5s",
-		"-blocks-storage.tsdb.retention-period": "5m",
-		"-blocks-storage.tsdb.ship-interval": "1m",
-		"-blocks-storage.tsdb.head-compaction-interval": "1s",
-		"-blocks-storage.s3.access-key-id": e2edb.MinioAccessKey,
-		"-blocks-storage.s3.secret-access-key": e2edb.MinioSecretKey,
-		"-blocks-storage.s3.bucket-name": bucketName,
-		"-blocks-storage.s3.endpoint": fmt.Sprintf("%s-minio-9000:9000", networkName),
-		"-blocks-storage.s3.insecure": "true",
+	BlocksStorageFlags = func() map[string]string {
+		return map[string]string{
+			"-store.engine": blocksStorageEngine,
+			"-blocks-storage.backend": "s3",
+			"-blocks-storage.tsdb.block-ranges-period": "1m",
+			"-blocks-storage.bucket-store.sync-interval": "5s",
"-blocks-storage.tsdb.retention-period": "5m", + "-blocks-storage.tsdb.ship-interval": "1m", + "-blocks-storage.tsdb.head-compaction-interval": "1s", + "-blocks-storage.s3.access-key-id": e2edb.MinioAccessKey, + "-blocks-storage.s3.secret-access-key": e2edb.MinioSecretKey, + "-blocks-storage.s3.bucket-name": bucketName, + "-blocks-storage.s3.endpoint": fmt.Sprintf("%s-minio-9000:9000", networkName), + "-blocks-storage.s3.insecure": "true", + } } BlocksStorageConfig = buildConfigFromTemplate(` @@ -163,11 +173,13 @@ blocks_storage: MinioEndpoint: fmt.Sprintf("%s-minio-9000:9000", networkName), }) - ChunksStorageFlags = map[string]string{ - "-dynamodb.url": fmt.Sprintf("dynamodb://u:p@%s-dynamodb.:8000", networkName), - "-table-manager.poll-interval": "1m", - "-schema-config-file": filepath.Join(e2e.ContainerSharedDir, cortexSchemaConfigFile), - "-table-manager.retention-period": "168h", + ChunksStorageFlags = func() map[string]string { + return map[string]string{ + "-dynamodb.url": fmt.Sprintf("dynamodb://u:p@%s-dynamodb.:8000", networkName), + "-table-manager.poll-interval": "1m", + "-schema-config-file": filepath.Join(e2e.ContainerSharedDir, cortexSchemaConfigFile), + "-table-manager.retention-period": "168h", + } } ChunksStorageConfig = buildConfigFromTemplate(` diff --git a/integration/ingester_flush_test.go b/integration/ingester_flush_test.go index 5bd3dc7df96..cd713e8656a 100644 --- a/integration/ingester_flush_test.go +++ b/integration/ingester_flush_test.go @@ -32,12 +32,12 @@ func TestIngesterFlushWithChunksStorage(t *testing.T) { // Start Cortex components. require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml))) - tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "") - ingester := e2ecortex.NewIngester("ingester", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags, map[string]string{ + tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "") + ingester := e2ecortex.NewIngester("ingester", consul.NetworkHTTPEndpoint(), mergeFlags(ChunksStorageFlags(), map[string]string{ "-ingester.max-transfer-retries": "0", }), "") - querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "") - distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "") + querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "") + distributor := e2ecortex.NewDistributor("distributor", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "") require.NoError(t, s.StartAndWaitReady(distributor, querier, ingester, tableManager)) // Wait until the first table-manager sync has completed, so that we're diff --git a/integration/ingester_hand_over_test.go b/integration/ingester_hand_over_test.go index 81255300fa1..8e362eff024 100644 --- a/integration/ingester_hand_over_test.go +++ b/integration/ingester_hand_over_test.go @@ -17,13 +17,13 @@ import ( ) func TestIngesterHandOverWithChunksStorage(t *testing.T) { - runIngesterHandOverTest(t, ChunksStorageFlags, func(t *testing.T, s *e2e.Scenario) { + runIngesterHandOverTest(t, ChunksStorageFlags(), func(t *testing.T, s *e2e.Scenario) { dynamo := e2edb.NewDynamoDB() require.NoError(t, s.StartAndWaitReady(dynamo)) require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml))) - tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "") + tableManager := 
e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "") require.NoError(t, s.StartAndWaitReady(tableManager)) // Wait until the first table-manager sync has completed, so that we're diff --git a/integration/ingester_sharding_test.go b/integration/ingester_sharding_test.go index 4b60c5e8f2a..337e78d119b 100644 --- a/integration/ingester_sharding_test.go +++ b/integration/ingester_sharding_test.go @@ -44,7 +44,7 @@ func TestIngesterSharding(t *testing.T) { require.NoError(t, err) defer s.Close() - flags := BlocksStorageFlags + flags := BlocksStorageFlags() flags["-distributor.shard-by-all-labels"] = "true" flags["-distributor.sharding-strategy"] = testData.shardingStrategy flags["-distributor.ingestion-tenant-shard-size"] = strconv.Itoa(testData.tenantShardSize) diff --git a/integration/integration_memberlist_single_binary_test.go b/integration/integration_memberlist_single_binary_test.go index dcf9f270f43..883db8c0bb6 100644 --- a/integration/integration_memberlist_single_binary_test.go +++ b/integration/integration_memberlist_single_binary_test.go @@ -77,7 +77,7 @@ func newSingleBinary(name string, join string) *e2ecortex.CortexService { serv := e2ecortex.NewSingleBinary( name, - mergeFlags(ChunksStorageFlags, flags), + mergeFlags(ChunksStorageFlags(), flags), "", 8000, ) diff --git a/integration/querier_remote_read_test.go b/integration/querier_remote_read_test.go index 5bc0388ebff..ae2b31cef23 100644 --- a/integration/querier_remote_read_test.go +++ b/integration/querier_remote_read_test.go @@ -29,7 +29,7 @@ func TestQuerierRemoteRead(t *testing.T) { defer s.Close() require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml))) - flags := mergeFlags(ChunksStorageFlags, map[string]string{}) + flags := mergeFlags(ChunksStorageFlags(), map[string]string{}) // Start dependencies. dynamo := e2edb.NewDynamoDB() @@ -37,7 +37,7 @@ func TestQuerierRemoteRead(t *testing.T) { consul := e2edb.NewConsul() require.NoError(t, s.StartAndWaitReady(consul, dynamo)) - tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "") + tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "") require.NoError(t, s.StartAndWaitReady(tableManager)) // Wait until the first table-manager sync has completed, so that we're @@ -63,7 +63,7 @@ func TestQuerierRemoteRead(t *testing.T) { require.NoError(t, err) require.Equal(t, 200, res.StatusCode) - querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags, "") + querier := e2ecortex.NewQuerier("querier", consul.NetworkHTTPEndpoint(), ChunksStorageFlags(), "") require.NoError(t, s.StartAndWaitReady(querier)) // Wait until the querier has updated the ring. 
diff --git a/integration/querier_sharding_test.go b/integration/querier_sharding_test.go
index d205b7e16f2..82ead391755 100644
--- a/integration/querier_sharding_test.go
+++ b/integration/querier_sharding_test.go
@@ -39,12 +39,7 @@ func runQuerierShardingTest(t *testing.T, sharding bool) {
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(consul, memcached))

-	minio := e2edb.NewMinio(9000, BlocksStorageFlags["-blocks-storage.s3.bucket-name"])
-	require.NoError(t, s.StartAndWaitReady(minio))
-
-	flags := BlocksStorageFlags
-
-	flags = mergeFlags(flags, map[string]string{
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
 		"-querier.cache-results": "true",
 		"-querier.split-queries-by-interval": "24h",
 		"-querier.query-ingesters-within": "12h", // Required by the test on query /series out of ingesters time range
 		"-querier.max-outstanding-requests-per-tenant": strconv.Itoa(numQueries), // To avoid getting errors.
 	})

@@ -52,6 +47,9 @@
+	minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"])
+	require.NoError(t, s.StartAndWaitReady(minio))
+
 	if sharding {
 		// Use only single querier for each user.
 		flags["-frontend.max-queriers-per-tenant"] = "1"
diff --git a/integration/querier_streaming_mixed_ingester_test.go b/integration/querier_streaming_mixed_ingester_test.go
index 16e3d321658..ee347171c51 100644
--- a/integration/querier_streaming_mixed_ingester_test.go
+++ b/integration/querier_streaming_mixed_ingester_test.go
@@ -26,9 +26,8 @@ func TestQuerierWithStreamingBlocksAndChunksIngesters(t *testing.T) {
 	defer s.Close()

 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	chunksFlags := mergeFlags(ChunksStorageFlags, map[string]string{})
-
-	blockFlags := mergeFlags(BlocksStorageFlags, map[string]string{
+	chunksFlags := ChunksStorageFlags()
+	blockFlags := mergeFlags(BlocksStorageFlags(), map[string]string{
 		"-blocks-storage.tsdb.block-ranges-period": "1h",
 		"-blocks-storage.tsdb.head-compaction-interval": "1m",
 		"-store-gateway.sharding-enabled": "false",
diff --git a/integration/querier_test.go b/integration/querier_test.go
index 1e1d331487a..887725a9de4 100644
--- a/integration/querier_test.go
+++ b/integration/querier_test.go
@@ -71,7 +71,7 @@ func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) {

 	// Configure the blocks storage to frequently compact TSDB head
 	// and ship blocks to the storage.
-	flags := mergeFlags(BlocksStorageFlags, map[string]string{
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
 		"-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(),
 		"-blocks-storage.tsdb.ship-interval": "1s",
 		"-blocks-storage.bucket-store.sync-interval": "1s",
@@ -281,7 +281,7 @@ func TestQuerierWithBlocksStorageRunningInSingleBinaryMode(t *testing.T) {

 			// Configure the blocks storage to frequently compact TSDB head
 			// and ship blocks to the storage.
-			flags := mergeFlags(BlocksStorageFlags, map[string]string{
+			flags := mergeFlags(BlocksStorageFlags(), map[string]string{
 				"-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(),
 				"-blocks-storage.tsdb.ship-interval": "1s",
 				"-blocks-storage.bucket-store.sync-interval": "1s",
@@ -423,7 +423,7 @@ func TestQuerierWithBlocksStorageOnMissingBlocksFromStorage(t *testing.T) {

 	// Configure the blocks storage to frequently compact TSDB head
 	// and ship blocks to the storage.
-	flags := mergeFlags(BlocksStorageFlags, map[string]string{
+	flags := mergeFlags(BlocksStorageFlags(), map[string]string{
 		"-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(),
 		"-blocks-storage.tsdb.ship-interval": "1s",
 		"-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(),
@@ -509,7 +509,7 @@ func TestQuerierWithChunksStorage(t *testing.T) {
 	defer s.Close()

 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	flags := mergeFlags(ChunksStorageFlags, map[string]string{})
+	flags := ChunksStorageFlags()

 	// Start dependencies.
 	dynamo := e2edb.NewDynamoDB()
@@ -517,7 +517,7 @@ func TestQuerierWithChunksStorage(t *testing.T) {
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(consul, dynamo))

-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", flags, "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))

 	// Wait until the first table-manager sync has completed, so that we're
@@ -626,7 +626,7 @@ func TestHashCollisionHandling(t *testing.T) {
 	defer s.Close()

 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	flags := mergeFlags(ChunksStorageFlags, map[string]string{})
+	flags := ChunksStorageFlags()

 	// Start dependencies.
 	dynamo := e2edb.NewDynamoDB()
@@ -634,7 +634,7 @@ func TestHashCollisionHandling(t *testing.T) {
 	consul := e2edb.NewConsul()
 	require.NoError(t, s.StartAndWaitReady(consul, dynamo))

-	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+	tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags(), "")
 	require.NoError(t, s.StartAndWaitReady(tableManager))

 	// Wait until the first table-manager sync has completed, so that we're
diff --git a/integration/query_frontend_test.go b/integration/query_frontend_test.go
index b9fe085e2a8..3eb91c4340f 100644
--- a/integration/query_frontend_test.go
+++ b/integration/query_frontend_test.go
@@ -29,10 +29,12 @@ type queryFrontendSetup func(t *testing.T, s *e2e.Scenario) (configFile string, 

 func TestQueryFrontendWithBlocksStorageViaFlags(t *testing.T) {
 	runQueryFrontendTest(t, false, func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) {
-		minio := e2edb.NewMinio(9000, BlocksStorageFlags["-blocks-storage.s3.bucket-name"])
+		flags = BlocksStorageFlags()
+
+		minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"])
 		require.NoError(t, s.StartAndWaitReady(minio))

-		return "", BlocksStorageFlags
+		return "", flags
 	})
 }

@@ -40,7 +42,7 @@ func TestQueryFrontendWithBlocksStorageViaConfigFile(t *testing.T) {
 	runQueryFrontendTest(t, false, func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) {
 		require.NoError(t, writeFileToSharedDir(s, cortexConfigFile, []byte(BlocksStorageConfig)))

-		minio := e2edb.NewMinio(9000, BlocksStorageFlags["-blocks-storage.s3.bucket-name"])
+		minio := e2edb.NewMinio(9000, BlocksStorageFlags()["-blocks-storage.s3.bucket-name"])
 		require.NoError(t, s.StartAndWaitReady(minio))

 		return cortexConfigFile, e2e.EmptyFlags()
@@ -54,14 +56,15 @@ func TestQueryFrontendWithChunksStorageViaFlags(t *testing.T) {
 		dynamo := e2edb.NewDynamoDB()
 		require.NoError(t, s.StartAndWaitReady(dynamo))

-		tableManager := e2ecortex.NewTableManager("table-manager", ChunksStorageFlags, "")
+		flags = ChunksStorageFlags()
+		tableManager := e2ecortex.NewTableManager("table-manager", flags, "")
 		require.NoError(t, s.StartAndWaitReady(tableManager))

 		// Wait until the first table-manager sync has completed, so that we're
 		// sure the tables have been created.
 		require.NoError(t, tableManager.WaitSumMetrics(e2e.Greater(0), "cortex_table_manager_sync_success_timestamp_seconds"))

-		return "", ChunksStorageFlags
+		return "", flags
 	})
 }

@@ -86,7 +89,14 @@ func TestQueryFrontendWithChunksStorageViaConfigFile(t *testing.T) {

 func TestQueryFrontendTLSWithBlocksStorageViaFlags(t *testing.T) {
 	runQueryFrontendTest(t, false, func(t *testing.T, s *e2e.Scenario) (configFile string, flags map[string]string) {
-		minio := e2edb.NewMinio(9000, BlocksStorageFlags["-blocks-storage.s3.bucket-name"])
+		flags = mergeFlags(
+			BlocksStorageFlags(),
+			getServerTLSFlags(),
+			getClientTLSFlagsWithPrefix("ingester.client"),
+			getClientTLSFlagsWithPrefix("querier.frontend-client"),
+		)
+
+		minio := e2edb.NewMinio(9000, flags["-blocks-storage.s3.bucket-name"])
 		require.NoError(t, s.StartAndWaitReady(minio))

 		// set the ca
@@ -116,12 +126,7 @@ func TestQueryFrontendTLSWithBlocksStorageViaFlags(t *testing.T) {
 			filepath.Join(s.SharedDir(), serverKeyFile),
 		))

-		return "", mergeFlags(
-			BlocksStorageFlags,
-			getServerTLSFlags(),
-			getClientTLSFlagsWithPrefix("ingester.client"),
-			getClientTLSFlagsWithPrefix("querier.frontend-client"),
-		)
+		return "", flags
 	})
 }

diff --git a/integration/ruler_test.go b/integration/ruler_test.go
index 93f2400297a..023d61f4463 100644
--- a/integration/ruler_test.go
+++ b/integration/ruler_test.go
@@ -32,12 +32,12 @@ func TestRulerAPI(t *testing.T) {

 	// Start dependencies.
 	dynamo := e2edb.NewDynamoDB()
-	minio := e2edb.NewMinio(9000, RulerConfigs["-ruler.storage.s3.buckets"])
+	minio := e2edb.NewMinio(9000, RulerFlags()["-ruler.storage.s3.buckets"])
 	require.NoError(t, s.StartAndWaitReady(minio, dynamo))

 	// Start Cortex components.
 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	ruler := e2ecortex.NewRuler("ruler", mergeFlags(ChunksStorageFlags, RulerConfigs), "")
+	ruler := e2ecortex.NewRuler("ruler", mergeFlags(ChunksStorageFlags(), RulerFlags()), "")
 	require.NoError(t, s.StartAndWaitReady(ruler))

 	// Create a client with the ruler address configured
@@ -207,17 +207,17 @@ func TestRulerAlertmanager(t *testing.T) {

 	// Start dependencies.
 	dynamo := e2edb.NewDynamoDB()
-	minio := e2edb.NewMinio(9000, RulerConfigs["-ruler.storage.s3.buckets"])
+	minio := e2edb.NewMinio(9000, RulerFlags()["-ruler.storage.s3.buckets"])
 	require.NoError(t, s.StartAndWaitReady(minio, dynamo))

 	// Have at least one alertmanager configuration.
 	require.NoError(t, writeFileToSharedDir(s, "alertmanager_configs/user-1.yaml", []byte(cortexAlertmanagerUserConfigYaml)))

 	// Start Alertmanagers.
-	am1 := e2ecortex.NewAlertmanager("alertmanager1", mergeFlags(AlertmanagerFlags, AlertmanagerLocalFlags), "")
-	require.NoError(t, s.StartAndWaitReady(am1))
-	am2 := e2ecortex.NewAlertmanager("alertmanager2", mergeFlags(AlertmanagerFlags, AlertmanagerLocalFlags), "")
-	require.NoError(t, s.StartAndWaitReady(am2))
+	amFlags := mergeFlags(AlertmanagerFlags(), AlertmanagerLocalFlags())
+	am1 := e2ecortex.NewAlertmanager("alertmanager1", amFlags, "")
+	am2 := e2ecortex.NewAlertmanager("alertmanager2", amFlags, "")
+	require.NoError(t, s.StartAndWaitReady(am1, am2))

 	am1URL := "http://" + am1.HTTPEndpoint()
 	am2URL := "http://" + am2.HTTPEndpoint()
@@ -229,7 +229,7 @@ func TestRulerAlertmanager(t *testing.T) {

 	// Start Ruler.
 	require.NoError(t, writeFileToSharedDir(s, cortexSchemaConfigFile, []byte(cortexSchemaConfigYaml)))
-	ruler := e2ecortex.NewRuler("ruler", mergeFlags(ChunksStorageFlags, RulerConfigs, configOverrides), "")
+	ruler := e2ecortex.NewRuler("ruler", mergeFlags(ChunksStorageFlags(), RulerFlags(), configOverrides), "")
 	require.NoError(t, s.StartAndWaitReady(ruler))

 	// Create a client with the ruler address configured
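One possible follow-up, not part of this diff: a regression test pinning down the fresh-map guarantee, so a future refactoring back to a shared map would fail fast. The test name is hypothetical; `ChunksStorageFlags` is the factory introduced in `integration/configs.go` above.

```go
package integration

import "testing"

// Hypothetical guard, not in this PR: each factory call must return a
// distinct map, so one test's mutations can never leak into another.
func TestChunksStorageFlagsReturnsFreshMap(t *testing.T) {
	a := ChunksStorageFlags()
	b := ChunksStorageFlags()

	a["-table-manager.poll-interval"] = "mutated"

	if b["-table-manager.poll-interval"] == "mutated" {
		t.Fatal("ChunksStorageFlags() must return a new map on every call")
	}
}
```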