From abffaa1bcbedae8002861ba40b40fd6ffb98016d Mon Sep 17 00:00:00 2001 From: Yong Date: Sun, 9 Nov 2025 02:32:51 -0600 Subject: [PATCH 01/14] Client: remove monkey patching --- .../python/apache_polaris/cli/polaris_cli.py | 42 ----------------- client/python/generate_clients.py | 45 +++++++++++++++++++ 2 files changed, 45 insertions(+), 42 deletions(-) diff --git a/client/python/apache_polaris/cli/polaris_cli.py b/client/python/apache_polaris/cli/polaris_cli.py index 8bd49d63dd..b06286df04 100644 --- a/client/python/apache_polaris/cli/polaris_cli.py +++ b/client/python/apache_polaris/cli/polaris_cli.py @@ -46,50 +46,8 @@ class PolarisCli: # Can be enabled if the client is able to authenticate directly without first fetching a token DIRECT_AUTHENTICATION_ENABLED = False - @staticmethod - def _patch_generated_models() -> None: - """ - The OpenAPI generator creates an `api_client` that dynamically looks up - model classes from the `apache_polaris.sdk.catalog.models` module using `getattr()`. - For example, when a response for a `create_policy` call is received, the - deserializer tries to find the `LoadPolicyResponse` class by looking for - `apache_polaris.sdk.catalog.models.LoadPolicyResponse`. - - However, the generator fails to add the necessary `import` statements - to the `apache_polaris/sdk/catalog/models/__init__.py` file. This means that even - though the model files exist (e.g., `load_policy_response.py`), the classes - are not part of the `apache_polaris.sdk.catalog.models` namespace. - - This method works around the bug in the generated code without modifying - the source files. It runs once per CLI execution, before any commands, and - manually injects the missing response-side model classes into the - `apache_polaris.sdk.catalog.models` namespace, allowing the deserializer to find them. 
- """ - import apache_polaris.sdk.catalog.models - from apache_polaris.sdk.catalog.models.applicable_policy import ApplicablePolicy - from apache_polaris.sdk.catalog.models.get_applicable_policies_response import GetApplicablePoliciesResponse - from apache_polaris.sdk.catalog.models.list_policies_response import ListPoliciesResponse - from apache_polaris.sdk.catalog.models.load_policy_response import LoadPolicyResponse - from apache_polaris.sdk.catalog.models.policy import Policy - from apache_polaris.sdk.catalog.models.policy_attachment_target import PolicyAttachmentTarget - from apache_polaris.sdk.catalog.models.policy_identifier import PolicyIdentifier - - models_to_patch = { - "ApplicablePolicy": ApplicablePolicy, - "GetApplicablePoliciesResponse": GetApplicablePoliciesResponse, - "ListPoliciesResponse": ListPoliciesResponse, - "LoadPolicyResponse": LoadPolicyResponse, - "Policy": Policy, - "PolicyAttachmentTarget": PolicyAttachmentTarget, - "PolicyIdentifier": PolicyIdentifier, - } - - for name, model_class in models_to_patch.items(): - setattr(apache_polaris.sdk.catalog.models, name, model_class) - @staticmethod def execute(args=None): - PolarisCli._patch_generated_models() options = Parser.parse(args) if options.command == Commands.PROFILES: from apache_polaris.cli.command import Command diff --git a/client/python/generate_clients.py b/client/python/generate_clients.py index 9aaa07fd87..ebfdc0538f 100644 --- a/client/python/generate_clients.py +++ b/client/python/generate_clients.py @@ -32,6 +32,7 @@ import logging import argparse import shutil +import ast # Paths CLIENT_DIR = Path(__file__).parent @@ -306,9 +307,53 @@ def build() -> None: generate_polaris_management_client() generate_polaris_catalog_client() generate_iceberg_catalog_client() + fix_catalog_models_init() prepend_licenses() +def fix_catalog_models_init() -> None: + """ + Regenerate the `apache_polaris.sdk.catalog.models.__init__.py` file by consolidating + imports for all model classes found 
under `apache_polaris/sdk/catalog/models`. + + This ensures that rerunning the OpenAPI Generator (which overwrites `__init__.py`) + does not cause missing imports for earlier generated model files. + """ + logger.info("Fixing catalog models __init__.py...") + models_dir = CLIENT_DIR / "apache_polaris" / "sdk" / "catalog" / "models" + init_py = models_dir / "__init__.py" + + # Get all python files in the models directory except __init__.py + model_files = [ + f for f in models_dir.glob("*.py") if f.is_file() and f.name != "__init__.py" + ] + + # Generate import statements + imports = [] + for model_file in sorted(model_files): + module_name = model_file.stem + with open(model_file, "r") as f: + tree = ast.parse(f.read()) + class_name = None + for node in ast.walk(tree): + if isinstance(node, ast.ClassDef): + # Find the first class that doesn't start with an underscore + if not node.name.startswith("_"): + class_name = node.name + break + if class_name: + imports.append( + f"from apache_polaris.sdk.catalog.models.{module_name} import {class_name}" + ) + else: + logger.warning(f"Could not find a suitable class in {model_file}") + + # Write the new __init__.py + with open(init_py, "w") as f: + f.write("\n".join(imports)) + logger.info("Catalog models __init__.py fixed.") + + def main(): parser = argparse.ArgumentParser(description="Generate Polaris Python clients.") parser.add_argument( From d1e393adaed0db9de66771aac35b8b55aeccd71b Mon Sep 17 00:00:00 2001 From: Yong Date: Sun, 9 Nov 2025 02:41:41 -0600 Subject: [PATCH 02/14] Client: remove monkey patching --- client/python/integration_tests/conftest.py | 50 --------------------- 1 file changed, 50 deletions(-) diff --git a/client/python/integration_tests/conftest.py b/client/python/integration_tests/conftest.py index d448fb50e1..12f72cbd60 100644 --- a/client/python/integration_tests/conftest.py +++ b/client/python/integration_tests/conftest.py @@ -375,53 +375,3 @@ def clear_namespace( def format_namespace(namespace: 
List[str]) -> str: return codecs.decode("1F", "hex").decode("UTF-8").join(namespace) - - -@pytest.fixture(scope="session", autouse=True) -def _patch_generated_models() -> None: - """ - The OpenAPI generator creates an `api_client` that dynamically looks up - model classes from the `apache_polaris.sdk.catalog.models` module using `getattr()`. - For example, when a response for a `create_policy` call is received, the - deserializer tries to find the `LoadPolicyResponse` class by looking for - `apache_polaris.sdk.catalog.models.LoadPolicyResponse`. - - However, the generator fails to add the necessary `import` statements - to the `apache_polaris/sdk/catalog/models/__init__.py` file. This means that even - though the model files exist (e.g., `load_policy_response.py`), the classes - are not part of the `apache_polaris.sdk.catalog.models` namespace. - - This fixture works around the bug in the generated code without modifying - the source files. It runs once per test session, before any tests, and - manually injects the missing response-side model classes into the - `apache_polaris.sdk.catalog.models` namespace, allowing the deserializer to find them. 
- """ - import apache_polaris.sdk.catalog.models - from apache_polaris.sdk.catalog.models.applicable_policy import ApplicablePolicy - from apache_polaris.sdk.catalog.models.get_applicable_policies_response import ( - GetApplicablePoliciesResponse, - ) - from apache_polaris.sdk.catalog.models.list_policies_response import ( - ListPoliciesResponse, - ) - from apache_polaris.sdk.catalog.models.load_policy_response import ( - LoadPolicyResponse, - ) - from apache_polaris.sdk.catalog.models.policy import Policy - from apache_polaris.sdk.catalog.models.policy_attachment_target import ( - PolicyAttachmentTarget, - ) - from apache_polaris.sdk.catalog.models.policy_identifier import PolicyIdentifier - - models_to_patch = { - "ApplicablePolicy": ApplicablePolicy, - "GetApplicablePoliciesResponse": GetApplicablePoliciesResponse, - "ListPoliciesResponse": ListPoliciesResponse, - "LoadPolicyResponse": LoadPolicyResponse, - "Policy": Policy, - "PolicyAttachmentTarget": PolicyAttachmentTarget, - "PolicyIdentifier": PolicyIdentifier, - } - - for name, model_class in models_to_patch.items(): - setattr(apache_polaris.sdk.catalog.models, name, model_class) From 9dc7378760a6b0e26fa1869d7bf71a59b951800f Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Sun, 9 Nov 2025 11:22:07 +0000 Subject: [PATCH 03/14] chore(deps): update dependency pre-commit to v4.4.0 (#3015) --- client/python/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/python/pyproject.toml b/client/python/pyproject.toml index f32a65bfe0..1bedb045d4 100644 --- a/client/python/pyproject.toml +++ b/client/python/pyproject.toml @@ -63,7 +63,7 @@ flake8 = ">= 4.0.0" types-python-dateutil = ">= 2.8.19.14" mypy = ">=1.18, <=1.18.2" pyiceberg = "==0.10.0" -pre-commit = "==4.3.0" +pre-commit = "==4.4.0" openapi-generator-cli = "==7.17.0" pip-licenses-cli = "==3.0.1" From 71750aceb12fea59162c55f0ac8ddbf87beec969 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 10 Nov 2025 05:27:40 +0000 
Subject: [PATCH 04/14] fix(deps): update dependency software.amazon.awssdk:bom to v2.38.2 (#3019) --- gradle/libs.versions.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index 69f21a65f5..cd2d3d341a 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -44,7 +44,7 @@ antlr4-runtime = { module = "org.antlr:antlr4-runtime", version.strictly = "4.9. apache-httpclient5 = { module = "org.apache.httpcomponents.client5:httpclient5", version = "5.5.1" } assertj-core = { module = "org.assertj:assertj-core", version = "3.27.6" } auth0-jwt = { module = "com.auth0:java-jwt", version = "4.5.0" } -awssdk-bom = { module = "software.amazon.awssdk:bom", version = "2.37.3" } +awssdk-bom = { module = "software.amazon.awssdk:bom", version = "2.38.2" } awaitility = { module = "org.awaitility:awaitility", version = "4.3.0" } azuresdk-bom = { module = "com.azure:azure-sdk-bom", version = "1.3.2" } caffeine = { module = "com.github.ben-manes.caffeine:caffeine", version = "3.2.3" } From c29531eb0baad8014c924f0afc686e805c097610 Mon Sep 17 00:00:00 2001 From: Alexandre Dutra Date: Mon, 10 Nov 2025 13:03:42 +0100 Subject: [PATCH 05/14] Add test for TracingFilter (#2847) --- runtime/service/build.gradle.kts | 2 + .../service/tracing/TracingFilterTest.java | 117 ++++++++++++++++++ 2 files changed, 119 insertions(+) create mode 100644 runtime/service/src/test/java/org/apache/polaris/service/tracing/TracingFilterTest.java diff --git a/runtime/service/build.gradle.kts b/runtime/service/build.gradle.kts index af86c83774..c0f462982a 100644 --- a/runtime/service/build.gradle.kts +++ b/runtime/service/build.gradle.kts @@ -133,6 +133,8 @@ dependencies { testImplementation("io.quarkus:quarkus-rest-client-jackson") testImplementation("io.quarkus:quarkus-jdbc-h2") + testImplementation("io.opentelemetry:opentelemetry-sdk-testing") + testImplementation("io.rest-assured:rest-assured") 
testImplementation(platform(libs.testcontainers.bom)) diff --git a/runtime/service/src/test/java/org/apache/polaris/service/tracing/TracingFilterTest.java b/runtime/service/src/test/java/org/apache/polaris/service/tracing/TracingFilterTest.java new file mode 100644 index 0000000000..825d45cb5f --- /dev/null +++ b/runtime/service/src/test/java/org/apache/polaris/service/tracing/TracingFilterTest.java @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.polaris.service.tracing; + +import static io.opentelemetry.api.common.AttributeKey.stringKey; +import static io.restassured.RestAssured.given; +import static org.assertj.core.api.Assertions.assertThat; +import static org.awaitility.Awaitility.await; + +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.trace.SpanContext; +import io.opentelemetry.sdk.testing.exporter.InMemorySpanExporter; +import io.opentelemetry.sdk.trace.data.SpanData; +import io.quarkus.test.common.http.TestHTTPEndpoint; +import io.quarkus.test.junit.QuarkusTest; +import io.quarkus.test.junit.QuarkusTestProfile; +import io.quarkus.test.junit.TestProfile; +import io.restassured.http.ContentType; +import jakarta.enterprise.inject.Produces; +import jakarta.inject.Inject; +import jakarta.inject.Singleton; +import java.time.Duration; +import java.util.List; +import java.util.Map; +import org.apache.polaris.service.catalog.api.IcebergRestOAuth2Api; +import org.junit.jupiter.api.Test; + +@QuarkusTest +@TestProfile(TracingFilterTest.Profile.class) +@TestHTTPEndpoint(IcebergRestOAuth2Api.class) +public class TracingFilterTest { + + public static class Profile implements QuarkusTestProfile { + + @Produces + @Singleton + InMemorySpanExporter inMemorySpanExporter() { + return InMemorySpanExporter.create(); + } + + @Override + public Map getConfigOverrides() { + return Map.of("quarkus.otel.sdk.disabled", "false"); + } + } + + @Inject InMemorySpanExporter inMemorySpanExporter; + + @Test + void testW3CTraceContextPropagation() { + + // Emulate an incoming request with a W3C trace context + // Example taken from: + // https://www.w3.org/TR/trace-context/#traceparent-header-field-values + String traceId = "4bf92f3577b34da6a3ce929d0e0e4736"; + String spanId = "00f067aa0ba902b7"; + String traceparent = "00-" + traceId + "-" + spanId + "-01"; + String rojoState = spanId; + String congoState = "t61rcWkgMzE"; + String tracestate = "rojo=" + rojoState + ",congo=" + 
congoState; + + given() + .contentType(ContentType.URLENC) + .formParam("grant_type", "client_credentials") + .formParam("scope", "PRINCIPAL_ROLE:ALL") + .formParam("client_id", "test-admin") + .formParam("client_secret", "test-secret") + // W3C headers + .header("traceparent", traceparent) + .header("tracestate", tracestate) + // Polaris request ID + .header("X-Request-ID", "12345") + .when() + .post() + .then() + .statusCode(200) + .header("X-Request-ID", "12345"); + + List spans = + await() + .atMost(Duration.ofSeconds(30)) + .until(inMemorySpanExporter::getFinishedSpanItems, sp -> !sp.isEmpty()); + + SpanData span = spans.getFirst(); + + Map, Object> attributes = span.getAttributes().asMap(); + assertThat(attributes) + .containsEntry(stringKey(TracingFilter.REALM_ID_ATTRIBUTE), "POLARIS") + .containsEntry(stringKey(TracingFilter.REQUEST_ID_ATTRIBUTE), "12345"); + + SpanContext parent = span.getParentSpanContext(); + assertThat(parent.getTraceId()).isEqualTo(traceId); + assertThat(parent.getSpanId()).isEqualTo(spanId); + assertThat(parent.isRemote()).isTrue(); + assertThat(parent.getTraceFlags().asByte()).isEqualTo((byte) 1); + assertThat(parent.getTraceState().asMap()) + .containsEntry("rojo", rojoState) + .containsEntry("congo", congoState); + } +} From 7ca5511c36c05caff11ff73dd86068c861331ccf Mon Sep 17 00:00:00 2001 From: Robert Stupp Date: Mon, 10 Nov 2025 16:48:41 +0100 Subject: [PATCH 06/14] NoSQL: Add (micro-ish) benchmarks (#3006) A project for JMH based benchmarks against NoSQL persistence. 
--- bom/build.gradle.kts | 1 + gradle/projects.main.properties | 1 + .../nosql/persistence/benchmark/NOTES.md | 41 +++++ .../persistence/benchmark/build.gradle.kts | 62 +++++++ .../nosql/benchmark/BaseParam.java | 80 +++++++++ .../nosql/benchmark/CommitBenchmark.java | 156 ++++++++++++++++++ .../nosql/benchmark/SimpleBenchmark.java | 128 ++++++++++++++ .../nosql/benchmark/ImmutableObj.java | 56 +++++++ .../nosql/benchmark/SimpleCommitTestObj.java | 49 ++++++ ....polaris.persistence.nosql.api.obj.ObjType | 21 +++ .../src/test/resources/logback-test.xml | 30 ++++ 11 files changed, 625 insertions(+) create mode 100644 persistence/nosql/persistence/benchmark/NOTES.md create mode 100644 persistence/nosql/persistence/benchmark/build.gradle.kts create mode 100644 persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/BaseParam.java create mode 100644 persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/CommitBenchmark.java create mode 100644 persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/SimpleBenchmark.java create mode 100644 persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/ImmutableObj.java create mode 100644 persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/SimpleCommitTestObj.java create mode 100644 persistence/nosql/persistence/benchmark/src/main/resources/META-INF/services/org.apache.polaris.persistence.nosql.api.obj.ObjType create mode 100644 persistence/nosql/persistence/benchmark/src/test/resources/logback-test.xml diff --git a/bom/build.gradle.kts b/bom/build.gradle.kts index d1e6aeab18..d590052228 100644 --- a/bom/build.gradle.kts +++ b/bom/build.gradle.kts @@ -50,6 +50,7 @@ dependencies { api(project(":polaris-persistence-nosql-api")) api(project(":polaris-persistence-nosql-impl")) + 
api(project(":polaris-persistence-nosql-benchmark")) api(project(":polaris-persistence-nosql-standalone")) api(project(":polaris-persistence-nosql-testextension")) diff --git a/gradle/projects.main.properties b/gradle/projects.main.properties index 3727f6eb65..7c4fd61e53 100644 --- a/gradle/projects.main.properties +++ b/gradle/projects.main.properties @@ -65,6 +65,7 @@ polaris-nodes-spi=persistence/nosql/nodes/spi # persistence / database agnostic polaris-persistence-nosql-api=persistence/nosql/persistence/api polaris-persistence-nosql-impl=persistence/nosql/persistence/impl +polaris-persistence-nosql-benchmark=persistence/nosql/persistence/benchmark polaris-persistence-nosql-standalone=persistence/nosql/persistence/standalone polaris-persistence-nosql-testextension=persistence/nosql/persistence/testextension polaris-persistence-nosql-varint=persistence/nosql/persistence/varint diff --git a/persistence/nosql/persistence/benchmark/NOTES.md b/persistence/nosql/persistence/benchmark/NOTES.md new file mode 100644 index 0000000000..ef8bc95fea --- /dev/null +++ b/persistence/nosql/persistence/benchmark/NOTES.md @@ -0,0 +1,41 @@ + + +Some container run commands... 
+ +```bash +podman run --rm -ti \ + --name demo_mongo \ + -p 27017:27017 \ + docker.io/library/mongo:8.0.5 +``` + +```bash +./gradlew :polaris-persistence-nosql-benchmark:jmhJar && java \ + -Dpolaris.persistence.backend.type=InMemory \ + -jar persistence/benchmark/build/libs/polaris-persistence-nosql-benchmark-1.0.0-incubating-SNAPSHOT-jmh.jar +``` + +```bash +./gradlew :polaris-persistence-nosql-benchmark:jmhJar && java \ + -Dpolaris.persistence.backend.type=MongoDb \ + -Dpolaris.persistence.backend.mongodb.connection-string=mongodb://localhost:27017/ \ + -Dpolaris.persistence.backend.mongodb.database-name=test \ + -jar persistence/benchmark/build/libs/polaris-persistence-nosql-benchmark-1.0.0-incubating-SNAPSHOT-jmh.jar +``` diff --git a/persistence/nosql/persistence/benchmark/build.gradle.kts b/persistence/nosql/persistence/benchmark/build.gradle.kts new file mode 100644 index 0000000000..0eccfe4d81 --- /dev/null +++ b/persistence/nosql/persistence/benchmark/build.gradle.kts @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar + +plugins { + id("polaris-server") + id("com.gradleup.shadow") + alias(libs.plugins.jmh) +} + +description = "Polaris NoSQL persistence benchmarks, no production code" + +dependencies { + implementation(project(":polaris-persistence-nosql-api")) + implementation(project(":polaris-persistence-nosql-impl")) + implementation(project(":polaris-persistence-nosql-standalone")) + implementation(project(":polaris-idgen-api")) + implementation(project(":polaris-idgen-impl")) + implementation(project(":polaris-idgen-spi")) + + compileOnly(project(":polaris-immutables")) + annotationProcessor(project(":polaris-immutables", configuration = "processor")) + + compileOnly(platform(libs.jackson.bom)) + compileOnly("com.fasterxml.jackson.core:jackson-annotations") + compileOnly("com.fasterxml.jackson.core:jackson-databind") + + compileOnly(libs.jakarta.annotation.api) + compileOnly(libs.jakarta.validation.api) + + jmhImplementation(libs.jmh.core) + jmhAnnotationProcessor(libs.jmh.generator.annprocess) + + jmhRuntimeOnly(project(":polaris-persistence-nosql-inmemory")) + jmhRuntimeOnly(testFixtures(project(":polaris-persistence-nosql-inmemory"))) + + jmhRuntimeOnly(project(":polaris-persistence-nosql-mongodb")) + jmhRuntimeOnly(testFixtures(project(":polaris-persistence-nosql-mongodb"))) +} + +tasks.named("jmhJar").configure { + outputs.cacheIf { false } // do not cache uber/shaded jars + mergeServiceFiles() + isZip64 = true +} diff --git a/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/BaseParam.java b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/BaseParam.java new file mode 100644 index 0000000000..a8edd60a19 --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/BaseParam.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.polaris.persistence.nosql.benchmark; + +import static java.util.function.Function.identity; + +import java.util.Map; +import org.apache.polaris.ids.api.MonotonicClock; +import org.apache.polaris.ids.impl.MonotonicClockImpl; +import org.apache.polaris.ids.impl.SnowflakeIdGeneratorFactory; +import org.apache.polaris.ids.spi.IdGeneratorSource; +import org.apache.polaris.persistence.nosql.api.Persistence; +import org.apache.polaris.persistence.nosql.api.PersistenceParams; +import org.apache.polaris.persistence.nosql.api.backend.Backend; +import org.apache.polaris.persistence.nosql.standalone.PersistenceConfigurer; + +class BaseParam { + Backend backend; + Persistence persistence; + MonotonicClock clock; + + void setupPersistence() { + var configurer = PersistenceConfigurer.defaultBackendConfigurer(); + var factory = configurer.buildBackendFactory(); + this.clock = MonotonicClockImpl.newDefaultInstance(); + this.backend = configurer.buildBackendFromConfiguration(factory); + var info = backend.setupSchema().orElse(""); + System.out.printf("Opened new persistence backend '%s' %s%n", backend.type(), info); + + var idGenerator = + new SnowflakeIdGeneratorFactory() + .buildIdGenerator( + 
Map.of(), + new IdGeneratorSource() { + @Override + public int nodeId() { + return 42; + } + + @Override + public long currentTimeMillis() { + return clock.currentTimeMillis(); + } + }); + this.persistence = + backend.newPersistence( + identity(), + PersistenceParams.BuildablePersistenceParams.builder().build(), + "42", + clock, + idGenerator); + + // TODO allow caching + } + + void shutdownPersistence() throws Exception { + if (clock != null) { + clock.close(); + } + if (backend != null) { + backend.close(); + } + } +} diff --git a/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/CommitBenchmark.java b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/CommitBenchmark.java new file mode 100644 index 0000000000..7fe5a6191e --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/CommitBenchmark.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.benchmark; + +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.util.Optional; +import org.apache.polaris.persistence.nosql.api.commit.RetryConfig; +import org.apache.polaris.persistence.nosql.api.commit.RetryTimeoutException; +import org.apache.polaris.persistence.nosql.impl.commits.CommitterWithStats; +import org.apache.polaris.persistence.nosql.impl.commits.retry.RetryStatsConsumer; +import org.openjdk.jmh.annotations.AuxCounters; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; + +@Warmup(iterations = 4, time = 1000, timeUnit = MILLISECONDS) +@Measurement(iterations = 10, time = 10_000, timeUnit = MILLISECONDS) +@Fork(1) +@Threads(4) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(MICROSECONDS) +public class CommitBenchmark { + @State(Scope.Benchmark) + public static class BenchmarkParam extends BaseParam { + + RetryConfig retryConfig; + + String refName; + CommitterWithStats committer; + + @Setup + public void init() { + setupPersistence(); + + refName = "commit-bench-" + System.currentTimeMillis(); + persistence.createReference(refName, Optional.empty()); + + committer = + (CommitterWithStats) + persistence.createCommitter(refName, SimpleCommitTestObj.class, String.class); + } + + @TearDown + public void tearDown() throws Exception { + shutdownPersistence(); + } + } + + @State(Scope.Thread) + 
@AuxCounters(AuxCounters.Type.EVENTS) + public static class ThreadParam implements RetryStatsConsumer { + public int timeouts; + public int success; + public int retries; + public long retrySleepMillis; + + String refName; + CommitterWithStats committer; + + @Setup(Level.Iteration) + public void clean() { + timeouts = 0; + success = 0; + } + + @Setup + public void createBranch(BenchmarkParam param) { + refName = + "commit-bench-thread-" + + System.currentTimeMillis() + + "-" + + Thread.currentThread().threadId(); + param.persistence.createReference(refName, Optional.empty()); + committer = + (CommitterWithStats) + param.persistence.createCommitter(refName, SimpleCommitTestObj.class, String.class); + } + + @Override + public void retryLoopFinished( + Result result, int retries, long sleepTimeMillis, long totalDurationNanos) { + switch (result) { + case SUCCESS: + success++; + break; + case TIMEOUT: + timeouts++; + break; + case CONFLICT: + case ERROR: + break; + } + this.retries += retries; + this.retrySleepMillis += sleepTimeMillis; + } + } + + @Benchmark + public Optional commitSingleRef(BenchmarkParam benchParam, ThreadParam threadParam) + throws Exception { + try { + return benchParam.committer.commit( + (state, refObjSupplier) -> { + var refObj = refObjSupplier.get(); + return state.commitResult( + "fooo", ImmutableSimpleCommitTestObj.builder().payload("some payload"), refObj); + }, + threadParam); + } catch (RetryTimeoutException e) { + return null; + } + } + + @Benchmark + public Optional commitDistinctRefs(ThreadParam threadParam) throws Exception { + try { + return threadParam.committer.commit( + (state, refObjSupplier) -> { + var refObj = refObjSupplier.get(); + return state.commitResult( + "foo", ImmutableSimpleCommitTestObj.builder().payload("some payload"), refObj); + }, + threadParam); + } catch (RetryTimeoutException e) { + return Optional.empty(); + } + } +} diff --git 
a/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/SimpleBenchmark.java b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/SimpleBenchmark.java new file mode 100644 index 0000000000..17beae800d --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/jmh/java/org/apache/polaris/persistence/nosql/benchmark/SimpleBenchmark.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.benchmark; + +import static java.util.concurrent.TimeUnit.MICROSECONDS; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +import java.util.Arrays; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.IntStream; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.TearDown; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.annotations.Warmup; + +@Warmup(iterations = 4, time = 1000, timeUnit = MILLISECONDS) +@Measurement(iterations = 20, time = 1000, timeUnit = MILLISECONDS) +@Fork(1) +@Threads(4) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(MICROSECONDS) +public class SimpleBenchmark { + @State(Scope.Benchmark) + public static class BenchmarkParam extends BaseParam { + private String payload; + + @Param({"1", "1000", "10000", "100000"}) + public int payloadLength; + + private long[] reusedIds; + private final AtomicInteger reusedIdIndex = new AtomicInteger(); + + @Setup + public void init() { + setupPersistence(); + + this.payload = "x".repeat(payloadLength); + + // populate the rows for 'reusedIds' + this.reusedIds = IntStream.range(0, 20).mapToLong(x -> persistence.generateId()).toArray(); + persistence.writeMany( + ImmutableObj.class, + Arrays.stream(reusedIds) + .mapToObj(id -> ImmutableImmutableObj.builder().id(id).build()) + .toArray(ImmutableObj[]::new)); + } + + @TearDown + public void tearDown() throws Exception { + shutdownPersistence(); + } + } + + @Benchmark + public ImmutableObj 
singleIdConditionalInsert(BenchmarkParam param) { + return param.persistence.conditionalInsert( + ImmutableImmutableObj.builder() + .id(param.persistence.generateId()) + .value(param.payload) + .build(), + ImmutableObj.class); + } + + @Benchmark + public ImmutableObj singleIdWriteNoConflict(BenchmarkParam param) { + return param.persistence.write( + ImmutableImmutableObj.builder() + .id(param.persistence.generateId()) + .value(param.payload) + .build(), + ImmutableObj.class); + } + + @Benchmark + public ImmutableObj singleIdWriteExisting(BenchmarkParam param) { + var id = param.reusedIds[(param.reusedIdIndex.incrementAndGet() % param.reusedIds.length)]; + return param.persistence.write( + ImmutableImmutableObj.builder().id(id).value(param.payload).build(), ImmutableObj.class); + } + + @Benchmark + public ImmutableObj[] manyIdWriteExisting(BenchmarkParam param) { + return param.persistence.writeMany( + ImmutableObj.class, + Arrays.stream(param.reusedIds) + .mapToObj(id -> ImmutableImmutableObj.builder().id(id).value(param.payload).build()) + .toArray(ImmutableObj[]::new)); + } + + @Benchmark + public ImmutableObj[] manyIdWriteNoConflict(BenchmarkParam param) { + return param.persistence.writeMany( + ImmutableObj.class, + IntStream.range(0, 20) + .mapToObj( + x -> + ImmutableImmutableObj.builder() + .id(param.persistence.generateId()) + .value(param.payload) + .build()) + .toArray(ImmutableObj[]::new)); + } +} diff --git a/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/ImmutableObj.java b/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/ImmutableObj.java new file mode 100644 index 0000000000..4220526ca4 --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/ImmutableObj.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.polaris.persistence.nosql.benchmark; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.polaris.immutables.PolarisImmutable; +import org.apache.polaris.persistence.nosql.api.obj.AbstractObjType; +import org.apache.polaris.persistence.nosql.api.obj.Obj; +import org.apache.polaris.persistence.nosql.api.obj.ObjType; +import org.immutables.value.Value; + +@PolarisImmutable +@JsonSerialize(as = ImmutableImmutableObj.class) +@JsonDeserialize(as = ImmutableImmutableObj.class) +public interface ImmutableObj extends Obj { + ObjType TYPE = new ImmutableObjType(); + + @Override + default ObjType type() { + return TYPE; + } + + @Value.Default + default String value() { + return "foo"; + } + + @Override + @Value.Default + default String versionToken() { + return "vt"; + } + + final class ImmutableObjType extends AbstractObjType { + public ImmutableObjType() { + super("jmh-imm", "immutable", ImmutableObj.class); + } + } +} diff --git a/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/SimpleCommitTestObj.java 
b/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/SimpleCommitTestObj.java new file mode 100644 index 0000000000..d88d1b5cb3 --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/main/java/org/apache/polaris/persistence/nosql/benchmark/SimpleCommitTestObj.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.benchmark; + +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import org.apache.polaris.immutables.PolarisImmutable; +import org.apache.polaris.persistence.nosql.api.obj.AbstractObjType; +import org.apache.polaris.persistence.nosql.api.obj.BaseCommitObj; +import org.apache.polaris.persistence.nosql.api.obj.ObjType; + +/** A concrete object */ +@PolarisImmutable +@JsonSerialize(as = ImmutableSimpleCommitTestObj.class) +@JsonDeserialize(as = ImmutableSimpleCommitTestObj.class) +public interface SimpleCommitTestObj extends BaseCommitObj { + ObjType TYPE = new SimpleCommitTestObjType(); + + @Override + default ObjType type() { + return TYPE; + } + + String payload(); + + final class SimpleCommitTestObjType extends AbstractObjType { + public SimpleCommitTestObjType() { + super("test-s-c", "simple commit", SimpleCommitTestObj.class); + } + } + + interface Builder extends BaseCommitObj.Builder {} +} diff --git a/persistence/nosql/persistence/benchmark/src/main/resources/META-INF/services/org.apache.polaris.persistence.nosql.api.obj.ObjType b/persistence/nosql/persistence/benchmark/src/main/resources/META-INF/services/org.apache.polaris.persistence.nosql.api.obj.ObjType new file mode 100644 index 0000000000..f42259ffeb --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/main/resources/META-INF/services/org.apache.polaris.persistence.nosql.api.obj.ObjType @@ -0,0 +1,21 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +org.apache.polaris.persistence.nosql.benchmark.SimpleCommitTestObj$SimpleCommitTestObjType +org.apache.polaris.persistence.nosql.benchmark.ImmutableObj$ImmutableObjType diff --git a/persistence/nosql/persistence/benchmark/src/test/resources/logback-test.xml b/persistence/nosql/persistence/benchmark/src/test/resources/logback-test.xml new file mode 100644 index 0000000000..fb74fc2c54 --- /dev/null +++ b/persistence/nosql/persistence/benchmark/src/test/resources/logback-test.xml @@ -0,0 +1,30 @@ + + + + + + + %date{ISO8601} [%thread] %-5level %logger{36} - %msg%n + + + + + + From 17b75ded5525a1f07ac9033d5264ce53a86b0729 Mon Sep 17 00:00:00 2001 From: cccs-cat001 <56204545+cccs-cat001@users.noreply.github.com> Date: Mon, 10 Nov 2025 13:16:30 -0500 Subject: [PATCH 07/14] Helm chart: include configmap checksum in deployment annotations (#3023) --- CHANGELOG.md | 1 + helm/polaris/templates/deployment.yaml | 5 +- helm/polaris/tests/deployment_test.yaml | 87 +++++++++++++++++++++++-- 3 files changed, 87 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e6fb7e78d..493e0c60db 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,6 +54,7 @@ request adding CHANGELOG notes for breaking (!) changes and possibly other secti - `client.region` is no longer considered a "credential" property (related to Iceberg REST Catalog API). - Relaxed the requirements for S3 storage's ARN to allow Polaris to connect to more non-AWS S3 storage appliances. +- Added checksum to helm deployment so that it will restart when the configmap has changed. 
### Deprecations diff --git a/helm/polaris/templates/deployment.yaml b/helm/polaris/templates/deployment.yaml index a7cec81a48..9ee0a18924 100644 --- a/helm/polaris/templates/deployment.yaml +++ b/helm/polaris/templates/deployment.yaml @@ -39,10 +39,11 @@ spec: {{- include "polaris.selectorLabels" . | nindent 6 }} template: metadata: - {{- if .Values.podAnnotations }} annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- if .Values.podAnnotations }} {{- tpl (toYaml .Values.podAnnotations) . | nindent 8 }} - {{- end }} + {{- end }} labels: {{- include "polaris.selectorLabels" . | nindent 8 }} {{- if .Values.podLabels }} diff --git a/helm/polaris/tests/deployment_test.yaml b/helm/polaris/tests/deployment_test.yaml index d80b72b649..df16d0a153 100644 --- a/helm/polaris/tests/deployment_test.yaml +++ b/helm/polaris/tests/deployment_test.yaml @@ -27,16 +27,19 @@ release: templates: - deployment.yaml + - configmap.yaml tests: # metadata.name - it: should set deployment name + template: deployment.yaml asserts: - equal: path: metadata.name value: polaris-release - it: should set deployment name with override + template: deployment.yaml set: nameOverride: polaris-override asserts: @@ -44,6 +47,7 @@ tests: path: metadata.name value: polaris-release-polaris-override - it: should set deployment name with full override + template: deployment.yaml set: fullnameOverride: polaris-override asserts: @@ -53,6 +57,7 @@ tests: # metadata.namespace - it: should set deployment namespace + template: deployment.yaml asserts: - equal: path: metadata.namespace @@ -60,6 +65,7 @@ tests: # metadata.labels - it: should set deployment default labels + template: deployment.yaml asserts: - isSubset: path: metadata.labels @@ -70,6 +76,7 @@ tests: app.kubernetes.io/managed-by: Helm helm.sh/chart: polaris-1.2.3 - it: should set podLabels in deployment labels + template: deployment.yaml set: podLabels: app.kubernetes.io/component: polaris @@ 
-81,11 +88,13 @@ tests: # spec.replicas - it: should set default replicas + template: deployment.yaml asserts: - equal: path: spec.replicas value: 1 - it: should set replicas + template: deployment.yaml set: replicaCount: 3 asserts: @@ -93,6 +102,7 @@ tests: path: spec.replicas value: 3 - it: should not set replicas if autoscaling is enabled + template: deployment.yaml set: replicaCount: 3 autoscaling: @@ -103,22 +113,26 @@ tests: # spec.revisionHistoryLimit - it: should not set revisionHistoryLimit by default with null + template: deployment.yaml asserts: - notExists: path: spec.revisionHistoryLimit - it: should not set revisionHistoryLimit with quote empty string + template: deployment.yaml set: revisionHistoryLimit: "" asserts: - notExists: path: spec.revisionHistoryLimit - it: should not set revisionHistoryLimit with empty string + template: deployment.yaml set: revisionHistoryLimit: asserts: - notExists: path: spec.revisionHistoryLimit - it: should set revisionHistoryLimit + template: deployment.yaml set: revisionHistoryLimit: 1 asserts: @@ -126,6 +140,7 @@ tests: path: spec.revisionHistoryLimit value: 1 - it: should set revisionHistoryLimit (disabled revision history) + template: deployment.yaml set: revisionHistoryLimit: 0 asserts: @@ -135,6 +150,7 @@ tests: # spec.selector.matchLabels + spec.template.metadata.labels - it: should set deployment selector labels + template: deployment.yaml asserts: - isSubset: path: spec.selector.matchLabels @@ -147,6 +163,7 @@ tests: app.kubernetes.io/name: polaris app.kubernetes.io/instance: polaris-release - it: should include podLabels in spec.template.metadata.labels only + template: deployment.yaml set: podLabels: app.kubernetes.io/component: polaris @@ -161,11 +178,16 @@ tests: app.kubernetes.io/component: polaris # spec.template.metadata.annotations - - it: should not set pod annotations by default - asserts: - - notExists: - path: spec.template.metadata.annotations + - it: should only set checksum annotation by 
default + template: deployment.yaml + asserts: + - exists: + path: spec.template.metadata.annotations.checksum/config + - matchRegex: + path: spec.template.metadata.annotations.checksum/config + pattern: "^[a-f0-9]{64}$" - it: should set pod annotations + template: deployment.yaml set: podAnnotations: foo: bar @@ -177,10 +199,12 @@ tests: # spec.template.spec.imagePullSecrets - it: should not set imagePullSecrets by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.imagePullSecrets - it: should set imagePullSecrets + template: deployment.yaml set: imagePullSecrets: - test-secret @@ -192,11 +216,13 @@ tests: # spec.template.spec.serviceAccountName - it: should set default service account name + template: deployment.yaml asserts: - equal: path: spec.template.spec.serviceAccountName value: polaris-release - it: should set service account name when serviceAccount.create is true + template: deployment.yaml set: serviceAccount: create: true @@ -205,6 +231,7 @@ tests: path: spec.template.spec.serviceAccountName value: polaris-release - it: should set custom service account name when serviceAccount.create is true + template: deployment.yaml set: serviceAccount: create: true @@ -214,6 +241,7 @@ tests: path: spec.template.spec.serviceAccountName value: polaris-sa - it: should set service account name to default when serviceAccount.create is false + template: deployment.yaml set: serviceAccount: create: false @@ -222,6 +250,7 @@ tests: path: spec.template.spec.serviceAccountName value: default - it: should set custom service account name when serviceAccount.create is false + template: deployment.yaml set: serviceAccount: create: false @@ -233,6 +262,7 @@ tests: # spec.template.spec.securityContext - it: should set securityContext by default + template: deployment.yaml asserts: - isSubset: path: spec.template.spec.securityContext @@ -241,6 +271,7 @@ tests: seccompProfile: type: RuntimeDefault - it: should set custom securityContext + template: 
deployment.yaml set: podSecurityContext: fsGroup: 1234 @@ -252,6 +283,7 @@ tests: # spec.template.spec.containers - it: should set container name + template: deployment.yaml asserts: - equal: path: spec.template.spec.containers[0].name @@ -259,6 +291,7 @@ tests: # spec.template.spec.containers[0].securityContext - it: should set container securityContext by default + template: deployment.yaml asserts: - isSubset: path: spec.template.spec.containers[0].securityContext @@ -271,6 +304,7 @@ tests: seccompProfile: type: RuntimeDefault - it: should set custom container securityContext + template: deployment.yaml set: containerSecurityContext: allowPrivilegeEscalation: true @@ -286,6 +320,7 @@ tests: # spec.template.spec.containers[0].image - it: should set container image + template: deployment.yaml set: image: repository: test-repo @@ -295,6 +330,7 @@ tests: path: spec.template.spec.containers[0].image value: test-repo:test-tag - it: should set container image with template + template: deployment.yaml set: image: repository: test-repo-{{ .Chart.Version }} @@ -304,6 +340,7 @@ tests: path: spec.template.spec.containers[0].image value: test-repo-1.2.3:test-tag-polaris-release - it: should set container image with chart version if no tag provided + template: deployment.yaml set: image: repository: test-repo @@ -315,6 +352,7 @@ tests: # spec.template.spec.containers[0].imagePullPolicy - it: should set container pull policy + template: deployment.yaml set: image: pullPolicy: Always @@ -325,10 +363,12 @@ tests: # spec.template.spec.containers[0].env - it: should not set container env by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.containers[0].env - it: should set container env + template: deployment.yaml set: extraEnv: - name: foo @@ -342,6 +382,7 @@ tests: # spec.template.spec.containers[0].volumeMounts + spec.template.spec.volumes - it: should not set persistence volumes by default + template: deployment.yaml asserts: - lengthEqual: 
path: spec.template.spec.volumes @@ -358,6 +399,7 @@ tests: # spec.template.spec.containers[0].ports - it: should set container ports by default + template: deployment.yaml asserts: - lengthEqual: path: spec.template.spec.containers[0].ports @@ -376,6 +418,7 @@ tests: protocol: TCP - it: should set custom container ports + template: deployment.yaml set: service: ports: @@ -415,6 +458,7 @@ tests: protocol: TCP - it: should use targetPort if defined + template: deployment.yaml set: service: ports: @@ -456,6 +500,7 @@ tests: protocol: TCP - it: should fail if port name is not unique (#1) + template: deployment.yaml set: service: ports: @@ -468,6 +513,7 @@ tests: errorPattern: "service.ports\\[\\d\\]: port name already taken: polaris-http" - it: should fail if port name is not unique (#2) + template: deployment.yaml set: managementService: ports: @@ -480,6 +526,7 @@ tests: errorPattern: "managementService.ports\\[\\d\\]: port name already taken: polaris-mgmt" - it: should fail if port name is not unique (#3) + template: deployment.yaml set: service: ports: @@ -494,6 +541,7 @@ tests: errorPattern: "managementService.ports\\[\\d\\]: port name already taken: polaris" - it: should not fail when extra service references the same port name and number + template: deployment.yaml set: extraServices: - nameSuffix: "-extra" @@ -519,6 +567,7 @@ tests: protocol: TCP - it: should fail when extra service references the same port name with different number (#1) + template: deployment.yaml set: extraServices: - nameSuffix: "-extra" @@ -531,6 +580,7 @@ tests: errorPattern: "extraServices\\[\\d\\].ports\\[\\d\\]: wrong port number for port polaris-http, expected 8181, got 9999" - it: should fail when extra service references the same port name with different number (#2) + template: deployment.yaml set: extraServices: - nameSuffix: "-extra" @@ -543,6 +593,7 @@ tests: errorPattern: "extraServices\\[\\d\\].ports\\[\\d\\]: wrong port number for port polaris-mgmt, expected 8182, got 9999" - 
it: should fail when extra service references the same port name with different number (#3) + template: deployment.yaml set: service: ports: @@ -561,6 +612,7 @@ tests: errorPattern: "extraServices\\[\\d\\].ports\\[\\d\\]: wrong port number for port polaris-https, expected 8043, got 9999" - it: should fail when extra service references the same port name with different protocol + template: deployment.yaml set: service: ports: @@ -578,6 +630,7 @@ tests: errorPattern: "extraServices\\[\\d\\].ports\\[\\d\\]: wrong protocol for port polaris-http, expected TCP, got UDP" - it: should create 2 ports with same number + template: deployment.yaml set: service: ports: @@ -602,6 +655,7 @@ tests: containerPort: 8181 protocol: TCP - it: should create 2 ports with same number using targetPort + template: deployment.yaml set: service: ports: @@ -628,6 +682,7 @@ tests: containerPort: 8181 protocol: TCP - it: should set port protocols + template: deployment.yaml set: service: ports: @@ -671,6 +726,7 @@ tests: # spec.template.spec.containers[0].livenessProbe - it: should set container livenessProbe by default + template: deployment.yaml set: managementService: ports: @@ -700,6 +756,7 @@ tests: # spec.template.spec.containers[0].readinessProbe - it: should set container readinessProbe by default + template: deployment.yaml set: managementService: ports: @@ -727,10 +784,12 @@ tests: # spec.template.spec.containers[0].resources - it: should not set container resources by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.containers[0].resources - it: should set container resources + template: deployment.yaml set: resources: requests: @@ -752,10 +811,12 @@ tests: # spec.template.spec.nodeSelector - it: should not set nodeSelector by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.nodeSelector - it: should set nodeSelector + template: deployment.yaml set: nodeSelector: disktype: ssd @@ -767,10 +828,12 @@ tests: # 
spec.template.spec.affinity - it: should not set affinity by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.affinity - it: should set affinity + template: deployment.yaml set: affinity: nodeAffinity: @@ -798,10 +861,12 @@ tests: # spec.template.spec.tolerations - it: should not set tolerations by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.tolerations - it: should set tolerations + template: deployment.yaml set: tolerations: - key: "key" @@ -818,6 +883,7 @@ tests: effect: "NoSchedule" - it: should set storage credentials + template: deployment.yaml set: storage: secret: @@ -852,6 +918,7 @@ tests: key: gcpToken - it: should set extra env + template: deployment.yaml set: extraEnv: - name: foo @@ -877,6 +944,7 @@ tests: key: key - it: should set extra env + storage credentials + template: deployment.yaml set: storage: secret: @@ -901,11 +969,13 @@ tests: key: awsAccessKeyId - it: should not set any environment variables by default + template: deployment.yaml asserts: - notExists: path: spec.template.spec.containers[0].env - it: should configure config volume + template: deployment.yaml set: image.configDir: /config/dir asserts: @@ -928,6 +998,7 @@ tests: path: application.properties - it: should configure config volume with token broker secret (rsa-key-pair - deprecation) + template: deployment.yaml set: image.configDir: /config/dir authentication: @@ -970,6 +1041,7 @@ tests: path: private.pem - it: should configure config volume with token broker secret (symmetric-key - deprecation) + template: deployment.yaml set: image.configDir: /config/dir authentication: @@ -1010,6 +1082,7 @@ tests: path: symmetric.key - it: should configure config volume with token broker secret (rsa-key-pair) + template: deployment.yaml set: image.configDir: /config/dir authentication: @@ -1052,6 +1125,7 @@ tests: path: private.pem - it: should configure config volume with token broker secret (symmetric-key) + template: 
deployment.yaml set: image.configDir: /config/dir authentication: @@ -1092,6 +1166,7 @@ tests: path: symmetric.key - it: should configure config volume with authentication including per-realm overrides + template: deployment.yaml set: image.configDir: /config/dir authentication: @@ -1144,6 +1219,7 @@ tests: path: REALM+2/private.pem - it: should set relational-jdbc persistence environment variables + template: deployment.yaml set: persistence: { type: "relational-jdbc", relationalJdbc: { secret: { name: "polaris-persistence", username: "username", password: "password", jdbcUrl: "jdbcUrl" } } } asserts: @@ -1173,6 +1249,7 @@ tests: key: jdbcUrl - it: should configure volume for file logging + template: deployment.yaml set: logging.file.enabled: true logging.file.logsDir: /custom/logs @@ -1191,6 +1268,7 @@ tests: claimName: polaris-release-logs - it: should include extra volumes and volume mounts + template: deployment.yaml set: extraVolumes: - name: extra-volume @@ -1213,6 +1291,7 @@ tests: emptyDir: {} - it: should set OIDC client secret + template: deployment.yaml set: oidc: { client: { secret: { name: polaris-oidc-secret, key: client-secret } } } asserts: From ce42f914ba6a19afc4e4520f39ace30c0fcecd8b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 10 Nov 2025 19:22:29 +0000 Subject: [PATCH 08/14] fix(deps): update dependency ch.qos.logback:logback-classic to v1.5.21 (#3025) --- gradle/libs.versions.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml index cd2d3d341a..83438c876b 100644 --- a/gradle/libs.versions.toml +++ b/gradle/libs.versions.toml @@ -80,7 +80,7 @@ keycloak-admin-client = { module = "org.keycloak:keycloak-admin-client", version jcstress-core = { module = "org.openjdk.jcstress:jcstress-core", version = "0.16" } jmh-core = { module = "org.openjdk.jmh:jmh-core", version.ref = "jmh" } jmh-generator-annprocess = { module = "org.openjdk.jmh:jmh-generator-annprocess", 
version.ref = "jmh" } -logback-classic = { module = "ch.qos.logback:logback-classic", version = "1.5.20" } +logback-classic = { module = "ch.qos.logback:logback-classic", version = "1.5.21" } micrometer-bom = { module = "io.micrometer:micrometer-bom", version = "1.16.0" } microprofile-fault-tolerance-api = { module = "org.eclipse.microprofile.fault-tolerance:microprofile-fault-tolerance-api", version = "4.1.2" } mockito-core = { module = "org.mockito:mockito-core", version = "5.20.0" } From 6753055d9ca86d13e233e467eb453726b0dd2cca Mon Sep 17 00:00:00 2001 From: Robert Stupp Date: Tue, 11 Nov 2025 00:42:20 +0100 Subject: [PATCH 09/14] NoSQL: Realms handling (#3007) Introduces handling for realms including realm-state management/transition. The `RealmStore` implementation for NoSQL depends on CDI components, coming in a follow-up PR. --- bom/build.gradle.kts | 4 + gradle/projects.main.properties | 4 + persistence/nosql/realms/README.md | 32 +++ persistence/nosql/realms/api/build.gradle.kts | 41 ++++ .../api/RealmAlreadyExistsException.java | 25 +++ .../nosql/realms/api/RealmDefinition.java | 100 +++++++++ .../RealmExpectedStateMismatchException.java | 25 +++ .../nosql/realms/api/RealmManagement.java | 94 ++++++++ .../realms/api/RealmNotFoundException.java | 25 +++ .../nosql/realms/impl/build.gradle.kts | 56 +++++ .../realms/impl/RealmManagementImpl.java | 203 ++++++++++++++++++ .../nosql/realms/impl/package-info.java | 20 ++ .../src/main/resources/META-INF/beans.xml | 24 +++ .../realms/impl/TestRealmManagementImpl.java | 176 +++++++++++++++ .../impl/src/test/resources/logback-test.xml | 30 +++ persistence/nosql/realms/spi/build.gradle.kts | 48 +++++ .../nosql/realms/spi/RealmStore.java | 89 ++++++++ .../nosql/realms/spi/MockRealmStore.java | 88 ++++++++ 18 files changed, 1084 insertions(+) create mode 100644 persistence/nosql/realms/README.md create mode 100644 persistence/nosql/realms/api/build.gradle.kts create mode 100644 
persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmAlreadyExistsException.java create mode 100644 persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmDefinition.java create mode 100644 persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmExpectedStateMismatchException.java create mode 100644 persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmManagement.java create mode 100644 persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmNotFoundException.java create mode 100644 persistence/nosql/realms/impl/build.gradle.kts create mode 100644 persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/RealmManagementImpl.java create mode 100644 persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/package-info.java create mode 100644 persistence/nosql/realms/impl/src/main/resources/META-INF/beans.xml create mode 100644 persistence/nosql/realms/impl/src/test/java/org/apache/polaris/persistence/nosql/realms/impl/TestRealmManagementImpl.java create mode 100644 persistence/nosql/realms/impl/src/test/resources/logback-test.xml create mode 100644 persistence/nosql/realms/spi/build.gradle.kts create mode 100644 persistence/nosql/realms/spi/src/main/java/org/apache/polaris/persistence/nosql/realms/spi/RealmStore.java create mode 100644 persistence/nosql/realms/spi/src/testFixtures/java/org/apache/polaris/persistence/nosql/realms/spi/MockRealmStore.java diff --git a/bom/build.gradle.kts b/bom/build.gradle.kts index d590052228..7bdc40545c 100644 --- a/bom/build.gradle.kts +++ b/bom/build.gradle.kts @@ -48,6 +48,10 @@ dependencies { api(project(":polaris-nodes-impl")) api(project(":polaris-nodes-spi")) + api(project(":polaris-persistence-nosql-realms-api")) + 
api(project(":polaris-persistence-nosql-realms-impl")) + api(project(":polaris-persistence-nosql-realms-spi")) + api(project(":polaris-persistence-nosql-api")) api(project(":polaris-persistence-nosql-impl")) api(project(":polaris-persistence-nosql-benchmark")) diff --git a/gradle/projects.main.properties b/gradle/projects.main.properties index 7c4fd61e53..1f7bd19cc5 100644 --- a/gradle/projects.main.properties +++ b/gradle/projects.main.properties @@ -62,6 +62,10 @@ polaris-idgen-spi=persistence/nosql/idgen/spi polaris-nodes-api=persistence/nosql/nodes/api polaris-nodes-impl=persistence/nosql/nodes/impl polaris-nodes-spi=persistence/nosql/nodes/spi +# realms +polaris-persistence-nosql-realms-api=persistence/nosql/realms/api +polaris-persistence-nosql-realms-impl=persistence/nosql/realms/impl +polaris-persistence-nosql-realms-spi=persistence/nosql/realms/spi # persistence / database agnostic polaris-persistence-nosql-api=persistence/nosql/persistence/api polaris-persistence-nosql-impl=persistence/nosql/persistence/impl diff --git a/persistence/nosql/realms/README.md b/persistence/nosql/realms/README.md new file mode 100644 index 0000000000..d620e6389c --- /dev/null +++ b/persistence/nosql/realms/README.md @@ -0,0 +1,32 @@ + + +# Dynamic realm management + +Framework to manage realms. + +## Code structure + +The code is structured into multiple modules. Consuming code should almost always pull in only the API module. + +* `polaris-persistence-nosql-realms-api` provides the necessary Java interfaces and immutable types. +* `polaris-persistence-nosql-realms-id` provides a type-safe holder for a realm ID. +* `polaris-persistence-nosql-realms-impl` provides the storage agnostic implementation. +* `polaris-persistence-nosql-realms-spi` provides the necessary interfaces to provide a storage specific implementation. +* `polaris-persistence-nosql-realms-store-nosql` provides the storage implementation based on `polaris-persistence-nosql-api`. 
diff --git a/persistence/nosql/realms/api/build.gradle.kts b/persistence/nosql/realms/api/build.gradle.kts new file mode 100644 index 0000000000..52c1ebbdaf --- /dev/null +++ b/persistence/nosql/realms/api/build.gradle.kts @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +plugins { + id("org.kordamp.gradle.jandex") + id("polaris-server") +} + +description = "Polaris realms API, no concrete implementations" + +dependencies { + implementation(project(":polaris-idgen-api")) + implementation(libs.guava) + + compileOnly(project(":polaris-immutables")) + annotationProcessor(project(":polaris-immutables", configuration = "processor")) + + implementation(platform(libs.jackson.bom)) + implementation("com.fasterxml.jackson.core:jackson-databind") + + compileOnly(libs.jakarta.annotation.api) + compileOnly(libs.jakarta.validation.api) + compileOnly(libs.jakarta.inject.api) + compileOnly(libs.jakarta.enterprise.cdi.api) +} diff --git a/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmAlreadyExistsException.java b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmAlreadyExistsException.java new file mode 100644 index 0000000000..a706d0957e --- /dev/null +++ b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmAlreadyExistsException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.api; + +public class RealmAlreadyExistsException extends RuntimeException { + public RealmAlreadyExistsException(String message) { + super(message); + } +} diff --git a/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmDefinition.java b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmDefinition.java new file mode 100644 index 0000000000..5229cd1fc7 --- /dev/null +++ b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmDefinition.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.api; + +import com.fasterxml.jackson.annotation.JsonIgnore; +import com.fasterxml.jackson.annotation.JsonInclude; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import java.time.Instant; +import java.util.Map; +import org.apache.polaris.immutables.PolarisImmutable; +import org.immutables.value.Value; + +@PolarisImmutable +@JsonSerialize(as = ImmutableRealmDefinition.class) +@JsonDeserialize(as = ImmutableRealmDefinition.class) +public interface RealmDefinition { + String id(); + + Instant created(); + + Instant updated(); + + RealmStatus status(); + + @JsonInclude(JsonInclude.Include.NON_EMPTY) + Map properties(); + + static ImmutableRealmDefinition.Builder builder() { + return ImmutableRealmDefinition.builder(); + } + + @JsonIgnore + @Value.NonAttribute + default boolean needsBootstrap() { + return switch (status()) { + case CREATED, LOADING, INITIALIZING -> true; + default -> false; + }; + } + + /** Realms are assigned */ + enum RealmStatus { + /** + * The initial state of a realm is "created", which means that the realm ID is reserved, but the + * realm is not yet usable. This state can transition to {@link #LOADING} or {@link + * #INITIALIZING} or the realm can be directly deleted. + */ + CREATED, + /** + * State used to indicate that the realm data is being imported. This state can transition to + * {@link #ACTIVE} or {@link #INACTIVE} or {@link #PURGING}. + */ + LOADING, + /** + * State used to indicate that the realm is being initialized. This state can transition to + * {@link #ACTIVE} or {@link #INACTIVE} or {@link #PURGING}. + */ + INITIALIZING, + /** + * When a realm is fully set up, its state is "active". This state can only transition to {@link + * #INACTIVE}. 
+ */ + ACTIVE, + /** + * An {@link #ACTIVE} realm can be put into "inactive" state, which means that the realm cannot + * be used, but it can be put back into {@link #ACTIVE} state. + */ + INACTIVE, + /** + * An {@link #INACTIVE} realm can be put into "purging" state, which means that the realm's data + * is being purged from the persistence database. This is next to the final and terminal state + * {@link #PURGED} of a realm. Once all data of the realm has been purged, it must at least be + * set into {@link #PURGED} status or be entirely removed. + */ + PURGING, + /** + * "Purged" is the terminal state of every realm. A purged realm can be safely {@linkplain + * RealmManagement#delete(RealmDefinition) deleted}. The difference between a "purged" realm and + * a non-existing (deleted) realm is that the ID of a purged realm cannot be (re)used. + */ + PURGED, + } +} diff --git a/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmExpectedStateMismatchException.java b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmExpectedStateMismatchException.java new file mode 100644 index 0000000000..8cc6519025 --- /dev/null +++ b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmExpectedStateMismatchException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.polaris.persistence.nosql.realms.api; + +public class RealmExpectedStateMismatchException extends RuntimeException { + public RealmExpectedStateMismatchException(String message) { + super(message); + } +} diff --git a/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmManagement.java b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmManagement.java new file mode 100644 index 0000000000..c6df568689 --- /dev/null +++ b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmManagement.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.api; + +import com.google.errorprone.annotations.MustBeClosed; +import jakarta.annotation.Nonnull; +import java.util.Optional; +import java.util.stream.Stream; + +/** + * Low-level realm management functionality. + * + *

Realm IDs must conform to the following constraints: + * + *

    + *
  • Must not start or end with whitespaces. + *
  • Must only consist of US-ASCII letters or digits or hyphens ({@code -}) or underscores + * ({@code _}). + *
  • Must not start with two consecutive colons ({@code ::}). + *
  • Must not be empty. + *
  • Must not be longer than 128 characters. + *
+ * + *

Note: In a CDI container {@link RealmManagement} can be directly injected. + */ +public interface RealmManagement { + /** + * Creates a new realm in {@linkplain RealmDefinition.RealmStatus#CREATED created status} with the + * given realm ID. + * + * @return the persisted state of the realm definition + * @throws RealmAlreadyExistsException if a realm with the given ID already exists + */ + @Nonnull + RealmDefinition create(@Nonnull String realmId); + + /** Returns a stream of all realm definitions. The returned stream must be closed. */ + @Nonnull + @MustBeClosed + Stream list(); + + /** + * Retrieve a realm definition by realm ID. + * + * @return the realm definition if it exists. + */ + @Nonnull + Optional get(@Nonnull String realmId); + + /** + * Updates a realm definition to {@code update}, if the persisted state matches the {@code + * expected} state, and if the {@linkplain RealmDefinition#status() status} transition is valid. + * + * @param expected The expected persisted state of the realm definition. This must exactly + * represent the persisted realm definition as returned by {@link #create(String)} or {@link + * #get(String)} or a prior {@link #update(RealmDefinition, RealmDefinition)}. + * @param update the new state of the realm definition to be persisted, the {@link + * RealmDefinition#created() created} and {@link RealmDefinition#updated() updated} attributes + * are solely managed by the implementation. + * @return the persisted state of the realm definition + * @throws RealmNotFoundException if a realm with the given ID does not exist + * @throws RealmExpectedStateMismatchException if the expected state does not match + * @throws IllegalArgumentException if the transition is not valid. + */ + @Nonnull + RealmDefinition update(@Nonnull RealmDefinition expected, @Nonnull RealmDefinition update); + + /** + * Deletes the given realm. + * + * @param expected The expected persisted state of the realm definition. 
This must exactly + * represent the persisted realm definition as returned by {@link #create(String)} or {@link + * #get(String)} or {@link #update(RealmDefinition, RealmDefinition)}. + * @throws RealmNotFoundException if a realm with the given ID does not exist + * @throws RealmExpectedStateMismatchException if the expected state does not match + */ + void delete(@Nonnull RealmDefinition expected); +} diff --git a/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmNotFoundException.java b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmNotFoundException.java new file mode 100644 index 0000000000..c898cfca4d --- /dev/null +++ b/persistence/nosql/realms/api/src/main/java/org/apache/polaris/persistence/nosql/realms/api/RealmNotFoundException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.api; + +public class RealmNotFoundException extends RuntimeException { + public RealmNotFoundException(String message) { + super(message); + } +} diff --git a/persistence/nosql/realms/impl/build.gradle.kts b/persistence/nosql/realms/impl/build.gradle.kts new file mode 100644 index 0000000000..2c5c360ec7 --- /dev/null +++ b/persistence/nosql/realms/impl/build.gradle.kts @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +plugins { + id("org.kordamp.gradle.jandex") + id("polaris-server") +} + +description = "Polaris realms management implementation" + +dependencies { + implementation(project(":polaris-persistence-nosql-realms-api")) + implementation(project(":polaris-persistence-nosql-realms-spi")) + implementation(project(":polaris-idgen-api")) + + implementation(libs.guava) + implementation(libs.slf4j.api) + + compileOnly(platform(libs.jackson.bom)) + compileOnly("com.fasterxml.jackson.core:jackson-annotations") + + compileOnly(project(":polaris-immutables")) + annotationProcessor(project(":polaris-immutables", configuration = "processor")) + + compileOnly(libs.jakarta.annotation.api) + compileOnly(libs.jakarta.validation.api) + compileOnly(libs.jakarta.inject.api) + compileOnly(libs.jakarta.enterprise.cdi.api) + + testImplementation(testFixtures(project(":polaris-persistence-nosql-realms-spi"))) + + testRuntimeOnly(project(":polaris-idgen-impl")) + + testCompileOnly(libs.jakarta.annotation.api) + testCompileOnly(libs.jakarta.validation.api) + testCompileOnly(libs.jakarta.inject.api) + testCompileOnly(libs.jakarta.enterprise.cdi.api) +} + +tasks.withType { isFailOnError = false } diff --git a/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/RealmManagementImpl.java b/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/RealmManagementImpl.java new file mode 100644 index 0000000000..4741eafbe2 --- /dev/null +++ b/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/RealmManagementImpl.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.polaris.persistence.nosql.realms.impl; + +import static com.google.common.base.Preconditions.checkArgument; +import static java.lang.String.format; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.ACTIVE; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.CREATED; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.INACTIVE; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.INITIALIZING; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.LOADING; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.PURGED; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.PURGING; + +import com.google.errorprone.annotations.MustBeClosed; +import jakarta.annotation.Nonnull; +import jakarta.enterprise.context.ApplicationScoped; +import jakarta.inject.Inject; +import java.time.Instant; +import java.util.Optional; +import java.util.function.Supplier; +import java.util.regex.Pattern; +import java.util.stream.Stream; +import org.apache.polaris.ids.api.MonotonicClock; +import org.apache.polaris.persistence.nosql.realms.api.RealmDefinition; +import 
org.apache.polaris.persistence.nosql.realms.api.RealmExpectedStateMismatchException; +import org.apache.polaris.persistence.nosql.realms.api.RealmManagement; +import org.apache.polaris.persistence.nosql.realms.spi.RealmStore; + +@ApplicationScoped +class RealmManagementImpl implements RealmManagement { + private static final Pattern VALID_REALM_ID_PATTERN = Pattern.compile("^[a-zA-Z0-9_-]{1,128}$"); + + private final RealmStore store; + private final Supplier clock; + + @SuppressWarnings("CdiInjectionPointsInspection") + @Inject + RealmManagementImpl(RealmStore store, MonotonicClock clock) { + this(store, clock::currentInstant); + } + + RealmManagementImpl(RealmStore store, Supplier clock) { + this.store = store; + this.clock = clock; + } + + @Override + @Nonnull + @MustBeClosed + public Stream list() { + return store.list(); + } + + private static void validateRealmId(@Nonnull String realmId) { + checkArgument( + realmId != null && VALID_REALM_ID_PATTERN.matcher(realmId).matches(), + "Invalid realm ID '%s'", + realmId); + } + + @Override + @Nonnull + public Optional get(@Nonnull String realmId) { + validateRealmId(realmId); + + return store.get(realmId); + } + + @Override + @Nonnull + public RealmDefinition create(@Nonnull String realmId) { + validateRealmId(realmId); + + var now = clock.get(); + return store.create( + realmId, + RealmDefinition.builder().status(CREATED).id(realmId).created(now).updated(now).build()); + } + + private void verifyStateTransition(RealmDefinition expected, RealmDefinition update) { + switch (expected.status()) { + case CREATED -> + checkArgument( + update.status() == CREATED + || update.status() == LOADING + || update.status() == INITIALIZING + || update.status() == PURGING, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + case LOADING, INITIALIZING -> + checkArgument( + update.status() == INACTIVE + || update.status() == ACTIVE + || update.status() == 
PURGING, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + case ACTIVE -> + checkArgument( + update.status() == ACTIVE || update.status() == INACTIVE, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + case INACTIVE -> + checkArgument( + update.status() == ACTIVE + || update.status() == INACTIVE + || update.status() == PURGING, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + case PURGING -> + checkArgument( + update.status() == PURGING || update.status() == PURGED, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + case PURGED -> + checkArgument( + update.status() == PURGED, + "Invalid realm state transition from %s to %s for realm '%s'", + expected.status(), + update.status(), + expected.id()); + default -> + throw new IllegalStateException( + format("Unknown realm status %s for realm '%s'", expected.status(), expected.id())); + } + } + + @Override + @Nonnull + public RealmDefinition update( + @Nonnull RealmDefinition expected, @Nonnull RealmDefinition update) { + validateRealmId(expected.id()); + var realmId = expected.id(); + checkArgument( + realmId.equals(update.id()), + "Expected and update must contain the same realm ID ('%s' / '%s')", + realmId, + update.id()); + + verifyStateTransition(expected, update); + + return store.update( + realmId, + current -> { + if (!current.equals(expected)) { + throw new RealmExpectedStateMismatchException( + format("Realm '%s' does not match the expected state", expected.id())); + } + var now = clock.get(); + return RealmDefinition.builder() + .from(update) + .created(current.created()) + .updated(now) + .build(); + }); + } + + @Override + public void delete(@Nonnull RealmDefinition expected) { + var realmId = expected.id(); + 
validateRealmId(realmId); + checkArgument( + expected.status() == PURGED, + "Realm '%s' must be in state %s to be deleted", + expected.id(), + PURGED); + + store.delete( + realmId, + current -> { + if (!current.equals(expected)) { + throw new RealmExpectedStateMismatchException( + format("Realm '%s' does not match the expected state", expected.id())); + } + }); + } +} diff --git a/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/package-info.java b/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/package-info.java new file mode 100644 index 0000000000..b36f3cf46a --- /dev/null +++ b/persistence/nosql/realms/impl/src/main/java/org/apache/polaris/persistence/nosql/realms/impl/package-info.java @@ -0,0 +1,20 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/** Realms management implementation: do not directly use the types in this package. 
*/ +package org.apache.polaris.persistence.nosql.realms.impl; diff --git a/persistence/nosql/realms/impl/src/main/resources/META-INF/beans.xml b/persistence/nosql/realms/impl/src/main/resources/META-INF/beans.xml new file mode 100644 index 0000000000..a297f1aa53 --- /dev/null +++ b/persistence/nosql/realms/impl/src/main/resources/META-INF/beans.xml @@ -0,0 +1,24 @@ + + + + + \ No newline at end of file diff --git a/persistence/nosql/realms/impl/src/test/java/org/apache/polaris/persistence/nosql/realms/impl/TestRealmManagementImpl.java b/persistence/nosql/realms/impl/src/test/java/org/apache/polaris/persistence/nosql/realms/impl/TestRealmManagementImpl.java new file mode 100644 index 0000000000..9a4eb98ff3 --- /dev/null +++ b/persistence/nosql/realms/impl/src/test/java/org/apache/polaris/persistence/nosql/realms/impl/TestRealmManagementImpl.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.impl; + +import static java.time.Instant.now; +import static org.apache.polaris.persistence.nosql.api.Realms.SYSTEM_REALM_ID; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.ACTIVE; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.CREATED; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.INACTIVE; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.INITIALIZING; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.PURGED; +import static org.apache.polaris.persistence.nosql.realms.api.RealmDefinition.RealmStatus.PURGING; + +import java.time.Instant; +import java.util.Map; +import org.apache.polaris.persistence.nosql.realms.api.RealmAlreadyExistsException; +import org.apache.polaris.persistence.nosql.realms.api.RealmDefinition; +import org.apache.polaris.persistence.nosql.realms.api.RealmExpectedStateMismatchException; +import org.apache.polaris.persistence.nosql.realms.api.RealmNotFoundException; +import org.apache.polaris.persistence.nosql.realms.spi.MockRealmStore; +import org.assertj.core.api.SoftAssertions; +import org.assertj.core.api.junit.jupiter.InjectSoftAssertions; +import org.assertj.core.api.junit.jupiter.SoftAssertionsExtension; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(SoftAssertionsExtension.class) +public class TestRealmManagementImpl { + @InjectSoftAssertions protected SoftAssertions soft; + + @Test + public void createUpdateDelete() { + var realmsManagement = new RealmManagementImpl(new MockRealmStore(), Instant::now); + + var something = + RealmDefinition.builder() + .id("something") + .created(now()) + .updated(now()) + .status(ACTIVE) + .build(); + var another = + RealmDefinition.builder() + .id("another") + .created(now()) + 
.updated(now()) + .status(ACTIVE) + .build(); + + soft.assertThatIllegalArgumentException() + .isThrownBy(() -> realmsManagement.create(SYSTEM_REALM_ID)) + .withMessage("Invalid realm ID '%s'", SYSTEM_REALM_ID); + soft.assertThatIllegalArgumentException() + .isThrownBy(() -> realmsManagement.create("::something")) + .withMessage("Invalid realm ID '%s'", "::something"); + soft.assertThatIllegalArgumentException() + .isThrownBy( + () -> + realmsManagement.update( + something.withId("::something"), something.withId("::something"))) + .withMessage("Invalid realm ID '%s'", "::something"); + soft.assertThatIllegalArgumentException() + .isThrownBy(() -> realmsManagement.delete(something.withId("::something"))) + .withMessage("Invalid realm ID '%s'", "::something"); + + // empty index + soft.assertThatThrownBy( + () -> + realmsManagement.update( + something, RealmDefinition.builder().from(something).build())) + .isInstanceOf(RealmNotFoundException.class) + .hasMessage("No realm with ID 'something' exists"); + soft.assertThatThrownBy(() -> realmsManagement.delete(something.withStatus(PURGED))) + .hasMessage("No realm with ID 'something' exists"); + + var created = realmsManagement.create(something.id()); + + soft.assertThat(created).extracting(RealmDefinition::id).isEqualTo(something.id()); + soft.assertThatThrownBy(() -> realmsManagement.create(something.id())) + .isInstanceOf(RealmAlreadyExistsException.class) + .hasMessage("A realm with ID 'something' already exists"); + var gotOpt = realmsManagement.get(something.id()); + soft.assertThat(gotOpt).contains(created); + var got = gotOpt.orElse(null); + + var createdAnother = realmsManagement.create(another.id()); + soft.assertThat(createdAnother).extracting(RealmDefinition::id).isEqualTo(another.id()); + + // RealmsStateObj present + soft.assertThatThrownBy( + () -> realmsManagement.update(something.withId("foo"), something.withId("foo"))) + .isInstanceOf(RealmNotFoundException.class) + .hasMessage("No realm with ID 'foo' 
exists"); + soft.assertThatThrownBy( + () -> realmsManagement.delete(something.withId("foo").withStatus(PURGED))) + .isInstanceOf(RealmNotFoundException.class) + .hasMessage("No realm with ID 'foo' exists"); + + // Update with different realm-IDs (duh!) + soft.assertThatIllegalArgumentException() + .isThrownBy( + () -> + realmsManagement.update( + got, RealmDefinition.builder().from(got).id("something-else").build())); + // Update with different expected state + soft.assertThatThrownBy( + () -> + realmsManagement.update( + RealmDefinition.builder().from(got).putProperty("foo", "bar").build(), + RealmDefinition.builder().from(got).putProperty("meep", "meep").build())) + .isInstanceOf(RealmExpectedStateMismatchException.class) + .hasMessage("Realm '%s' does not match the expected state", created.id()); + + var updated = + realmsManagement.update( + got, RealmDefinition.builder().from(got).putProperty("foo", "bar").build()); + soft.assertThat(updated) + .extracting(RealmDefinition::id, RealmDefinition::properties) + .containsExactly(something.id(), Map.of("foo", "bar")); + var got2Opt = realmsManagement.get(something.id()); + soft.assertThat(got2Opt).contains(updated); + var got2 = got2Opt.orElse(null); + + soft.assertThatIllegalArgumentException() + .isThrownBy(() -> realmsManagement.delete(got2)) + .withMessage("Realm '%s' must be in state PURGED to be deleted", got2.id()); + var initializing = + realmsManagement.update( + got2, RealmDefinition.builder().from(got2).status(INITIALIZING).build()); + var active = + realmsManagement.update( + initializing, RealmDefinition.builder().from(initializing).status(ACTIVE).build()); + soft.assertThatIllegalArgumentException() + .isThrownBy(() -> realmsManagement.delete(active)) + .withMessage("Realm '%s' must be in state PURGED to be deleted", active.id()); + soft.assertThatIllegalArgumentException() + .isThrownBy( + () -> + realmsManagement.update( + active, RealmDefinition.builder().from(active).status(CREATED).build())) + 
.withMessage( + "Invalid realm state transition from ACTIVE to CREATED for realm '%s'", active.id()); + var inactive = + realmsManagement.update( + active, RealmDefinition.builder().from(got2).status(INACTIVE).build()); + var purging = + realmsManagement.update( + inactive, RealmDefinition.builder().from(inactive).status(PURGING).build()); + soft.assertThat(purging).extracting(RealmDefinition::status).isSameAs(PURGING); + var purged = + realmsManagement.update( + purging, RealmDefinition.builder().from(inactive).status(PURGED).build()); + soft.assertThat(purged).extracting(RealmDefinition::status).isSameAs(PURGED); + soft.assertThatCode(() -> realmsManagement.delete(purged)).doesNotThrowAnyException(); + + soft.assertThat(realmsManagement.get(something.id())).isEmpty(); + + soft.assertThat(realmsManagement.get(another.id())).contains(createdAnother); + } +} diff --git a/persistence/nosql/realms/impl/src/test/resources/logback-test.xml b/persistence/nosql/realms/impl/src/test/resources/logback-test.xml new file mode 100644 index 0000000000..fb74fc2c54 --- /dev/null +++ b/persistence/nosql/realms/impl/src/test/resources/logback-test.xml @@ -0,0 +1,30 @@ + + + + + + + %date{ISO8601} [%thread] %-5level %logger{36} - %msg%n + + + + + + diff --git a/persistence/nosql/realms/spi/build.gradle.kts b/persistence/nosql/realms/spi/build.gradle.kts new file mode 100644 index 0000000000..436e3447ba --- /dev/null +++ b/persistence/nosql/realms/spi/build.gradle.kts @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +plugins { + id("org.kordamp.gradle.jandex") + id("polaris-server") +} + +description = "Polaris realms SPI" + +dependencies { + implementation(project(":polaris-persistence-nosql-realms-api")) + implementation(libs.guava) + + compileOnly(platform(libs.jackson.bom)) + compileOnly("com.fasterxml.jackson.core:jackson-annotations") + + compileOnly(libs.jakarta.annotation.api) + compileOnly(libs.jakarta.validation.api) + compileOnly(libs.jakarta.inject.api) + compileOnly(libs.jakarta.enterprise.cdi.api) + + testFixturesApi(platform(libs.jackson.bom)) + testFixturesApi("com.fasterxml.jackson.core:jackson-annotations") + + testFixturesApi(project(":polaris-persistence-nosql-api")) + testFixturesApi(project(":polaris-idgen-api")) + testFixturesApi(project(":polaris-nodes-api")) + testFixturesApi(project(":polaris-nodes-spi")) + testFixturesApi(project(":polaris-persistence-nosql-realms-api")) + testFixturesApi(project(":polaris-persistence-nosql-realms-spi")) +} diff --git a/persistence/nosql/realms/spi/src/main/java/org/apache/polaris/persistence/nosql/realms/spi/RealmStore.java b/persistence/nosql/realms/spi/src/main/java/org/apache/polaris/persistence/nosql/realms/spi/RealmStore.java new file mode 100644 index 0000000000..ac6444005d --- /dev/null +++ b/persistence/nosql/realms/spi/src/main/java/org/apache/polaris/persistence/nosql/realms/spi/RealmStore.java @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.polaris.persistence.nosql.realms.spi; + +import com.google.errorprone.annotations.MustBeClosed; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Stream; +import org.apache.polaris.persistence.nosql.realms.api.RealmAlreadyExistsException; +import org.apache.polaris.persistence.nosql.realms.api.RealmDefinition; +import org.apache.polaris.persistence.nosql.realms.api.RealmManagement; +import org.apache.polaris.persistence.nosql.realms.api.RealmNotFoundException; + +/** + * Interface to be implemented by persistence-specific implementations (NoSQL or metastore manager + * based). + * + *

Implementations must not perform any validation of the realm definitions unless explicitly + * stated below. + */ +public interface RealmStore { + /** Returns a stream of all realm definitions. The returned stream must be closed. */ + @MustBeClosed + Stream list(); + + /** + * Returns a realm definition if it exists. + * + *

Unlike the updating functions, this function does not throw an exception if a realm does not + * exist. + */ + Optional get(String realmId); + + /** + * Deletes a realm definition. + * + * @param callback receives the persisted realm definition that is being deleted. If the callback + * throws any exception, the delete operation must not be persisted. All thrown exceptions + * must be propagated to the caller. + * @throws RealmNotFoundException if a realm with the given ID does not exist + */ + void delete(String realmId, Consumer callback); + + /** + * Updates a realm definition. + * + *

Implementations update the persisted state of the realm definition. The created timestamp + * must be carried forwards from the persisted state. + * + *

 {@link RealmManagement} implementations, which call this function, take care of "properly" + * populating the attributes of the realm definition to persist. + * + * @param updater receives the current definition and returns the updated definition. If the + * updater throws any exception, the update operation must not be persisted. All thrown + * exceptions must be propagated to the caller. + * @return the persisted realm definition + * @throws RealmNotFoundException if a realm with the given ID does not exist + */ + RealmDefinition update(String realmId, Function updater); + + /** + * Creates a new realm. + * + *

{@link RealmManagement} implementations, which call this function, take care of "properly" + * populating the attributes of the realm definition to persist. + * + * @param definition the realm definition of the realm to be created, to be persisted as given. + * @return the persisted realm definition + * @throws RealmAlreadyExistsException if the realm already exists + */ + RealmDefinition create(String realmId, RealmDefinition definition); +} diff --git a/persistence/nosql/realms/spi/src/testFixtures/java/org/apache/polaris/persistence/nosql/realms/spi/MockRealmStore.java b/persistence/nosql/realms/spi/src/testFixtures/java/org/apache/polaris/persistence/nosql/realms/spi/MockRealmStore.java new file mode 100644 index 0000000000..d368f9b033 --- /dev/null +++ b/persistence/nosql/realms/spi/src/testFixtures/java/org/apache/polaris/persistence/nosql/realms/spi/MockRealmStore.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.apache.polaris.persistence.nosql.realms.spi; + +import static java.lang.String.format; +import static java.util.Objects.requireNonNull; + +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.stream.Stream; +import org.apache.polaris.persistence.nosql.realms.api.RealmAlreadyExistsException; +import org.apache.polaris.persistence.nosql.realms.api.RealmDefinition; +import org.apache.polaris.persistence.nosql.realms.api.RealmNotFoundException; + +public class MockRealmStore implements RealmStore { + private final Map realms = new ConcurrentHashMap<>(); + + @Override + public RealmDefinition create(String realmId, RealmDefinition definition) { + var ex = realms.putIfAbsent(realmId, definition); + if (ex != null) { + throw new RealmAlreadyExistsException(format("A realm with ID '%s' already exists", realmId)); + } + return definition; + } + + @Override + public RealmDefinition update( + String realmId, Function updater) { + var computed = new AtomicBoolean(); + var updated = + realms.computeIfPresent( + realmId, + (id, current) -> { + computed.set(true); + return requireNonNull(updater.apply(current)); + }); + if (!computed.get()) { + throw new RealmNotFoundException(format("No realm with ID '%s' exists", realmId)); + } + return updated; + } + + @Override + public void delete(String realmId, Consumer callback) { + var computed = new AtomicBoolean(); + realms.computeIfPresent( + realmId, + (id, current) -> { + computed.set(true); + callback.accept(current); + return null; + }); + if (!computed.get()) { + throw new RealmNotFoundException(format("No realm with ID '%s' exists", realmId)); + } + } + + @Override + public Optional get(String realmId) { + return Optional.ofNullable(realms.get(realmId)); + } + + @Override + public Stream list() { + return 
realms.values().stream(); + } +} From ff48a7d8897953e8a68cc858113a38ce6a3107cb Mon Sep 17 00:00:00 2001 From: Nuoya Jiang <98131931+CodingBangboo@users.noreply.github.com> Date: Mon, 10 Nov 2025 17:53:01 -0600 Subject: [PATCH 10/14] Rename AccessConfig and AccessConfigProvider for clarity (#2883) * rename AccessConfig for clarity * rename getStorageAccessConfig() and add javadoc --- .../connection/ConnectionCredentials.java | 10 +++--- .../AtomicOperationMetaStoreManager.java | 6 ++-- .../dao/entity/ScopedCredentialsResult.java | 20 +++++------- .../TransactionalMetaStoreManagerImpl.java | 6 ++-- .../storage/PolarisStorageIntegration.java | 2 +- ...ssConfig.java => StorageAccessConfig.java} | 8 ++--- .../aws/AwsCredentialsStorageIntegration.java | 6 ++-- .../AzureCredentialsStorageIntegration.java | 10 +++--- .../storage/cache/StorageCredentialCache.java | 10 +++--- .../cache/StorageCredentialCacheEntry.java | 15 +++++---- .../gcp/GcpCredentialsStorageIntegration.java | 6 ++-- .../InMemoryStorageIntegrationTest.java | 2 +- ...Test.java => StorageAccessConfigTest.java} | 20 ++++++------ ...zureCredentialsStorageIntegrationTest.java | 8 ++--- .../cache/StorageCredentialCacheTest.java | 12 +++---- .../AwsCredentialsStorageIntegrationTest.java | 32 +++++++++---------- ...AzureCredentialStorageIntegrationTest.java | 26 +++++++-------- .../GcpCredentialsStorageIntegrationTest.java | 14 ++++---- .../catalog/iceberg/IcebergCatalog.java | 16 +++++----- .../iceberg/IcebergCatalogAdapter.java | 10 +++--- .../iceberg/IcebergCatalogHandler.java | 20 ++++++------ .../catalog/io/DefaultFileIOFactory.java | 10 +++--- .../service/catalog/io/FileIOFactory.java | 7 ++-- .../service/catalog/io/FileIOUtil.java | 16 +++++----- ....java => StorageAccessConfigProvider.java} | 16 +++++----- .../io/WasbTranslatingFileIOFactory.java | 4 +-- .../PolarisCallContextCatalogFactory.java | 10 +++--- ...PolarisStorageIntegrationProviderImpl.java | 6 ++-- .../service/task/TaskFileIOSupplier.java | 
16 +++++----- .../service/admin/PolarisAuthzTestBase.java | 8 ++--- ...bstractPolarisGenericTableCatalogTest.java | 10 +++--- .../iceberg/AbstractIcebergCatalogTest.java | 20 ++++++------ .../AbstractIcebergCatalogViewTest.java | 10 +++--- .../IcebergCatalogHandlerAuthzTest.java | 10 +++--- ...CatalogHandlerFineGrainedDisabledTest.java | 2 +- .../service/catalog/io/FileIOFactoryTest.java | 5 +-- .../policy/AbstractPolicyCatalogTest.java | 10 +++--- .../task/BatchFileCleanupTaskHandlerTest.java | 4 +-- .../ManifestFileCleanupTaskHandlerTest.java | 4 +-- .../task/TableCleanupTaskHandlerTest.java | 4 +-- .../service/task/TaskExecutorImplTest.java | 2 +- .../polaris/service/TestFileIOFactory.java | 4 +-- .../apache/polaris/service/TestServices.java | 14 ++++---- .../catalog/io/MeasuredFileIOFactory.java | 6 ++-- 44 files changed, 228 insertions(+), 229 deletions(-) rename polaris-core/src/main/java/org/apache/polaris/core/storage/{AccessConfig.java => StorageAccessConfig.java} (94%) rename polaris-core/src/test/java/org/apache/polaris/core/storage/{AccessConfigTest.java => StorageAccessConfigTest.java} (87%) rename runtime/service/src/main/java/org/apache/polaris/service/catalog/io/{AccessConfigProvider.java => StorageAccessConfigProvider.java} (88%) diff --git a/polaris-core/src/main/java/org/apache/polaris/core/credentials/connection/ConnectionCredentials.java b/polaris-core/src/main/java/org/apache/polaris/core/credentials/connection/ConnectionCredentials.java index bc8cd3e958..1d86bf03c0 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/credentials/connection/ConnectionCredentials.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/credentials/connection/ConnectionCredentials.java @@ -22,21 +22,21 @@ import java.time.Instant; import java.util.Map; import java.util.Optional; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.immutables.PolarisImmutable; 
/** * Encapsulates credentials and configuration needed to connect to external federated catalogs. * - *

Similar to {@link AccessConfig} for storage, this class holds the credentials and properties - * required for Polaris to authenticate with remote catalog services (e.g., AWS Glue, other Iceberg - * REST catalogs). + *

Similar to {@link StorageAccessConfig} for storage, this class holds the credentials and + * properties required for Polaris to authenticate with remote catalog services (e.g., AWS Glue, + * other Iceberg REST catalogs). * *

Credentials may be temporary and include an expiration time. * *

Note: This interface currently includes only {@code credentials} and {@code expiresAt}. * Additional fields like {@code extraProperties} and {@code internalProperties} (similar to {@link - * AccessConfig}) are not included for now but can be added later if needed for more complex + * StorageAccessConfig}) are not included for now but can be added later if needed for more complex * credential scenarios. */ @PolarisImmutable diff --git a/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java b/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java index 1ec8c89d41..3ae2ac2c9e 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java @@ -77,9 +77,9 @@ import org.apache.polaris.core.policy.PolicyEntity; import org.apache.polaris.core.policy.PolicyMappingUtil; import org.apache.polaris.core.policy.PolicyType; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo; import org.apache.polaris.core.storage.PolarisStorageIntegration; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -1635,14 +1635,14 @@ private void revokeGrantRecord( entityId); try { - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = storageIntegration.getSubscopedCreds( callCtx.getRealmConfig(), allowListOperation, allowedReadLocations, allowedWriteLocations, refreshCredentialsEndpoint); - return new ScopedCredentialsResult(accessConfig); + return new ScopedCredentialsResult(storageAccessConfig); } catch (Exception ex) { return new ScopedCredentialsResult( BaseResult.ReturnStatus.SUBSCOPE_CREDS_ERROR, ex.getMessage()); diff --git 
a/polaris-core/src/main/java/org/apache/polaris/core/persistence/dao/entity/ScopedCredentialsResult.java b/polaris-core/src/main/java/org/apache/polaris/core/persistence/dao/entity/ScopedCredentialsResult.java index 76526a8635..fab339243f 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/persistence/dao/entity/ScopedCredentialsResult.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/dao/entity/ScopedCredentialsResult.java @@ -20,13 +20,13 @@ import jakarta.annotation.Nonnull; import jakarta.annotation.Nullable; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** Result of a getSubscopedCredsForEntity() call */ public class ScopedCredentialsResult extends BaseResult { // null if not success. Else, set of name/value pairs for the credentials - private final AccessConfig accessConfig; + private final StorageAccessConfig storageAccessConfig; /** * Constructor for an error @@ -37,20 +37,16 @@ public class ScopedCredentialsResult extends BaseResult { public ScopedCredentialsResult( @Nonnull ReturnStatus errorCode, @Nullable String extraInformation) { super(errorCode, extraInformation); - this.accessConfig = null; + this.storageAccessConfig = null; } - /** - * Constructor for success - * - * @param accessConfig credentials - */ - public ScopedCredentialsResult(AccessConfig accessConfig) { + /** Constructor for success */ + public ScopedCredentialsResult(StorageAccessConfig storageAccessConfig) { super(ReturnStatus.SUCCESS); - this.accessConfig = accessConfig; + this.storageAccessConfig = storageAccessConfig; } - public AccessConfig getAccessConfig() { - return accessConfig; + public StorageAccessConfig getStorageAccessConfig() { + return storageAccessConfig; } } diff --git a/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java 
b/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java index db3ccd0f35..815e119d30 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java @@ -82,9 +82,9 @@ import org.apache.polaris.core.policy.PolicyEntity; import org.apache.polaris.core.policy.PolicyMappingUtil; import org.apache.polaris.core.policy.PolicyType; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo; import org.apache.polaris.core.storage.PolarisStorageIntegration; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -2128,14 +2128,14 @@ private PolarisEntityResolver resolveSecurableToRoleGrant( entityId); try { - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = storageIntegration.getSubscopedCreds( callCtx.getRealmConfig(), allowListOperation, allowedReadLocations, allowedWriteLocations, refreshCredentialsEndpoint); - return new ScopedCredentialsResult(accessConfig); + return new ScopedCredentialsResult(storageAccessConfig); } catch (Exception ex) { return new ScopedCredentialsResult( BaseResult.ReturnStatus.SUBSCOPE_CREDS_ERROR, ex.getMessage()); diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisStorageIntegration.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisStorageIntegration.java index 1828d01c81..8a2ae7c3a8 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisStorageIntegration.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisStorageIntegration.java @@ -62,7 +62,7 @@ public String getStorageIdentifierOrId() { * handling the relative path * @return An enum map including 
the scoped credentials */ - public abstract AccessConfig getSubscopedCreds( + public abstract StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/AccessConfig.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessConfig.java similarity index 94% rename from polaris-core/src/main/java/org/apache/polaris/core/storage/AccessConfig.java rename to polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessConfig.java index 94e74a3d66..19745322d2 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/AccessConfig.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageAccessConfig.java @@ -26,7 +26,7 @@ import org.immutables.value.Value; @PolarisImmutable -public interface AccessConfig { +public interface StorageAccessConfig { Map credentials(); Map extraProperties(); @@ -57,8 +57,8 @@ default String get(StorageAccessProperty key) { } } - static AccessConfig.Builder builder() { - return ImmutableAccessConfig.builder(); + static StorageAccessConfig.Builder builder() { + return ImmutableStorageAccessConfig.builder(); } interface Builder { @@ -89,6 +89,6 @@ default Builder put(StorageAccessProperty key, String value) { } } - AccessConfig build(); + StorageAccessConfig build(); } } diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/aws/AwsCredentialsStorageIntegration.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/aws/AwsCredentialsStorageIntegration.java index 8023f7a607..e393911f71 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/aws/AwsCredentialsStorageIntegration.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/aws/AwsCredentialsStorageIntegration.java @@ -28,8 +28,8 @@ import java.util.Set; import java.util.stream.Stream; import 
org.apache.polaris.core.config.RealmConfig; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.InMemoryStorageIntegration; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.apache.polaris.core.storage.StorageUtil; import org.apache.polaris.core.storage.aws.StsClientProvider.StsDestination; @@ -70,7 +70,7 @@ public AwsCredentialsStorageIntegration( /** {@inheritDoc} */ @Override - public AccessConfig getSubscopedCreds( + public StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, @@ -80,7 +80,7 @@ public AccessConfig getSubscopedCreds( realmConfig.getConfig(STORAGE_CREDENTIAL_DURATION_SECONDS); AwsStorageConfigurationInfo storageConfig = config(); String region = storageConfig.getRegion(); - AccessConfig.Builder accessConfig = AccessConfig.builder(); + StorageAccessConfig.Builder accessConfig = StorageAccessConfig.builder(); if (shouldUseSts(storageConfig)) { AssumeRoleRequest.Builder request = diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegration.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegration.java index a043a7daa5..0b189b3116 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegration.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegration.java @@ -49,8 +49,8 @@ import java.util.Optional; import java.util.Set; import org.apache.polaris.core.config.RealmConfig; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.InMemoryStorageIntegration; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; @@ -73,7 +73,7 @@ public AzureCredentialsStorageIntegration(AzureStorageConfigurationInfo config) } @Override - public AccessConfig getSubscopedCreds( + public StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, @@ -176,12 +176,12 @@ public AccessConfig getSubscopedCreds( } @VisibleForTesting - static AccessConfig toAccessConfig( + static StorageAccessConfig toAccessConfig( String sasToken, String storageDnsName, Instant expiresAt, Optional refreshCredentialsEndpoint) { - AccessConfig.Builder accessConfig = AccessConfig.builder(); + StorageAccessConfig.Builder accessConfig = StorageAccessConfig.builder(); handleAzureCredential(accessConfig, sasToken, storageDnsName, expiresAt); accessConfig.put( StorageAccessProperty.EXPIRATION_TIME, String.valueOf(expiresAt.toEpochMilli())); @@ -193,7 +193,7 @@ static AccessConfig toAccessConfig( } private static void handleAzureCredential( - AccessConfig.Builder config, String sasToken, String host, Instant expiresAt) { + StorageAccessConfig.Builder config, String sasToken, String host, Instant expiresAt) { config.putCredential(StorageAccessProperty.AZURE_SAS_TOKEN.getPropertyName() + host, sasToken); config.putCredential( StorageAccessProperty.AZURE_SAS_TOKEN_EXPIRES_AT_MS_PREFIX.getPropertyName() + host, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java index 82de799152..93f5e351ab 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java @@ -37,8 +37,8 @@ import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.entity.PolarisEntityType; import 
org.apache.polaris.core.persistence.dao.entity.ScopedCredentialsResult; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisCredentialVendor; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -103,7 +103,7 @@ private long maxCacheDurationMs(RealmConfig realmConfig) { * @param allowedWriteLocations a set of allowed to write locations. * @return the a map of string containing the scoped creds information */ - public AccessConfig getOrGenerateSubScopeCreds( + public StorageAccessConfig getOrGenerateSubScopeCreds( @Nonnull PolarisCredentialVendor credentialVendor, @Nonnull PolarisCallContext callCtx, @Nonnull PolarisEntity polarisEntity, @@ -140,7 +140,7 @@ public AccessConfig getOrGenerateSubScopeCreds( if (scopedCredentialsResult.isSuccess()) { long maxCacheDurationMs = maxCacheDurationMs(callCtx.getRealmConfig()); return new StorageCredentialCacheEntry( - scopedCredentialsResult.getAccessConfig(), maxCacheDurationMs); + scopedCredentialsResult.getStorageAccessConfig(), maxCacheDurationMs); } LOGGER .atDebug() @@ -156,11 +156,11 @@ public AccessConfig getOrGenerateSubScopeCreds( @VisibleForTesting @Nullable Map getIfPresent(StorageCredentialCacheKey key) { - return getAccessConfig(key).map(AccessConfig::credentials).orElse(null); + return getAccessConfig(key).map(StorageAccessConfig::credentials).orElse(null); } @VisibleForTesting - Optional getAccessConfig(StorageCredentialCacheKey key) { + Optional getAccessConfig(StorageCredentialCacheKey key) { return Optional.ofNullable(cache.getIfPresent(key)) .map(StorageCredentialCacheEntry::toAccessConfig); } diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheEntry.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheEntry.java index 7f5789ecbf..1141b34bfb 100644 --- 
a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheEntry.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheEntry.java @@ -19,17 +19,18 @@ package org.apache.polaris.core.storage.cache; import java.time.Instant; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** A storage credential cached entry. */ public class StorageCredentialCacheEntry { /** The scoped creds map that is fetched from a creds vending service */ - public final AccessConfig accessConfig; + public final StorageAccessConfig storageAccessConfig; private final long maxCacheDurationMs; - public StorageCredentialCacheEntry(AccessConfig accessConfig, long maxCacheDurationMs) { - this.accessConfig = accessConfig; + public StorageCredentialCacheEntry( + StorageAccessConfig storageAccessConfig, long maxCacheDurationMs) { + this.storageAccessConfig = storageAccessConfig; this.maxCacheDurationMs = maxCacheDurationMs; } @@ -39,7 +40,7 @@ public long getMaxCacheDurationMs() { /** Get the expiration time in millisecond for the cached entry */ public long getExpirationTime() { - return accessConfig.expiresAt().map(Instant::toEpochMilli).orElse(Long.MAX_VALUE); + return storageAccessConfig.expiresAt().map(Instant::toEpochMilli).orElse(Long.MAX_VALUE); } /** @@ -47,7 +48,7 @@ public long getExpirationTime() { * * @return a map of string representing the subscoped creds info. 
*/ - AccessConfig toAccessConfig() { - return accessConfig; + StorageAccessConfig toAccessConfig() { + return storageAccessConfig; } } diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/gcp/GcpCredentialsStorageIntegration.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/gcp/GcpCredentialsStorageIntegration.java index c0568cc9b5..5f524d9ae4 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/gcp/GcpCredentialsStorageIntegration.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/gcp/GcpCredentialsStorageIntegration.java @@ -39,9 +39,9 @@ import java.util.Set; import java.util.stream.Stream; import org.apache.polaris.core.config.RealmConfig; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.InMemoryStorageIntegration; import org.apache.polaris.core.storage.PolarisStorageIntegration; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.apache.polaris.core.storage.StorageUtil; import org.slf4j.Logger; @@ -72,7 +72,7 @@ public GcpCredentialsStorageIntegration( } @Override - public AccessConfig getSubscopedCreds( + public StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, @@ -109,7 +109,7 @@ public AccessConfig getSubscopedCreds( // If expires_in missing, use source credential's expire time, which require another api call to // get. 
- AccessConfig.Builder accessConfig = AccessConfig.builder(); + StorageAccessConfig.Builder accessConfig = StorageAccessConfig.builder(); accessConfig.put(StorageAccessProperty.GCS_ACCESS_TOKEN, token.getTokenValue()); accessConfig.put( StorageAccessProperty.GCS_ACCESS_TOKEN_EXPIRES_AT, diff --git a/polaris-core/src/test/java/org/apache/polaris/core/storage/InMemoryStorageIntegrationTest.java b/polaris-core/src/test/java/org/apache/polaris/core/storage/InMemoryStorageIntegrationTest.java index 9ba5271ab4..e9640cef81 100644 --- a/polaris-core/src/test/java/org/apache/polaris/core/storage/InMemoryStorageIntegrationTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/core/storage/InMemoryStorageIntegrationTest.java @@ -194,7 +194,7 @@ public MockInMemoryStorageIntegration() { } @Override - public AccessConfig getSubscopedCreds( + public StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, diff --git a/polaris-core/src/test/java/org/apache/polaris/core/storage/AccessConfigTest.java b/polaris-core/src/test/java/org/apache/polaris/core/storage/StorageAccessConfigTest.java similarity index 87% rename from polaris-core/src/test/java/org/apache/polaris/core/storage/AccessConfigTest.java rename to polaris-core/src/test/java/org/apache/polaris/core/storage/StorageAccessConfigTest.java index 57e1f14650..98fad9ef9b 100644 --- a/polaris-core/src/test/java/org/apache/polaris/core/storage/AccessConfigTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/core/storage/StorageAccessConfigTest.java @@ -30,14 +30,14 @@ import java.util.Map; import org.junit.jupiter.api.Test; -public class AccessConfigTest { +public class StorageAccessConfigTest { @Test public void testPutGet() { - AccessConfig.Builder b = AccessConfig.builder(); + StorageAccessConfig.Builder b = StorageAccessConfig.builder(); b.put(AWS_ENDPOINT, "ep1"); b.put(AWS_SECRET_KEY, "sk2"); - AccessConfig c = b.build(); + 
StorageAccessConfig c = b.build(); assertThat(c.credentials()).isEqualTo(Map.of(AWS_SECRET_KEY.getPropertyName(), "sk2")); assertThat(c.extraProperties()).isEqualTo(Map.of(AWS_ENDPOINT.getPropertyName(), "ep1")); assertThat(c.get(AWS_SECRET_KEY)).isEqualTo("sk2"); @@ -46,19 +46,19 @@ public void testPutGet() { @Test public void testGetExtraProperty() { - AccessConfig.Builder b = AccessConfig.builder(); + StorageAccessConfig.Builder b = StorageAccessConfig.builder(); b.putExtraProperty(AWS_ENDPOINT.getPropertyName(), "extra"); - AccessConfig c = b.build(); + StorageAccessConfig c = b.build(); assertThat(c.extraProperties()).isEqualTo(Map.of(AWS_ENDPOINT.getPropertyName(), "extra")); assertThat(c.get(AWS_ENDPOINT)).isEqualTo("extra"); } @Test public void testGetInternalProperty() { - AccessConfig.Builder b = AccessConfig.builder(); + StorageAccessConfig.Builder b = StorageAccessConfig.builder(); b.putExtraProperty(AWS_ENDPOINT.getPropertyName(), "extra"); b.putInternalProperty(AWS_ENDPOINT.getPropertyName(), "ep1"); - AccessConfig c = b.build(); + StorageAccessConfig c = b.build(); assertThat(c.extraProperties()).isEqualTo(Map.of(AWS_ENDPOINT.getPropertyName(), "extra")); assertThat(c.internalProperties()).isEqualTo(Map.of(AWS_ENDPOINT.getPropertyName(), "ep1")); assertThat(c.get(AWS_ENDPOINT)).isEqualTo("ep1"); @@ -66,11 +66,11 @@ public void testGetInternalProperty() { @Test public void testNoCredentialOverride() { - AccessConfig.Builder b = AccessConfig.builder(); + StorageAccessConfig.Builder b = StorageAccessConfig.builder(); b.put(AWS_SECRET_KEY, "sk-test"); b.putExtraProperty(AWS_SECRET_KEY.getPropertyName(), "sk-extra"); b.putInternalProperty(AWS_SECRET_KEY.getPropertyName(), "sk-internal"); - AccessConfig c = b.build(); + StorageAccessConfig c = b.build(); assertThat(c.get(AWS_SECRET_KEY)).isEqualTo("sk-test"); assertThat(c.extraProperties()).isEqualTo(Map.of(AWS_SECRET_KEY.getPropertyName(), "sk-extra")); assertThat(c.internalProperties()) @@ -79,7 +79,7 @@ 
public void testNoCredentialOverride() { @Test public void testExpiresAt() { - AccessConfig.Builder b = AccessConfig.builder(); + StorageAccessConfig.Builder b = StorageAccessConfig.builder(); assertThat(b.build().expiresAt()).isEmpty(); b.put(GCS_ACCESS_TOKEN_EXPIRES_AT, "111"); assertThat(b.build().expiresAt()).hasValue(Instant.ofEpochMilli(111)); diff --git a/polaris-core/src/test/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegrationTest.java b/polaris-core/src/test/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegrationTest.java index 794ae25fe5..eda958fcac 100644 --- a/polaris-core/src/test/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegrationTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/core/storage/azure/AzureCredentialsStorageIntegrationTest.java @@ -23,7 +23,7 @@ import java.time.Instant; import java.util.Optional; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.Test; @@ -34,7 +34,7 @@ public class AzureCredentialsStorageIntegrationTest { public void testAzureCredentialFormatting() { Instant expiresAt = Instant.ofEpochMilli(Long.MAX_VALUE); - AccessConfig noSuffixResult = + StorageAccessConfig noSuffixResult = toAccessConfig("sasToken", "some_account", expiresAt, Optional.empty()); Assertions.assertThat(noSuffixResult.credentials()).hasSize(3); Assertions.assertThat(noSuffixResult.credentials()).containsKey("adls.sas-token.some_account"); @@ -44,7 +44,7 @@ public void testAzureCredentialFormatting() { .doesNotContainKey( StorageAccessProperty.AZURE_REFRESH_CREDENTIALS_ENDPOINT.getPropertyName()); - AccessConfig adlsSuffixResult = + StorageAccessConfig adlsSuffixResult = toAccessConfig( "sasToken", "some_account." 
+ AzureLocation.ADLS_ENDPOINT, @@ -63,7 +63,7 @@ public void testAzureCredentialFormatting() { StorageAccessProperty.AZURE_REFRESH_CREDENTIALS_ENDPOINT.getPropertyName(), "endpoint/credentials"); - AccessConfig blobSuffixResult = + StorageAccessConfig blobSuffixResult = toAccessConfig( "sasToken", "some_account." + AzureLocation.BLOB_ENDPOINT, expiresAt, Optional.empty()); Assertions.assertThat(blobSuffixResult.credentials()).hasSize(4); diff --git a/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java b/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java index a51badf4b8..f1e5ac1f61 100644 --- a/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java @@ -42,7 +42,7 @@ import org.apache.polaris.core.persistence.transactional.TransactionalPersistence; import org.apache.polaris.core.persistence.transactional.TreeMapMetaStore; import org.apache.polaris.core.persistence.transactional.TreeMapTransactionalPersistenceImpl; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.RepeatedTest; @@ -412,7 +412,7 @@ private static List getFakeScopedCreds(int number, bool : String.valueOf(Long.MAX_VALUE); res.add( new ScopedCredentialsResult( - AccessConfig.builder() + StorageAccessConfig.builder() .put(StorageAccessProperty.AWS_KEY_ID, "key_id_" + finalI) .put(StorageAccessProperty.AWS_SECRET_KEY, "key_secret_" + finalI) .put(StorageAccessProperty.AWS_SESSION_TOKEN_EXPIRES_AT_MS, expireTime) @@ -421,14 +421,14 @@ private static List getFakeScopedCreds(int number, bool if (res.size() == number) return res; res.add( new ScopedCredentialsResult( - 
AccessConfig.builder() + StorageAccessConfig.builder() .put(StorageAccessProperty.AZURE_SAS_TOKEN, "sas_token_" + finalI) .put(StorageAccessProperty.EXPIRATION_TIME, expireTime) .build())); if (res.size() == number) return res; res.add( new ScopedCredentialsResult( - AccessConfig.builder() + StorageAccessConfig.builder() .put(StorageAccessProperty.GCS_ACCESS_TOKEN, "gcs_token_" + finalI) .put(StorageAccessProperty.GCS_ACCESS_TOKEN_EXPIRES_AT, expireTime) .build())); @@ -459,7 +459,7 @@ public void testExtraProperties() { storageCredentialCache = newStorageCredentialCache(); ScopedCredentialsResult properties = new ScopedCredentialsResult( - AccessConfig.builder() + StorageAccessConfig.builder() .put(StorageAccessProperty.AWS_SECRET_KEY, "super-secret-123") .put(StorageAccessProperty.AWS_ENDPOINT, "test-endpoint1") .put(StorageAccessProperty.AWS_PATH_STYLE_ACCESS, "true") @@ -477,7 +477,7 @@ public void testExtraProperties() { .thenReturn(properties); List entityList = getPolarisEntities(); - AccessConfig config = + StorageAccessConfig config = storageCredentialCache.getOrGenerateSubScopeCreds( metaStoreManager, callCtx, diff --git a/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java b/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java index ac1ba85fd2..fb0c63c403 100644 --- a/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/service/storage/aws/AwsCredentialsStorageIntegrationTest.java @@ -25,8 +25,8 @@ import java.util.List; import java.util.Optional; import java.util.Set; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.BaseStorageIntegrationTest; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import 
org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; @@ -84,7 +84,7 @@ public void testGetSubscopedCreds(String scheme) { return ASSUME_ROLE_RESPONSE; }); String warehouseDir = scheme + "://bucket/path/to/warehouse"; - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(warehouseDir) @@ -98,7 +98,7 @@ public void testGetSubscopedCreds(String scheme) { Set.of(warehouseDir + "/namespace/table"), Set.of(warehouseDir + "/namespace/table"), Optional.of("/namespace/table/credentials")); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .isNotEmpty() .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") @@ -106,7 +106,7 @@ public void testGetSubscopedCreds(String scheme) { .containsEntry( StorageAccessProperty.AWS_SESSION_TOKEN_EXPIRES_AT_MS.getPropertyName(), String.valueOf(EXPIRE_TIME.toEpochMilli())); - assertThat(accessConfig.extraProperties()) + assertThat(storageAccessConfig.extraProperties()) .containsEntry( StorageAccessProperty.AWS_REFRESH_CREDENTIALS_ENDPOINT.getPropertyName(), "/namespace/table/credentials"); @@ -254,7 +254,7 @@ public void testGetSubscopedCredsInlinePolicy(String awsPartition) { break; case AWS_PARTITION: case "aws-us-gov": - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -269,7 +269,7 @@ public void testGetSubscopedCredsInlinePolicy(String awsPartition) { Set.of(s3Path(bucket, firstPath), s3Path(bucket, secondPath)), Set.of(s3Path(bucket, firstPath)), Optional.empty()); - assertThat(accessConfig.credentials()) + 
assertThat(storageAccessConfig.credentials()) .isNotEmpty() .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") @@ -355,7 +355,7 @@ public void testGetSubscopedCredsInlinePolicyWithoutList() { }); return ASSUME_ROLE_RESPONSE; }); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -370,7 +370,7 @@ public void testGetSubscopedCredsInlinePolicyWithoutList() { Set.of(s3Path(bucket, firstPath), s3Path(bucket, secondPath)), Set.of(s3Path(bucket, firstPath)), Optional.empty()); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .isNotEmpty() .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") @@ -450,7 +450,7 @@ public void testGetSubscopedCredsInlinePolicyWithoutWrites() { }); return ASSUME_ROLE_RESPONSE; }); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -465,7 +465,7 @@ public void testGetSubscopedCredsInlinePolicyWithoutWrites() { Set.of(s3Path(bucket, firstPath), s3Path(bucket, secondPath)), Set.of(), Optional.empty()); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .isNotEmpty() .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") @@ -517,7 +517,7 @@ public void testGetSubscopedCredsInlinePolicyWithEmptyReadAndWrite() { }); return ASSUME_ROLE_RESPONSE; }); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( 
AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -532,7 +532,7 @@ public void testGetSubscopedCredsInlinePolicyWithEmptyReadAndWrite() { Set.of(), Set.of(), Optional.empty()); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .isNotEmpty() .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") @@ -578,7 +578,7 @@ public void testClientRegion(String awsPartition) { break; case AWS_PARTITION: case "aws-us-gov": - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -593,7 +593,7 @@ public void testClientRegion(String awsPartition) { Set.of(), Set.of(), Optional.empty()); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), "sess") .containsEntry(StorageAccessProperty.AWS_KEY_ID.getPropertyName(), "accessKey") .containsEntry(StorageAccessProperty.AWS_SECRET_KEY.getPropertyName(), "secretKey") @@ -619,7 +619,7 @@ public void testNoClientRegion(String awsPartition) { }); switch (awsPartition) { case AWS_PARTITION: - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = new AwsCredentialsStorageIntegration( AwsStorageConfigurationInfo.builder() .addAllowedLocation(s3Path(bucket, warehouseKeyPrefix)) @@ -633,7 +633,7 @@ public void testNoClientRegion(String awsPartition) { Set.of(), Set.of(), Optional.empty()); - assertThat(accessConfig.credentials()) + assertThat(storageAccessConfig.credentials()) .isNotEmpty() .doesNotContainKey(StorageAccessProperty.CLIENT_REGION.getPropertyName()); break; diff --git a/polaris-core/src/test/java/org/apache/polaris/service/storage/azure/AzureCredentialStorageIntegrationTest.java 
b/polaris-core/src/test/java/org/apache/polaris/service/storage/azure/AzureCredentialStorageIntegrationTest.java index 96e4410007..42a8bd3272 100644 --- a/polaris-core/src/test/java/org/apache/polaris/service/storage/azure/AzureCredentialStorageIntegrationTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/service/storage/azure/AzureCredentialStorageIntegrationTest.java @@ -47,8 +47,8 @@ import java.util.List; import java.util.Optional; import java.util.stream.Stream; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.BaseStorageIntegrationTest; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.apache.polaris.core.storage.azure.AzureCredentialsStorageIntegration; import org.apache.polaris.core.storage.azure.AzureStorageConfigurationInfo; @@ -121,13 +121,13 @@ public void testGetSubscopedTokenList(boolean allowListAction, String service) { String.format( "abfss://container@icebergdfsstorageacct.%s.core.windows.net/polaris-test/", service)); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = subscopedCredsForOperations( /* allowedReadLoc= */ allowedLoc, /* allowedWriteLoc= */ new ArrayList<>(), allowListAction); - Assertions.assertThat(accessConfig.credentials()).hasSize(2); - String sasToken = accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN); + Assertions.assertThat(storageAccessConfig.credentials()).hasSize(2); + String sasToken = storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN); Assertions.assertThat(sasToken).isNotNull(); String serviceEndpoint = String.format("https://icebergdfsstorageacct.%s.core.windows.net", service); @@ -192,7 +192,7 @@ public void testGetSubscopedTokenRead( String.format( "abfss://container@icebergdfsstorageacct.%s.core.windows.net/%s", service, allowedPrefix)); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = subscopedCredsForOperations( 
/* allowedReadLoc= */ allowedLoc, /* allowedWriteLoc= */ new ArrayList<>(), @@ -200,7 +200,7 @@ public void testGetSubscopedTokenRead( BlobClient blobClient = createBlobClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), "https://icebergdfsstorageacct.dfs.core.windows.net", "container", allowedPrefix); @@ -231,7 +231,7 @@ public void testGetSubscopedTokenRead( // read fail because container is blocked BlobClient blobClientReadFail = createBlobClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), String.format("https://icebergdfsstorageacct.%s.core.windows.net", service), "regtest", blockedPrefix); @@ -262,7 +262,7 @@ public void testGetSubscopedTokenWrite( String.format( "abfss://container@icebergdfsstorageacct.%s.core.windows.net/%s", service, allowedPrefix)); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = subscopedCredsForOperations( /* allowedReadLoc= */ new ArrayList<>(), /* allowedWriteLoc= */ allowedLoc, @@ -271,13 +271,13 @@ public void testGetSubscopedTokenWrite( String.format("https://icebergdfsstorageacct.%s.core.windows.net", service); BlobClient blobClient = createBlobClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), serviceEndpoint, "container", allowedPrefix + "metadata/00000-65ffa17b-fe64-4c38-bcb9-06f9bd12aa2a.metadata.json"); DataLakeFileClient fileClient = createDatalakeFileClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), serviceEndpoint, "container", "polaris-test/scopedcreds/metadata", @@ -312,13 +312,13 @@ public void testGetSubscopedTokenWrite( String blockedContainer = "regtest"; BlobClient blobClientWriteFail = createBlobClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + 
storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), serviceEndpoint, blockedContainer, blockedPrefix); DataLakeFileClient fileClientFail = createDatalakeFileClient( - accessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.AZURE_SAS_TOKEN), serviceEndpoint, blockedContainer, "polaris-test/scopedcreds/metadata", @@ -339,7 +339,7 @@ public void testGetSubscopedTokenWrite( } } - private AccessConfig subscopedCredsForOperations( + private StorageAccessConfig subscopedCredsForOperations( List allowedReadLoc, List allowedWriteLoc, boolean allowListAction) { AzureStorageConfigurationInfo azureConfig = AzureStorageConfigurationInfo.builder() diff --git a/polaris-core/src/test/java/org/apache/polaris/service/storage/gcp/GcpCredentialsStorageIntegrationTest.java b/polaris-core/src/test/java/org/apache/polaris/service/storage/gcp/GcpCredentialsStorageIntegrationTest.java index c4f026d86a..b0be0883d8 100644 --- a/polaris-core/src/test/java/org/apache/polaris/service/storage/gcp/GcpCredentialsStorageIntegrationTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/service/storage/gcp/GcpCredentialsStorageIntegrationTest.java @@ -44,8 +44,8 @@ import java.util.List; import java.util.Optional; import java.util.Set; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.BaseStorageIntegrationTest; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; import org.apache.polaris.core.storage.gcp.GcpCredentialsStorageIntegration; import org.apache.polaris.core.storage.gcp.GcpStorageConfigurationInfo; @@ -144,20 +144,20 @@ BlobInfo createStorageBlob(String bucket, String prefix, String fileName) { return BlobInfo.newBuilder(blobId).build(); } - private Storage createStorageClient(AccessConfig accessConfig) { + private Storage createStorageClient(StorageAccessConfig storageAccessConfig) { AccessToken accessToken = 
new AccessToken( - accessConfig.get(StorageAccessProperty.GCS_ACCESS_TOKEN), + storageAccessConfig.get(StorageAccessProperty.GCS_ACCESS_TOKEN), new Date( Long.parseLong( - accessConfig.get(StorageAccessProperty.GCS_ACCESS_TOKEN_EXPIRES_AT)))); + storageAccessConfig.get(StorageAccessProperty.GCS_ACCESS_TOKEN_EXPIRES_AT)))); return StorageOptions.newBuilder() .setCredentials(GoogleCredentials.create(accessToken)) .build() .getService(); } - private AccessConfig subscopedCredsForOperations( + private StorageAccessConfig subscopedCredsForOperations( List allowedReadLoc, List allowedWriteLoc, boolean allowListAction) throws IOException { GcpStorageConfigurationInfo gcpConfig = @@ -302,10 +302,10 @@ public void testRefreshCredentialsEndpointIsReturned() throws IOException { .isNotNull() .isNotEmpty(); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = subscopedCredsForOperations( List.of("gs://bucket1/path/to/data"), List.of("gs://bucket1/path/to/data"), true); - assertThat(accessConfig.get(StorageAccessProperty.GCS_REFRESH_CREDENTIALS_ENDPOINT)) + assertThat(storageAccessConfig.get(StorageAccessProperty.GCS_REFRESH_CREDENTIALS_ENDPOINT)) .isEqualTo(REFRESH_ENDPOINT); } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java index 75742412e3..89624418a6 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java @@ -119,16 +119,16 @@ import org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.core.persistence.resolver.ResolverPath; import org.apache.polaris.core.persistence.resolver.ResolverStatus; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageActions; +import 
org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageLocation; import org.apache.polaris.core.storage.StorageUtil; import org.apache.polaris.service.catalog.SupportsNotifications; import org.apache.polaris.service.catalog.common.CatalogUtils; import org.apache.polaris.service.catalog.common.LocationUtils; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.FileIOFactory; import org.apache.polaris.service.catalog.io.FileIOUtil; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.catalog.validation.IcebergPropertiesValidation; import org.apache.polaris.service.events.IcebergRestCatalogEvents; import org.apache.polaris.service.events.listeners.PolarisEventListener; @@ -179,7 +179,7 @@ public class IcebergCatalog extends BaseMetastoreViewCatalog private long catalogId = -1; private String defaultBaseLocation; private Map catalogProperties; - private final AccessConfigProvider accessConfigProvider; + private final StorageAccessConfigProvider storageAccessConfigProvider; private FileIOFactory fileIOFactory; private PolarisMetaStoreManager metaStoreManager; @@ -197,7 +197,7 @@ public IcebergCatalog( PolarisResolutionManifestCatalogView resolvedEntityView, PolarisPrincipal principal, TaskExecutor taskExecutor, - AccessConfigProvider accessConfigProvider, + StorageAccessConfigProvider storageAccessConfigProvider, FileIOFactory fileIOFactory, PolarisEventListener polarisEventListener) { this.diagnostics = diagnostics; @@ -210,7 +210,7 @@ public IcebergCatalog( this.taskExecutor = taskExecutor; this.catalogId = catalogEntity.getId(); this.catalogName = catalogEntity.getName(); - this.accessConfigProvider = accessConfigProvider; + this.storageAccessConfigProvider = storageAccessConfigProvider; this.fileIOFactory = fileIOFactory; this.metaStoreManager = metaStoreManager; this.polarisEventListener = 
polarisEventListener; @@ -2078,8 +2078,8 @@ private FileIO loadFileIOForTableLike( PolarisResolvedPathWrapper resolvedStorageEntity, Map tableProperties, Set storageActions) { - AccessConfig accessConfig = - accessConfigProvider.getAccessConfig( + StorageAccessConfig storageAccessConfig = + storageAccessConfigProvider.getStorageAccessConfig( callContext, identifier, readLocations, @@ -2087,7 +2087,7 @@ private FileIO loadFileIOForTableLike( Optional.empty(), resolvedStorageEntity); // Reload fileIO based on table specific context - FileIO fileIO = fileIOFactory.loadFileIO(accessConfig, ioImplClassName, tableProperties); + FileIO fileIO = fileIOFactory.loadFileIO(storageAccessConfig, ioImplClassName, tableProperties); // ensure the new fileIO is closed when the catalog is closed closeableGroup.addCloseable(fileIO); return fileIO; diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogAdapter.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogAdapter.java index c636fb075c..352d1a81cc 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogAdapter.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogAdapter.java @@ -81,7 +81,7 @@ import org.apache.polaris.service.catalog.api.IcebergRestCatalogApiService; import org.apache.polaris.service.catalog.api.IcebergRestConfigurationApiService; import org.apache.polaris.service.catalog.common.CatalogAdapter; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.context.catalog.CallContextCatalogFactory; import org.apache.polaris.service.events.listeners.PolarisEventListener; @@ -150,7 +150,7 @@ public class IcebergCatalogAdapter private final CatalogHandlerUtils 
catalogHandlerUtils; private final Instance externalCatalogFactories; private final PolarisEventListener polarisEventListener; - private final AccessConfigProvider accessConfigProvider; + private final StorageAccessConfigProvider storageAccessConfigProvider; private final PolarisMetricsReporter metricsReporter; @Inject @@ -169,7 +169,7 @@ public IcebergCatalogAdapter( CatalogHandlerUtils catalogHandlerUtils, @Any Instance externalCatalogFactories, PolarisEventListener polarisEventListener, - AccessConfigProvider accessConfigProvider, + StorageAccessConfigProvider storageAccessConfigProvider, PolarisMetricsReporter metricsReporter) { this.diagnostics = diagnostics; this.realmContext = realmContext; @@ -186,7 +186,7 @@ public IcebergCatalogAdapter( this.catalogHandlerUtils = catalogHandlerUtils; this.externalCatalogFactories = externalCatalogFactories; this.polarisEventListener = polarisEventListener; - this.accessConfigProvider = accessConfigProvider; + this.storageAccessConfigProvider = storageAccessConfigProvider; this.metricsReporter = metricsReporter; } @@ -228,7 +228,7 @@ IcebergCatalogHandler newHandlerWrapper(SecurityContext securityContext, String catalogHandlerUtils, externalCatalogFactories, polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); } @Override diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java index a10c7afe76..1de129cacc 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java @@ -99,14 +99,14 @@ import org.apache.polaris.core.persistence.pagination.Page; import org.apache.polaris.core.persistence.pagination.PageToken; import org.apache.polaris.core.persistence.resolver.ResolutionManifestFactory; 
-import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageActions; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageUtil; import org.apache.polaris.service.catalog.AccessDelegationMode; import org.apache.polaris.service.catalog.SupportsNotifications; import org.apache.polaris.service.catalog.common.CatalogHandler; import org.apache.polaris.service.catalog.common.CatalogUtils; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.context.catalog.CallContextCatalogFactory; import org.apache.polaris.service.events.listeners.PolarisEventListener; @@ -139,7 +139,7 @@ public class IcebergCatalogHandler extends CatalogHandler implements AutoCloseab private final ReservedProperties reservedProperties; private final CatalogHandlerUtils catalogHandlerUtils; private final PolarisEventListener polarisEventListener; - private final AccessConfigProvider accessConfigProvider; + private final StorageAccessConfigProvider storageAccessConfigProvider; // Catalog instance will be initialized after authorizing resolver successfully resolves // the catalog entity. 
@@ -164,7 +164,7 @@ public IcebergCatalogHandler( CatalogHandlerUtils catalogHandlerUtils, Instance externalCatalogFactories, PolarisEventListener polarisEventListener, - AccessConfigProvider accessConfigProvider) { + StorageAccessConfigProvider storageAccessConfigProvider) { super( diagnostics, callContext, @@ -179,7 +179,7 @@ public IcebergCatalogHandler( this.reservedProperties = reservedProperties; this.catalogHandlerUtils = catalogHandlerUtils; this.polarisEventListener = polarisEventListener; - this.accessConfigProvider = accessConfigProvider; + this.storageAccessConfigProvider = storageAccessConfigProvider; } private CatalogEntity getResolvedCatalogEntity() { @@ -810,15 +810,15 @@ ALLOW_FEDERATED_CATALOGS_CREDENTIAL_VENDING, getResolvedCatalogEntity())) { validateRemoteTableLocations(tableIdentifier, tableLocations, resolvedStoragePath); } - AccessConfig accessConfig = - accessConfigProvider.getAccessConfig( + StorageAccessConfig storageAccessConfig = + storageAccessConfigProvider.getStorageAccessConfig( callContext, tableIdentifier, tableLocations, actions, refreshCredentialsEndpoint, resolvedStoragePath); - Map credentialConfig = accessConfig.credentials(); + Map credentialConfig = storageAccessConfig.credentials(); if (delegationModes.contains(VENDED_CREDENTIALS)) { if (!credentialConfig.isEmpty()) { responseBuilder.addAllConfig(credentialConfig); @@ -831,12 +831,12 @@ ALLOW_FEDERATED_CATALOGS_CREDENTIAL_VENDING, getResolvedCatalogEntity())) { Boolean skipCredIndirection = realmConfig.getConfig(FeatureConfiguration.SKIP_CREDENTIAL_SUBSCOPING_INDIRECTION); Preconditions.checkArgument( - !accessConfig.supportsCredentialVending() || skipCredIndirection, + !storageAccessConfig.supportsCredentialVending() || skipCredIndirection, "Credential vending was requested for table %s, but no credentials are available", tableIdentifier); } } - responseBuilder.addAllConfig(accessConfig.extraProperties()); + 
responseBuilder.addAllConfig(storageAccessConfig.extraProperties()); } return responseBuilder; diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java index a2e78524dc..c132322f5d 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java @@ -27,7 +27,7 @@ import java.util.Map; import org.apache.iceberg.CatalogUtil; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** * A default FileIO factory implementation for creating Iceberg {@link FileIO} instances with @@ -45,7 +45,7 @@ public DefaultFileIOFactory() {} @Override public FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig storageAccessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties) { @@ -56,9 +56,9 @@ public FileIO loadFileIO( // Update with properties in case there are table-level overrides the credentials should // always override table-level properties, since storage configuration will be found at // whatever entity defines it - properties.putAll(accessConfig.credentials()); - properties.putAll(accessConfig.extraProperties()); - properties.putAll(accessConfig.internalProperties()); + properties.putAll(storageAccessConfig.credentials()); + properties.putAll(storageAccessConfig.extraProperties()); + properties.putAll(storageAccessConfig.internalProperties()); return loadFileIOInternal(ioImplClassName, properties); } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java index 5c6007efa1..b9bfbf97e9 100644 --- 
a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java @@ -22,7 +22,7 @@ import jakarta.enterprise.context.ApplicationScoped; import java.util.Map; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** * Interface for providing a way to construct FileIO objects, such as for reading/writing S3. @@ -37,13 +37,14 @@ public interface FileIOFactory { *

This method may obtain subscoped credentials to restrict the FileIO's permissions, ensuring * secure and limited access to the table's data and locations. * - * @param accessConfig the access configuration containing credentials and other properties. + * @param storageAccessConfig the storage access configuration containing credentials and other + * properties. * @param ioImplClassName the class name of the FileIO implementation to load. * @param properties configuration properties for the FileIO. * @return a configured FileIO instance. */ FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig storageAccessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties); } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java index f4a6320d67..7d5a112bba 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java @@ -26,9 +26,9 @@ import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.entity.PolarisEntityConstants; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisCredentialVendor; import org.apache.polaris.core.storage.PolarisStorageActions; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; import org.slf4j.Logger; @@ -74,7 +74,7 @@ public static Optional findStorageInfoFromHierarchy( * and read/write metadata JSON files. 
* */ - public static AccessConfig refreshAccessConfig( + public static StorageAccessConfig refreshAccessConfig( CallContext callContext, StorageCredentialCache storageCredentialCache, PolarisCredentialVendor credentialVendor, @@ -93,7 +93,7 @@ public static AccessConfig refreshAccessConfig( .atDebug() .addKeyValue("tableIdentifier", tableIdentifier) .log("Skipping generation of subscoped creds for table"); - return AccessConfig.builder().build(); + return StorageAccessConfig.builder().build(); } boolean allowList = @@ -105,7 +105,7 @@ public static AccessConfig refreshAccessConfig( || storageActions.contains(PolarisStorageActions.ALL) ? tableLocations : Set.of(); - AccessConfig accessConfig = + StorageAccessConfig storageAccessConfig = storageCredentialCache.getOrGenerateSubScopeCreds( credentialVendor, callContext.getPolarisCallContext(), @@ -117,12 +117,12 @@ public static AccessConfig refreshAccessConfig( LOGGER .atDebug() .addKeyValue("tableIdentifier", tableIdentifier) - .addKeyValue("credentialKeys", accessConfig.credentials().keySet()) - .addKeyValue("extraProperties", accessConfig.extraProperties()) + .addKeyValue("credentialKeys", storageAccessConfig.credentials().keySet()) + .addKeyValue("extraProperties", storageAccessConfig.extraProperties()) .log("Loaded scoped credentials for table"); - if (accessConfig.credentials().isEmpty()) { + if (storageAccessConfig.credentials().isEmpty()) { LOGGER.debug("No credentials found for table"); } - return accessConfig; + return storageAccessConfig; } } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/AccessConfigProvider.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java similarity index 88% rename from runtime/service/src/main/java/org/apache/polaris/service/catalog/io/AccessConfigProvider.java rename to runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java index 
d336040273..80e62856ae 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/AccessConfigProvider.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java @@ -29,8 +29,8 @@ import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.persistence.MetaStoreManagerFactory; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageActions; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,15 +43,15 @@ * primary entrypoint to get sub-scoped credentials for accessing table data. */ @ApplicationScoped -public class AccessConfigProvider { +public class StorageAccessConfigProvider { - private static final Logger LOGGER = LoggerFactory.getLogger(AccessConfigProvider.class); + private static final Logger LOGGER = LoggerFactory.getLogger(StorageAccessConfigProvider.class); private final StorageCredentialCache storageCredentialCache; private final MetaStoreManagerFactory metaStoreManagerFactory; @Inject - public AccessConfigProvider( + public StorageAccessConfigProvider( StorageCredentialCache storageCredentialCache, MetaStoreManagerFactory metaStoreManagerFactory) { this.storageCredentialCache = storageCredentialCache; @@ -68,10 +68,10 @@ public AccessConfigProvider( * to * @param refreshCredentialsEndpoint optional endpoint URL for clients to refresh credentials * @param resolvedPath the entity hierarchy to search for storage configuration - * @return {@link AccessConfig} with scoped credentials and metadata; empty if no storage config - * found + * @return {@link StorageAccessConfig} with scoped credentials and metadata; empty if no storage + * config found */ - public AccessConfig getAccessConfig( + public 
StorageAccessConfig getStorageAccessConfig( @Nonnull CallContext callContext, @Nonnull TableIdentifier tableIdentifier, @Nonnull Set tableLocations, @@ -89,7 +89,7 @@ public AccessConfig getAccessConfig( .atWarn() .addKeyValue("tableIdentifier", tableIdentifier) .log("Table entity has no storage configuration in its hierarchy"); - return AccessConfig.builder().supportsCredentialVending(false).build(); + return StorageAccessConfig.builder().supportsCredentialVending(false).build(); } return FileIOUtil.refreshAccessConfig( callContext, diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/WasbTranslatingFileIOFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/WasbTranslatingFileIOFactory.java index 47617309ce..3e4b7f306f 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/WasbTranslatingFileIOFactory.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/WasbTranslatingFileIOFactory.java @@ -24,7 +24,7 @@ import jakarta.inject.Inject; import java.util.Map; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** A {@link FileIOFactory} that translates WASB paths to ABFS ones */ @ApplicationScoped @@ -40,7 +40,7 @@ public WasbTranslatingFileIOFactory() { @Override public FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig accessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties) { return new WasbTranslatingFileIO( diff --git a/runtime/service/src/main/java/org/apache/polaris/service/context/catalog/PolarisCallContextCatalogFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/context/catalog/PolarisCallContextCatalogFactory.java index 70cb8e6b1d..65eb7f9d57 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/context/catalog/PolarisCallContextCatalogFactory.java +++ 
b/runtime/service/src/main/java/org/apache/polaris/service/context/catalog/PolarisCallContextCatalogFactory.java @@ -32,8 +32,8 @@ import org.apache.polaris.core.persistence.resolver.PolarisResolutionManifest; import org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.events.listeners.PolarisEventListener; import org.apache.polaris.service.task.TaskExecutor; import org.slf4j.Logger; @@ -46,7 +46,7 @@ public class PolarisCallContextCatalogFactory implements CallContextCatalogFacto private final PolarisDiagnostics diagnostics; private final TaskExecutor taskExecutor; - private final AccessConfigProvider accessConfigProvider; + private final StorageAccessConfigProvider storageAccessConfigProvider; private final FileIOFactory fileIOFactory; private final ResolverFactory resolverFactory; private final PolarisEventListener polarisEventListener; @@ -59,7 +59,7 @@ public PolarisCallContextCatalogFactory( PolarisDiagnostics diagnostics, ResolverFactory resolverFactory, TaskExecutor taskExecutor, - AccessConfigProvider accessConfigProvider, + StorageAccessConfigProvider storageAccessConfigProvider, FileIOFactory fileIOFactory, PolarisEventListener polarisEventListener, PolarisMetaStoreManager metaStoreManager, @@ -68,7 +68,7 @@ public PolarisCallContextCatalogFactory( this.diagnostics = diagnostics; this.resolverFactory = resolverFactory; this.taskExecutor = taskExecutor; - this.accessConfigProvider = accessConfigProvider; + this.storageAccessConfigProvider = storageAccessConfigProvider; this.fileIOFactory = fileIOFactory; this.polarisEventListener = polarisEventListener; this.metaStoreManager = metaStoreManager; @@ -94,7 +94,7 @@ public Catalog 
createCallContextCatalog(final PolarisResolutionManifest resolved resolvedManifest, principal, taskExecutor, - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, polarisEventListener); diff --git a/runtime/service/src/main/java/org/apache/polaris/service/storage/PolarisStorageIntegrationProviderImpl.java b/runtime/service/src/main/java/org/apache/polaris/service/storage/PolarisStorageIntegrationProviderImpl.java index 23ec20abc3..706acb4222 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/storage/PolarisStorageIntegrationProviderImpl.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/storage/PolarisStorageIntegrationProviderImpl.java @@ -32,11 +32,11 @@ import java.util.Set; import java.util.function.Supplier; import org.apache.polaris.core.config.RealmConfig; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageActions; import org.apache.polaris.core.storage.PolarisStorageConfigurationInfo; import org.apache.polaris.core.storage.PolarisStorageIntegration; import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; import org.apache.polaris.core.storage.aws.StsClientProvider; @@ -109,13 +109,13 @@ public PolarisStorageIntegrationProviderImpl( storageIntegration = new PolarisStorageIntegration<>((T) polarisStorageConfigurationInfo, "file") { @Override - public AccessConfig getSubscopedCreds( + public StorageAccessConfig getSubscopedCreds( @Nonnull RealmConfig realmConfig, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, Optional refreshCredentialsEndpoint) { - return AccessConfig.builder().supportsCredentialVending(false).build(); + return 
StorageAccessConfig.builder().supportsCredentialVending(false).build(); } @Override diff --git a/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java b/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java index 720f204fc2..b4c31d6921 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java @@ -33,21 +33,21 @@ import org.apache.polaris.core.entity.TaskEntity; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; import org.apache.polaris.core.persistence.ResolvedPolarisEntity; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageActions; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; @ApplicationScoped public class TaskFileIOSupplier { private final FileIOFactory fileIOFactory; - private final AccessConfigProvider accessConfigProvider; + private final StorageAccessConfigProvider accessConfigProvider; @Inject public TaskFileIOSupplier( - FileIOFactory fileIOFactory, AccessConfigProvider accessConfigProvider) { + FileIOFactory fileIOFactory, StorageAccessConfigProvider storageAccessConfigProvider) { this.fileIOFactory = fileIOFactory; - this.accessConfigProvider = accessConfigProvider; + this.accessConfigProvider = storageAccessConfigProvider; } public FileIO apply(TaskEntity task, TableIdentifier identifier, CallContext callContext) { @@ -62,14 +62,14 @@ public FileIO apply(TaskEntity task, TableIdentifier identifier, CallContext cal new ResolvedPolarisEntity(task, List.of(), List.of()); PolarisResolvedPathWrapper resolvedPath = new PolarisResolvedPathWrapper(List.of(resolvedTaskEntity)); 
- AccessConfig accessConfig = - accessConfigProvider.getAccessConfig( + StorageAccessConfig storageAccessConfig = + accessConfigProvider.getStorageAccessConfig( callContext, identifier, locations, storageActions, Optional.empty(), resolvedPath); String ioImpl = properties.getOrDefault( CatalogProperties.FILE_IO_IMPL, "org.apache.iceberg.io.ResolvingFileIO"); - return fileIOFactory.loadFileIO(accessConfig, ioImpl, properties); + return fileIOFactory.loadFileIO(storageAccessConfig, ioImpl, properties); } } diff --git a/runtime/service/src/test/java/org/apache/polaris/service/admin/PolarisAuthzTestBase.java b/runtime/service/src/test/java/org/apache/polaris/service/admin/PolarisAuthzTestBase.java index 4b1799d8cf..0bb4856eb2 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/admin/PolarisAuthzTestBase.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/admin/PolarisAuthzTestBase.java @@ -87,8 +87,8 @@ import org.apache.polaris.service.catalog.generic.PolarisGenericTableCatalog; import org.apache.polaris.service.catalog.iceberg.CatalogHandlerUtils; import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.catalog.policy.PolicyCatalog; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.context.catalog.CallContextCatalogFactory; @@ -203,7 +203,7 @@ public Map getConfigOverrides() { @Inject protected PolarisConfigurationStore configurationStore; @Inject protected StorageCredentialCache storageCredentialCache; @Inject protected ResolverFactory resolverFactory; - @Inject protected AccessConfigProvider accessConfigProvider; + @Inject protected StorageAccessConfigProvider storageAccessConfigProvider; protected IcebergCatalog baseCatalog; protected 
PolarisGenericTableCatalog genericTableCatalog; @@ -499,7 +499,7 @@ private void initBaseCatalog() { passthroughView, authenticatedRoot, Mockito.mock(), - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, polarisEventListener); this.baseCatalog.initialize( @@ -527,7 +527,7 @@ public TestPolarisCallContextCatalogFactory( PolarisDiagnostics diagnostics, ResolverFactory resolverFactory, TaskExecutor taskExecutor, - AccessConfigProvider accessConfigProvider, + StorageAccessConfigProvider accessConfigProvider, FileIOFactory fileIOFactory, PolarisEventListener polarisEventListener, PolarisMetaStoreManager metaStoreManager, diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java index f4dceffe62..5cda9c7981 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java @@ -66,9 +66,9 @@ import org.apache.polaris.service.admin.PolarisAdminService; import org.apache.polaris.service.catalog.PolarisPassthroughResolutionView; import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.DefaultFileIOFactory; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.events.listeners.NoOpPolarisEventListener; import org.apache.polaris.service.storage.PolarisStorageIntegrationProviderImpl; @@ -119,7 +119,7 @@ public abstract class AbstractPolarisGenericTableCatalogTest { private FileIOFactory 
fileIOFactory; private PolarisPrincipal authenticatedRoot; private PolarisEntity catalogEntity; - private AccessConfigProvider accessConfigProvider; + private StorageAccessConfigProvider storageAccessConfigProvider; protected static final Schema SCHEMA = new Schema( @@ -156,8 +156,8 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); - accessConfigProvider = - new AccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + storageAccessConfigProvider = + new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); PrincipalEntity rootPrincipal = metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); @@ -242,7 +242,7 @@ public void before(TestInfo testInfo) { passthroughView, authenticatedRoot, taskExecutor, - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, new NoOpPolarisEventListener()); this.icebergCatalog.initialize( diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java index fa05af7efd..3e7cc19850 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java @@ -126,9 +126,9 @@ import org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.core.secrets.UserSecretsManager; import org.apache.polaris.core.secrets.UserSecretsManagerFactory; -import org.apache.polaris.core.storage.AccessConfig; import org.apache.polaris.core.storage.PolarisStorageIntegration; import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider; +import org.apache.polaris.core.storage.StorageAccessConfig; import 
org.apache.polaris.core.storage.StorageAccessProperty; import org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; @@ -136,11 +136,11 @@ import org.apache.polaris.service.admin.PolarisAdminService; import org.apache.polaris.service.catalog.PolarisPassthroughResolutionView; import org.apache.polaris.service.catalog.Profiles; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.DefaultFileIOFactory; import org.apache.polaris.service.catalog.io.ExceptionMappingFileIO; import org.apache.polaris.service.catalog.io.FileIOFactory; import org.apache.polaris.service.catalog.io.MeasuredFileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.events.IcebergRestCatalogEvents; import org.apache.polaris.service.events.listeners.PolarisEventListener; @@ -252,7 +252,7 @@ public Map getConfigOverrides() { private PolarisPrincipal authenticatedRoot; private TestPolarisEventListener testPolarisEventListener; private ReservedProperties reservedProperties; - private AccessConfigProvider accessConfigProvider; + private StorageAccessConfigProvider storageAccessConfigProvider; @BeforeAll public static void setUpMocks() { @@ -290,8 +290,8 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); - accessConfigProvider = - new AccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + storageAccessConfigProvider = + new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); EntityCache entityCache = createEntityCache(diagServices, realmConfig, metaStoreManager); resolverFactory = (principal, referenceCatalogName) -> @@ -454,7 +454,7 @@ protected IcebergCatalog 
newIcebergCatalog( passthroughView, authenticatedRoot, taskExecutor, - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, polarisEventListener); } @@ -1905,7 +1905,7 @@ public void testDropTableWithPurge() { Set.of(tableMetadata.location()), Set.of(tableMetadata.location()), Optional.empty()) - .getAccessConfig() + .getStorageAccessConfig() .credentials(); Assertions.assertThat(credentials) .isNotNull() @@ -1914,7 +1914,7 @@ public void testDropTableWithPurge() { .containsEntry(StorageAccessProperty.AWS_SECRET_KEY.getPropertyName(), SECRET_ACCESS_KEY) .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), SESSION_TOKEN); FileIO fileIO = - new TaskFileIOSupplier(new DefaultFileIOFactory(), accessConfigProvider) + new TaskFileIOSupplier(new DefaultFileIOFactory(), storageAccessConfigProvider) .apply(taskEntity, TABLE, polarisContext); Assertions.assertThat(fileIO).isNotNull().isInstanceOf(ExceptionMappingFileIO.class); Assertions.assertThat(((ExceptionMappingFileIO) fileIO).getInnerIo()) @@ -2083,14 +2083,14 @@ public void testFileIOWrapper() { new FileIOFactory() { @Override public FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig accessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties) { return measured.loadFileIO( accessConfig, "org.apache.iceberg.inmemory.InMemoryFileIO", Map.of()); } }, - accessConfigProvider); + storageAccessConfigProvider); TableCleanupTaskHandler handler = new TableCleanupTaskHandler( diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java index 2406108abb..f8468d6bf1 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java +++ 
b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java @@ -57,9 +57,9 @@ import org.apache.polaris.service.admin.PolarisAdminService; import org.apache.polaris.service.catalog.PolarisPassthroughResolutionView; import org.apache.polaris.service.catalog.Profiles; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.DefaultFileIOFactory; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.events.IcebergRestCatalogEvents; import org.apache.polaris.service.events.listeners.PolarisEventListener; @@ -121,7 +121,7 @@ public Map getConfigOverrides() { private UserSecretsManager userSecretsManager; private PolarisCallContext polarisContext; private RealmConfig realmConfig; - private AccessConfigProvider accessConfigProvider; + private StorageAccessConfigProvider storageAccessConfigProvider; private TestPolarisEventListener testPolarisEventListener; @@ -162,8 +162,8 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); - accessConfigProvider = - new AccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + storageAccessConfigProvider = + new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); PrincipalEntity rootPrincipal = metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); PolarisPrincipal authenticatedRoot = PolarisPrincipal.of(rootPrincipal, Set.of()); @@ -215,7 +215,7 @@ public void before(TestInfo testInfo) { passthroughView, authenticatedRoot, Mockito.mock(), - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, polarisEventListener); Map properties = diff --git 
a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerAuthzTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerAuthzTest.java index 924e376e4b..34118d6a71 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerAuthzTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerAuthzTest.java @@ -134,7 +134,7 @@ private IcebergCatalogHandler newWrapper( catalogHandlerUtils, emptyExternalCatalogFactory(), polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); } protected void doTestInsufficientPrivileges( @@ -274,7 +274,7 @@ public void testInsufficientPermissionsPriorToSecretRotation() { catalogHandlerUtils, emptyExternalCatalogFactory(), polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); // a variety of actions are all disallowed because the principal's credentials must be rotated doTestInsufficientPrivileges( @@ -312,7 +312,7 @@ public void testInsufficientPermissionsPriorToSecretRotation() { catalogHandlerUtils, emptyExternalCatalogFactory(), polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); doTestSufficientPrivilegeSets( List.of(Set.of(PolarisPrivilege.NAMESPACE_LIST)), @@ -1189,7 +1189,7 @@ public T getConfig(PolarisConfiguration config, CatalogEntity catalogEnti catalogHandlerUtils, emptyExternalCatalogFactory(), polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); } @Test @@ -1894,7 +1894,7 @@ public void testSendNotificationSufficientPrivileges() { diagServices, resolverFactory, Mockito.mock(), - accessConfigProvider, + storageAccessConfigProvider, new DefaultFileIOFactory(), polarisEventListener, metaStoreManager, diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerFineGrainedDisabledTest.java 
b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerFineGrainedDisabledTest.java index 7c5ae41b61..0b4fcc910e 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerFineGrainedDisabledTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandlerFineGrainedDisabledTest.java @@ -71,7 +71,7 @@ private IcebergCatalogHandler newWrapper() { catalogHandlerUtils, emptyExternalCatalogFactory(), polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); } public static class Profile extends PolarisAuthzTestBase.Profile { diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java index 731ab6f071..673e0f2923 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java @@ -169,7 +169,8 @@ public void testLoadFileIOForCleanupTask(String scheme) { Assertions.assertThat(tasks).hasSize(1); TaskEntity taskEntity = TaskEntity.of(tasks.get(0)); FileIO fileIO = - new TaskFileIOSupplier(testServices.fileIOFactory(), testServices.accessConfigProvider()) + new TaskFileIOSupplier( + testServices.fileIOFactory(), testServices.storageAccessConfigProvider()) .apply(taskEntity, TABLE, callContext); Assertions.assertThat(fileIO).isNotNull().isInstanceOf(ExceptionMappingFileIO.class); Assertions.assertThat(((ExceptionMappingFileIO) fileIO).getInnerIo()) @@ -216,7 +217,7 @@ IcebergCatalog createCatalog(TestServices services, String scheme) { passthroughView, services.principal(), services.taskExecutor(), - services.accessConfigProvider(), + services.storageAccessConfigProvider(), services.fileIOFactory(), services.polarisEventListener()); 
polarisCatalog.initialize( diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java index 69b9cde7b6..3ee2faf340 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java @@ -78,9 +78,9 @@ import org.apache.polaris.service.admin.PolarisAdminService; import org.apache.polaris.service.catalog.PolarisPassthroughResolutionView; import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.DefaultFileIOFactory; import org.apache.polaris.service.catalog.io.FileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.events.listeners.NoOpPolarisEventListener; import org.apache.polaris.service.storage.PolarisStorageIntegrationProviderImpl; @@ -145,7 +145,7 @@ public abstract class AbstractPolicyCatalogTest { private FileIOFactory fileIOFactory; private PolarisPrincipal authenticatedRoot; private PolarisEntity catalogEntity; - private AccessConfigProvider accessConfigProvider; + private StorageAccessConfigProvider storageAccessConfigProvider; @BeforeAll public static void setUpMocks() { @@ -177,8 +177,8 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); - accessConfigProvider = - new AccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + storageAccessConfigProvider = + new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); PrincipalEntity rootPrincipal 
= metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); @@ -259,7 +259,7 @@ public void before(TestInfo testInfo) { passthroughView, authenticatedRoot, taskExecutor, - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, new NoOpPolarisEventListener()); this.icebergCatalog.initialize( diff --git a/runtime/service/src/test/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandlerTest.java b/runtime/service/src/test/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandlerTest.java index 28db39376e..b984ea8239 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandlerTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandlerTest.java @@ -49,7 +49,7 @@ import org.apache.polaris.core.persistence.BasePersistence; import org.apache.polaris.core.persistence.MetaStoreManagerFactory; import org.apache.polaris.service.TestFileIOFactory; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -60,7 +60,7 @@ public class BatchFileCleanupTaskHandlerTest { private TaskFileIOSupplier buildTaskFileIOSupplier(FileIO fileIO) { return new TaskFileIOSupplier( - new TestFileIOFactory(fileIO), Mockito.mock(AccessConfigProvider.class)); + new TestFileIOFactory(fileIO), Mockito.mock(StorageAccessConfigProvider.class)); } private PolarisCallContext newCallContext() { diff --git a/runtime/service/src/test/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandlerTest.java b/runtime/service/src/test/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandlerTest.java index d9ca54fac4..5db3d78622 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandlerTest.java +++ 
b/runtime/service/src/test/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandlerTest.java @@ -45,7 +45,7 @@ import org.apache.polaris.core.persistence.BasePersistence; import org.apache.polaris.core.persistence.MetaStoreManagerFactory; import org.apache.polaris.service.TestFileIOFactory; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.junit.jupiter.api.Test; import org.mockito.Mockito; @@ -57,7 +57,7 @@ class ManifestFileCleanupTaskHandlerTest { private TaskFileIOSupplier buildTaskFileIOSupplier(FileIO fileIO) { return new TaskFileIOSupplier( - new TestFileIOFactory(fileIO), Mockito.mock(AccessConfigProvider.class)); + new TestFileIOFactory(fileIO), Mockito.mock(StorageAccessConfigProvider.class)); } private PolarisCallContext newCallContext() { diff --git a/runtime/service/src/test/java/org/apache/polaris/service/task/TableCleanupTaskHandlerTest.java b/runtime/service/src/test/java/org/apache/polaris/service/task/TableCleanupTaskHandlerTest.java index 8ebb96a421..c2569847f5 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/task/TableCleanupTaskHandlerTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/task/TableCleanupTaskHandlerTest.java @@ -51,7 +51,7 @@ import org.apache.polaris.core.persistence.PolarisMetaStoreManager; import org.apache.polaris.core.persistence.pagination.PageToken; import org.apache.polaris.service.TestFileIOFactory; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.assertj.core.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -72,7 +72,7 @@ class TableCleanupTaskHandlerTest { private TableCleanupTaskHandler newTableCleanupTaskHandler(FileIO fileIO) { TaskFileIOSupplier taskFileIOSupplier = new TaskFileIOSupplier( - new 
TestFileIOFactory(fileIO), Mockito.mock(AccessConfigProvider.class)); + new TestFileIOFactory(fileIO), Mockito.mock(StorageAccessConfigProvider.class)); return new TableCleanupTaskHandler( Mockito.mock(), clock, metaStoreManagerFactory, taskFileIOSupplier); } diff --git a/runtime/service/src/test/java/org/apache/polaris/service/task/TaskExecutorImplTest.java b/runtime/service/src/test/java/org/apache/polaris/service/task/TaskExecutorImplTest.java index d6743a3720..0c6061e5a1 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/task/TaskExecutorImplTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/task/TaskExecutorImplTest.java @@ -64,7 +64,7 @@ void testEventsAreEmitted() { testServices.clock(), testServices.metaStoreManagerFactory(), new TaskFileIOSupplier( - testServices.fileIOFactory(), testServices.accessConfigProvider()), + testServices.fileIOFactory(), testServices.storageAccessConfigProvider()), testServices.polarisEventListener(), null); diff --git a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestFileIOFactory.java b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestFileIOFactory.java index 74f2fbc4cf..faffe363b9 100644 --- a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestFileIOFactory.java +++ b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestFileIOFactory.java @@ -22,7 +22,7 @@ import jakarta.annotation.Nonnull; import java.util.Map; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.service.catalog.io.FileIOFactory; /** A FileIOFactory that always returns the same FileIO instance. 
*/ @@ -36,7 +36,7 @@ public TestFileIOFactory(@Nonnull FileIO fileIO) { @Override public FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig accessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties) { return fileIO; diff --git a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java index 030e00b731..6041d2c489 100644 --- a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java +++ b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java @@ -70,9 +70,9 @@ import org.apache.polaris.service.catalog.api.IcebergRestConfigurationApi; import org.apache.polaris.service.catalog.iceberg.CatalogHandlerUtils; import org.apache.polaris.service.catalog.iceberg.IcebergCatalogAdapter; -import org.apache.polaris.service.catalog.io.AccessConfigProvider; import org.apache.polaris.service.catalog.io.FileIOFactory; import org.apache.polaris.service.catalog.io.MeasuredFileIOFactory; +import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; import org.apache.polaris.service.config.ReservedProperties; import org.apache.polaris.service.context.catalog.CallContextCatalogFactory; import org.apache.polaris.service.context.catalog.PolarisCallContextCatalogFactory; @@ -112,7 +112,7 @@ public record TestServices( FileIOFactory fileIOFactory, TaskExecutor taskExecutor, PolarisEventListener polarisEventListener, - AccessConfigProvider accessConfigProvider) { + StorageAccessConfigProvider storageAccessConfigProvider) { private static final RealmContext TEST_REALM = () -> "test-realm"; private static final String GCP_ACCESS_TOKEN = "abc"; @@ -273,8 +273,8 @@ public String getAuthenticationScheme() { PolarisCredentialManager credentialManager = new DefaultPolarisCredentialManager(realmContext, mockCredentialVendors); - AccessConfigProvider accessConfigProvider = - new 
AccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + StorageAccessConfigProvider storageAccessConfigProvider = + new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); FileIOFactory fileIOFactory = fileIOFactorySupplier.get(); TaskExecutor taskExecutor = Mockito.mock(TaskExecutor.class); @@ -285,7 +285,7 @@ public String getAuthenticationScheme() { diagnostics, resolverFactory, taskExecutor, - accessConfigProvider, + storageAccessConfigProvider, fileIOFactory, polarisEventListener, metaStoreManager, @@ -317,7 +317,7 @@ public String getAuthenticationScheme() { catalogHandlerUtils, externalCatalogFactory, polarisEventListener, - accessConfigProvider, + storageAccessConfigProvider, new DefaultMetricsReporter()); IcebergRestCatalogApi restApi = new IcebergRestCatalogApi(catalogService); @@ -359,7 +359,7 @@ public String getAuthenticationScheme() { fileIOFactory, taskExecutor, polarisEventListener, - accessConfigProvider); + storageAccessConfigProvider); } } diff --git a/runtime/service/src/testFixtures/java/org/apache/polaris/service/catalog/io/MeasuredFileIOFactory.java b/runtime/service/src/testFixtures/java/org/apache/polaris/service/catalog/io/MeasuredFileIOFactory.java index 1d5668d0fc..9d18f6a8ea 100644 --- a/runtime/service/src/testFixtures/java/org/apache/polaris/service/catalog/io/MeasuredFileIOFactory.java +++ b/runtime/service/src/testFixtures/java/org/apache/polaris/service/catalog/io/MeasuredFileIOFactory.java @@ -27,7 +27,7 @@ import java.util.Optional; import java.util.function.Supplier; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.storage.AccessConfig; +import org.apache.polaris.core.storage.StorageAccessConfig; /** * A FileIOFactory that measures the number of bytes read, files written, and files deleted. 
It can @@ -52,7 +52,7 @@ public MeasuredFileIOFactory() { @Override public FileIO loadFileIO( - @Nonnull AccessConfig accessConfig, + @Nonnull StorageAccessConfig storageAccessConfig, @Nonnull String ioImplClassName, @Nonnull Map properties) { loadFileIOExceptionSupplier.ifPresent( @@ -62,7 +62,7 @@ public FileIO loadFileIO( MeasuredFileIO wrapped = new MeasuredFileIO( - defaultFileIOFactory.loadFileIO(accessConfig, ioImplClassName, properties), + defaultFileIOFactory.loadFileIO(storageAccessConfig, ioImplClassName, properties), newInputFileExceptionSupplier, newOutputFileExceptionSupplier, getLengthExceptionSupplier); From a5ce3939a0e6f5129027e2d388899e037e6feebd Mon Sep 17 00:00:00 2001 From: Yong Zheng Date: Tue, 11 Nov 2025 08:20:56 -0600 Subject: [PATCH 11/14] Refactor: improve and clean up Dockerfiles (#2957) * Refactor: improve and clean up Dockerfiles * Refactor: improve and clean up Dockerfiles * Refactor: improve and clean up Dockerfiles * Refactor: improve and clean up Dockerfiles * Refactor: improve and clean up Dockerfiles * Refactor: improve and clean up Dockerfiles --- getting-started/spark/notebooks/Dockerfile | 4 +- .../v3.5/getting-started/notebooks/Dockerfile | 7 ++- .../notebooks/SparkPolaris.ipynb | 3 +- plugins/spark/v3.5/regtests/Dockerfile | 34 +++++++------- regtests/Dockerfile | 47 +++++++++---------- runtime/admin/src/main/docker/Dockerfile.jvm | 24 +++++----- runtime/server/src/main/docker/Dockerfile.jvm | 32 ++++++------- site/docker/Dockerfile | 21 +++------ 8 files changed, 79 insertions(+), 93 deletions(-) diff --git a/getting-started/spark/notebooks/Dockerfile b/getting-started/spark/notebooks/Dockerfile index 32ee4067b6..bb57ee65ff 100644 --- a/getting-started/spark/notebooks/Dockerfile +++ b/getting-started/spark/notebooks/Dockerfile @@ -19,8 +19,8 @@ FROM docker.io/apache/spark:3.5.6-java17 -ENV PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip:/home/spark/venv/lib/python3.10/site-packages" -ENV 
PYSPARK_PYTHON=/home/spark/venv/bin/python +ENV PYSPARK_PYTHON=/home/spark/venv/bin/python \ + PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip:/home/spark/venv/lib/python3.10/site-packages" USER root diff --git a/plugins/spark/v3.5/getting-started/notebooks/Dockerfile b/plugins/spark/v3.5/getting-started/notebooks/Dockerfile index f5e052b2a3..392d79e0a4 100644 --- a/plugins/spark/v3.5/getting-started/notebooks/Dockerfile +++ b/plugins/spark/v3.5/getting-started/notebooks/Dockerfile @@ -19,8 +19,8 @@ FROM docker.io/apache/spark:3.5.6-java17 -ENV PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip:/home/spark/venv/lib/python3.10/site-packages" -ENV PYSPARK_PYTHON=/home/spark/venv/bin/python +ENV PYSPARK_PYTHON=/home/spark/venv/bin/python \ + PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip:/home/spark/venv/lib/python3.10/site-packages" USER root @@ -36,8 +36,7 @@ WORKDIR /home/spark COPY --chown=spark client /home/spark/client COPY --chown=spark regtests/requirements.txt /tmp COPY --chown=spark regtests/notebook_requirements.txt /tmp -COPY --chown=spark plugins/spark/v3.5/spark/build/2.12/libs /home/spark/polaris_libs - +COPY --chown=spark plugins/spark/v3.5/spark/build/2.12/libs/*bundle.jar /opt/spark/jars/ RUN python3 -m venv /home/spark/venv && \ . 
/home/spark/venv/bin/activate && \ diff --git a/plugins/spark/v3.5/getting-started/notebooks/SparkPolaris.ipynb b/plugins/spark/v3.5/getting-started/notebooks/SparkPolaris.ipynb index f4e4a00bb4..226a42920c 100644 --- a/plugins/spark/v3.5/getting-started/notebooks/SparkPolaris.ipynb +++ b/plugins/spark/v3.5/getting-started/notebooks/SparkPolaris.ipynb @@ -265,7 +265,8 @@ "from pyspark.sql import SparkSession\n", "\n", "spark = (SparkSession.builder\n", - " .config(\"spark.jars\", \"../polaris_libs/polaris-spark-3.5_2.12-1.2.0-incubating-SNAPSHOT-bundle.jar\") # TODO: add a way to automatically discover the Jar\n", + " # This jar is now automatically discovered, thus no longer needed\n", + " #.config(\"spark.jars\", \"../polaris_libs/polaris-spark-3.5_2.12-1.2.0-incubating-SNAPSHOT-bundle.jar\")\n", " .config(\"spark.jars.packages\", \"org.apache.iceberg:iceberg-aws-bundle:1.10.0,io.delta:delta-spark_2.12:3.2.1\")\n", " .config(\"spark.sql.catalog.spark_catalog\", \"org.apache.spark.sql.delta.catalog.DeltaCatalog\")\n", " .config('spark.sql.iceberg.vectorization.enabled', 'false')\n", diff --git a/plugins/spark/v3.5/regtests/Dockerfile b/plugins/spark/v3.5/regtests/Dockerfile index db84d3eb1f..5c4c480f20 100755 --- a/plugins/spark/v3.5/regtests/Dockerfile +++ b/plugins/spark/v3.5/regtests/Dockerfile @@ -18,31 +18,29 @@ # FROM docker.io/apache/spark:3.5.6-java17 -ARG POLARIS_HOST=polaris -ENV POLARIS_HOST=$POLARIS_HOST -ENV SPARK_HOME=/opt/spark -ENV CURRENT_SCALA_VERSION='2.12' -ENV LANGUAGE='en_US:en' + +ARG POLARIS_HOST=polaris \ + CURRENT_SCALA_VERSION=2.12 + +ENV POLARIS_HOST=${POLARIS_HOST} \ + CURRENT_SCALA_VERSION=${CURRENT_SCALA_VERSION} USER root -RUN apt update -RUN apt-get install -y diffutils wget curl -RUN mkdir -p /home/spark && \ - chown -R spark /home/spark && \ - mkdir -p /tmp/polaris-regtests && \ - chown -R spark /tmp/polaris-regtests -RUN mkdir /opt/spark/conf && chmod -R 777 /opt/spark/conf -USER spark +RUN apt-get update && \ + apt-get install 
-y --no-install-recommends diffutils wget curl && \ + rm -rf /var/lib/apt/lists/* && \ + mkdir -p /home/spark /tmp/polaris-regtests /opt/spark/conf && \ + chown -R spark:spark /home/spark /tmp/polaris-regtests && \ + chmod -R 777 /opt/spark/conf WORKDIR /home/spark/polaris -COPY --chown=spark ./v3.5 /home/spark/polaris/v3.5 +COPY --chown=spark:spark ./v3.5 /home/spark/polaris/v3.5 + +# /home/spark/.../regtests might not be writable in all situations, see https://github.com/apache/polaris/pull/205 +RUN chmod -R 777 /home/spark/polaris/v3.5/regtests -# /home/spark/regtests might not be writable in all situations, see https://github.com/apache/polaris/pull/205 -USER root -RUN chmod -R go+rwx /home/spark/polaris -RUN chmod -R 777 ./v3.5/regtests USER spark ENTRYPOINT ["./v3.5/regtests/run.sh"] diff --git a/regtests/Dockerfile b/regtests/Dockerfile index 88fa13ddaf..183701a5cf 100644 --- a/regtests/Dockerfile +++ b/regtests/Dockerfile @@ -18,45 +18,40 @@ # FROM docker.io/apache/spark:3.5.6-java17-python3 + ARG POLARIS_HOST=polaris -ENV POLARIS_HOST=$POLARIS_HOST -ENV SPARK_HOME=/opt/spark -ENV LANGUAGE='en_US:en' + +ENV POLARIS_HOST=${POLARIS_HOST} \ + PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip" USER root -RUN apt update -RUN apt-get install -y diffutils wget curl python3.10-venv jq -RUN mkdir -p /home/spark && \ - chown -R spark /home/spark && \ - mkdir -p /tmp/polaris-regtests && \ - chown -R spark /tmp/polaris-regtests -RUN mkdir /opt/spark/conf && chmod -R 777 /opt/spark/conf + +RUN apt-get update && \ + apt-get install -y --no-install-recommends diffutils wget curl python3.10-venv jq && \ + rm -rf /var/lib/apt/lists/* && \ + mkdir -p /home/spark /tmp/polaris-regtests /opt/spark/conf && \ + chown -R spark:spark /home/spark /tmp/polaris-regtests && \ + chmod -R 777 /opt/spark/conf + +COPY --chown=spark:spark ./regtests/setup.sh ./regtests/pyspark-setup.sh ./regtests/requirements.txt /home/spark/polaris/regtests/ +COPY 
--chown=spark:spark ./client/python /home/spark/polaris/client/python +COPY --chown=spark:spark ./polaris /home/spark/polaris/polaris +COPY --chown=spark:spark ./spec /home/spark/polaris/spec +COPY --chown=spark:spark ./regtests /home/spark/polaris/regtests + +# /home/spark/regtests might not be writable in all situations, see https://github.com/apache/polaris/pull/205 +RUN chmod -R go+rwx /home/spark/polaris USER spark -ENV PYTHONPATH="${SPARK_HOME}/python/:${SPARK_HOME}/python/lib/py4j-0.10.9.7-src.zip:$PYTHONPATH" -# Copy and run setup.sh separately so that test sources can change, but the setup script run is still cached WORKDIR /home/spark/polaris -COPY --chown=spark ./regtests/setup.sh /home/spark/polaris/regtests/setup.sh -COPY --chown=spark ./regtests/pyspark-setup.sh /home/spark/polaris/regtests/pyspark-setup.sh -COPY --chown=spark ./client/python /home/spark/polaris/client/python -COPY --chown=spark ./polaris /home/spark/polaris/polaris -COPY --chown=spark ./spec /home/spark/polaris/spec -COPY --chown=spark ./regtests/requirements.txt /tmp/ RUN python3 -m venv /home/spark/polaris/polaris-venv && \ . 
/home/spark/polaris/polaris-venv/bin/activate && \ - pip install -r /tmp/requirements.txt && \ + pip install -r /home/spark/polaris/regtests/requirements.txt && \ cd /home/spark/polaris/client/python && \ poetry install && \ deactivate && \ /home/spark/polaris/regtests/setup.sh -COPY --chown=spark ./regtests /home/spark/polaris/regtests - -# /home/spark/regtests might not be writable in all situations, see https://github.com/apache/polaris/pull/205 -USER root -RUN chmod -R go+rwx /home/spark/polaris -USER spark - ENTRYPOINT ["./regtests/run.sh"] diff --git a/runtime/admin/src/main/docker/Dockerfile.jvm b/runtime/admin/src/main/docker/Dockerfile.jvm index 4a6330ec38..ebd121d135 100644 --- a/runtime/admin/src/main/docker/Dockerfile.jvm +++ b/runtime/admin/src/main/docker/Dockerfile.jvm @@ -18,23 +18,25 @@ # FROM registry.access.redhat.com/ubi9/openjdk-21-runtime:1.23-6.1761164966 -LABEL org.opencontainers.image.source=https://github.com/apache/polaris -LABEL org.opencontainers.image.description="Apache Polaris (incubating) Admin Tool" -LABEL org.opencontainers.image.licenses=Apache-2.0 +LABEL org.opencontainers.image.source=https://github.com/apache/polaris \ + org.opencontainers.image.description="Apache Polaris (incubating) Admin Tool" \ + org.opencontainers.image.licenses=Apache-2.0 -ENV LANGUAGE='en_US:en' +ENV LANGUAGE='en_US:en' \ + USER=polaris \ + UID=10000 \ + HOME=/home/polaris USER root -RUN groupadd --gid 10001 polaris \ - && useradd --uid 10000 --gid polaris polaris \ - && chown -R polaris:polaris /opt/jboss/container \ - && chown -R polaris:polaris /deployments + +RUN groupadd --gid 10001 polaris && \ + useradd --uid 10000 --gid polaris -m polaris && \ + mkdir -p /deployments && \ + chown -R polaris:polaris /deployments /opt/jboss/container USER polaris + WORKDIR /home/polaris -ENV USER=polaris -ENV UID=10000 -ENV HOME=/home/polaris # We make four distinct layers so if there are application changes the library layers can be reused COPY 
--chown=polaris:polaris build/quarkus-app/lib/ /deployments/lib/ diff --git a/runtime/server/src/main/docker/Dockerfile.jvm b/runtime/server/src/main/docker/Dockerfile.jvm index 393c9c8a3f..70d694c0fb 100644 --- a/runtime/server/src/main/docker/Dockerfile.jvm +++ b/runtime/server/src/main/docker/Dockerfile.jvm @@ -18,23 +18,27 @@ # FROM registry.access.redhat.com/ubi9/openjdk-21-runtime:1.23-6.1761164966 -LABEL org.opencontainers.image.source=https://github.com/apache/polaris -LABEL org.opencontainers.image.description="Apache Polaris (incubating)" -LABEL org.opencontainers.image.licenses=Apache-2.0 +LABEL org.opencontainers.image.source=https://github.com/apache/polaris \ + org.opencontainers.image.description="Apache Polaris (incubating)" \ + org.opencontainers.image.licenses=Apache-2.0 -ENV LANGUAGE='en_US:en' +ENV LANGUAGE='en_US:en' \ + USER=polaris \ + UID=10000 \ + HOME=/home/polaris \ + AB_JOLOKIA_OFF="" \ + JAVA_APP_JAR="/deployments/quarkus-run.jar" USER root -RUN groupadd --gid 10001 polaris \ - && useradd --uid 10000 --gid polaris polaris \ - && chown -R polaris:polaris /opt/jboss/container \ - && chown -R polaris:polaris /deployments + +RUN groupadd --gid 10001 polaris && \ + useradd --uid 10000 --gid polaris polaris && \ + chown -R polaris:polaris /opt/jboss/container && \ + chown -R polaris:polaris /deployments USER polaris + WORKDIR /home/polaris -ENV USER=polaris -ENV UID=10000 -ENV HOME=/home/polaris # We make four distinct layers so if there are application changes the library layers can be reused COPY --chown=polaris:polaris build/quarkus-app/lib/ /deployments/lib/ @@ -45,8 +49,4 @@ COPY --chown=polaris:polaris distribution/LICENSE /deployments/ COPY --chown=polaris:polaris distribution/NOTICE /deployments/ COPY --chown=polaris:polaris distribution/DISCLAIMER /deployments/ -EXPOSE 8181 -EXPOSE 8182 - -ENV AB_JOLOKIA_OFF="" -ENV JAVA_APP_JAR="/deployments/quarkus-run.jar" +EXPOSE 8181 8182 \ No newline at end of file diff --git 
a/site/docker/Dockerfile b/site/docker/Dockerfile index 714b1ccf53..297c9aa7d3 100644 --- a/site/docker/Dockerfile +++ b/site/docker/Dockerfile @@ -21,23 +21,14 @@ FROM ubuntu:24.04 AS hugo ENV LANGUAGE='en_US:en' -RUN apt-get update -RUN apt-get install --yes golang hugo asciidoctor npm curl -RUN apt-get clean -# http-server is used when building the static site to manually check it locally -# (via `site/bin/create-static-site.sh --local` at http://localhost:8080/) -RUN npm install --global http-server - -# these dependencies are needed to build the static site -#RUN npm install --global autoprefixer postcss postcss-cli http-server - -RUN mkdir /polaris -RUN mkdir /polaris/site -RUN mkdir /polaris/site/resources +RUN apt-get update && \ + apt-get install --yes --no-install-recommends golang hugo asciidoctor npm curl git && \ + rm -rf /var/lib/apt/lists/* && \ + npm install --global http-server && \ + mkdir -p /polaris/site/resources COPY _run_in_docker.sh /hugo/run -EXPOSE 1313 -EXPOSE 8080 +EXPOSE 1313 8080 ENTRYPOINT ["/hugo/run"] From f857e2905d107004959125677b88bf2af1c6f876 Mon Sep 17 00:00:00 2001 From: Christopher Lambert Date: Tue, 11 Nov 2025 17:48:27 +0100 Subject: [PATCH 12/14] Make StorageAccessConfigProvider request-scoped (#2974) - add `StorageCredentialsVendor` as request-scoped wrapper around `PolarisCredentialVendor` - make `FileIOFactory` request-scoped - make `TaskFileIOSupplier` request-scoped --- .../AtomicOperationMetaStoreManager.java | 2 +- .../TransactionWorkspaceMetaStoreManager.java | 4 +- .../TransactionalMetaStoreManagerImpl.java | 2 +- .../core/storage/PolarisCredentialVendor.java | 2 +- .../storage/StorageCredentialsVendor.java | 81 +++++++++++ .../storage/cache/StorageCredentialCache.java | 32 ++--- .../cache/StorageCredentialCacheTest.java | 130 ++++++------------ .../catalog/iceberg/IcebergCatalog.java | 14 +- .../iceberg/IcebergCatalogHandler.java | 1 - .../catalog/io/DefaultFileIOFactory.java | 4 +- 
.../service/catalog/io/FileIOFactory.java | 4 +- .../service/catalog/io/FileIOUtil.java | 73 ---------- .../io/StorageAccessConfigProvider.java | 67 ++++++--- .../service/config/ServiceProducers.java | 9 ++ .../task/BatchFileCleanupTaskHandler.java | 2 +- .../task/ManifestFileCleanupTaskHandler.java | 2 +- .../service/task/TableCleanupTaskHandler.java | 3 +- .../service/task/TaskFileIOSupplier.java | 10 +- ...bstractPolarisGenericTableCatalogTest.java | 5 +- .../iceberg/AbstractIcebergCatalogTest.java | 8 +- .../AbstractIcebergCatalogViewTest.java | 6 +- .../service/catalog/io/FileIOFactoryTest.java | 2 +- .../policy/AbstractPolicyCatalogTest.java | 5 +- .../apache/polaris/service/TestServices.java | 5 +- 24 files changed, 238 insertions(+), 235 deletions(-) create mode 100644 polaris-core/src/main/java/org/apache/polaris/core/storage/StorageCredentialsVendor.java diff --git a/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java b/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java index 3ae2ac2c9e..431520e9f8 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/AtomicOperationMetaStoreManager.java @@ -1596,7 +1596,7 @@ private void revokeGrantRecord( @Nonnull PolarisCallContext callCtx, long catalogId, long entityId, - PolarisEntityType entityType, + @Nonnull PolarisEntityType entityType, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/persistence/TransactionWorkspaceMetaStoreManager.java b/polaris-core/src/main/java/org/apache/polaris/core/persistence/TransactionWorkspaceMetaStoreManager.java index 99c1f81624..b0c78c0b13 100644 --- 
a/polaris-core/src/main/java/org/apache/polaris/core/persistence/TransactionWorkspaceMetaStoreManager.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/TransactionWorkspaceMetaStoreManager.java @@ -339,11 +339,11 @@ public EntitiesResult loadTasks( } @Override - public ScopedCredentialsResult getSubscopedCredsForEntity( + public @Nonnull ScopedCredentialsResult getSubscopedCredsForEntity( @Nonnull PolarisCallContext callCtx, long catalogId, long entityId, - PolarisEntityType entityType, + @Nonnull PolarisEntityType entityType, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java b/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java index 815e119d30..6602cf01ae 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/persistence/transactional/TransactionalMetaStoreManagerImpl.java @@ -2094,7 +2094,7 @@ private PolarisEntityResolver resolveSecurableToRoleGrant( @Nonnull PolarisCallContext callCtx, long catalogId, long entityId, - PolarisEntityType entityType, + @Nonnull PolarisEntityType entityType, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisCredentialVendor.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisCredentialVendor.java index d64e9ad88c..ee90294c69 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisCredentialVendor.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/PolarisCredentialVendor.java @@ -49,7 +49,7 @@ ScopedCredentialsResult getSubscopedCredsForEntity( @Nonnull 
PolarisCallContext callCtx, long catalogId, long entityId, - PolarisEntityType entityType, + @Nonnull PolarisEntityType entityType, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageCredentialsVendor.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageCredentialsVendor.java new file mode 100644 index 0000000000..59bcf86c8e --- /dev/null +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/StorageCredentialsVendor.java @@ -0,0 +1,81 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.polaris.core.storage; + +import jakarta.annotation.Nonnull; +import java.util.Optional; +import java.util.Set; +import org.apache.polaris.core.config.RealmConfig; +import org.apache.polaris.core.context.CallContext; +import org.apache.polaris.core.context.RealmContext; +import org.apache.polaris.core.entity.PolarisEntity; +import org.apache.polaris.core.persistence.dao.entity.ScopedCredentialsResult; + +public class StorageCredentialsVendor { + + private final PolarisCredentialVendor polarisCredentialVendor; + private final CallContext callContext; + + public StorageCredentialsVendor( + PolarisCredentialVendor polarisCredentialVendor, CallContext callContext) { + this.polarisCredentialVendor = polarisCredentialVendor; + this.callContext = callContext; + } + + public RealmContext getRealmContext() { + return callContext.getRealmContext(); + } + + public RealmConfig getRealmConfig() { + return callContext.getRealmConfig(); + } + + /** + * Get sub-scoped credentials for an entity against the provided allowed read and write locations. + * + * @param entity the entity + * @param allowListOperation whether to allow LIST operation on the allowedReadLocations and + * allowedWriteLocations + * @param allowedReadLocations a set of allowed to read locations + * @param allowedWriteLocations a set of allowed to write locations + * @param refreshCredentialsEndpoint an optional endpoint to use for refreshing credentials. If + * supported by the storage type it will be returned to the client in the appropriate + * properties. 
The endpoint may be relative to the base URI and the client is responsible for + * handling the relative path + * @return an enum map containing the scoped credentials + */ + @Nonnull + public ScopedCredentialsResult getSubscopedCredsForEntity( + @Nonnull PolarisEntity entity, + boolean allowListOperation, + @Nonnull Set allowedReadLocations, + @Nonnull Set allowedWriteLocations, + Optional refreshCredentialsEndpoint) { + return polarisCredentialVendor.getSubscopedCredsForEntity( + callContext.getPolarisCallContext(), + entity.getCatalogId(), + entity.getId(), + entity.getType(), + allowListOperation, + allowedReadLocations, + allowedWriteLocations, + refreshCredentialsEndpoint); + } +} diff --git a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java index 93f5e351ab..0f22863e59 100644 --- a/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java +++ b/polaris-core/src/main/java/org/apache/polaris/core/storage/cache/StorageCredentialCache.java @@ -30,15 +30,15 @@ import java.util.Set; import java.util.function.Function; import org.apache.iceberg.exceptions.UnprocessableEntityException; -import org.apache.polaris.core.PolarisCallContext; import org.apache.polaris.core.PolarisDiagnostics; import org.apache.polaris.core.config.FeatureConfiguration; import org.apache.polaris.core.config.RealmConfig; +import org.apache.polaris.core.context.RealmContext; import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.entity.PolarisEntityType; import org.apache.polaris.core.persistence.dao.entity.ScopedCredentialsResult; -import org.apache.polaris.core.storage.PolarisCredentialVendor; import org.apache.polaris.core.storage.StorageAccessConfig; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -95,8 +95,8 @@ 
private long maxCacheDurationMs(RealmConfig realmConfig) { /** * Either get from the cache or generate a new entry for a scoped creds * - * @param credentialVendor the credential vendor used to generate a new scoped creds if needed - * @param callCtx the call context + * @param storageCredentialsVendor the credential vendor used to generate a new scoped creds if + * needed * @param polarisEntity the polaris entity that is going to scoped creds * @param allowListOperation whether allow list action on the provided read and write locations * @param allowedReadLocations a set of allowed to read locations @@ -104,20 +104,21 @@ private long maxCacheDurationMs(RealmConfig realmConfig) { * @return the a map of string containing the scoped creds information */ public StorageAccessConfig getOrGenerateSubScopeCreds( - @Nonnull PolarisCredentialVendor credentialVendor, - @Nonnull PolarisCallContext callCtx, + @Nonnull StorageCredentialsVendor storageCredentialsVendor, @Nonnull PolarisEntity polarisEntity, boolean allowListOperation, @Nonnull Set allowedReadLocations, @Nonnull Set allowedWriteLocations, Optional refreshCredentialsEndpoint) { + RealmContext realmContext = storageCredentialsVendor.getRealmContext(); + RealmConfig realmConfig = storageCredentialsVendor.getRealmConfig(); if (!isTypeSupported(polarisEntity.getType())) { diagnostics.fail( "entity_type_not_suppported_to_scope_creds", "type={}", polarisEntity.getType()); } StorageCredentialCacheKey key = StorageCredentialCacheKey.of( - callCtx.getRealmContext().getRealmIdentifier(), + realmContext.getRealmIdentifier(), polarisEntity, allowListOperation, allowedReadLocations, @@ -128,17 +129,14 @@ public StorageAccessConfig getOrGenerateSubScopeCreds( k -> { LOGGER.atDebug().log("StorageCredentialCache::load"); ScopedCredentialsResult scopedCredentialsResult = - credentialVendor.getSubscopedCredsForEntity( - callCtx, - k.catalogId(), - polarisEntity.getId(), - polarisEntity.getType(), - k.allowedListAction(), - 
k.allowedReadLocations(), - k.allowedWriteLocations(), - k.refreshCredentialsEndpoint()); + storageCredentialsVendor.getSubscopedCredsForEntity( + polarisEntity, + allowListOperation, + allowedReadLocations, + allowedWriteLocations, + refreshCredentialsEndpoint); if (scopedCredentialsResult.isSuccess()) { - long maxCacheDurationMs = maxCacheDurationMs(callCtx.getRealmConfig()); + long maxCacheDurationMs = maxCacheDurationMs(realmConfig); return new StorageCredentialCacheEntry( scopedCredentialsResult.getStorageAccessConfig(), maxCacheDurationMs); } diff --git a/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java b/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java index f1e5ac1f61..c4db872317 100644 --- a/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java +++ b/polaris-core/src/test/java/org/apache/polaris/core/storage/cache/StorageCredentialCacheTest.java @@ -18,8 +18,6 @@ */ package org.apache.polaris.core.storage.cache; -import static org.apache.polaris.core.persistence.PrincipalSecretsGenerator.RANDOM_SECRETS; - import jakarta.annotation.Nonnull; import java.util.ArrayList; import java.util.Arrays; @@ -28,63 +26,56 @@ import java.util.Optional; import java.util.Set; import org.apache.iceberg.exceptions.UnprocessableEntityException; -import org.apache.polaris.core.PolarisCallContext; import org.apache.polaris.core.PolarisDefaultDiagServiceImpl; import org.apache.polaris.core.PolarisDiagnostics; +import org.apache.polaris.core.config.PolarisConfigurationStore; +import org.apache.polaris.core.config.RealmConfig; +import org.apache.polaris.core.config.RealmConfigImpl; +import org.apache.polaris.core.context.RealmContext; import org.apache.polaris.core.entity.PolarisBaseEntity; import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.entity.PolarisEntityConstants; import 
org.apache.polaris.core.entity.PolarisEntitySubType; import org.apache.polaris.core.entity.PolarisEntityType; -import org.apache.polaris.core.persistence.PolarisMetaStoreManager; import org.apache.polaris.core.persistence.dao.entity.BaseResult; import org.apache.polaris.core.persistence.dao.entity.ScopedCredentialsResult; -import org.apache.polaris.core.persistence.transactional.TransactionalPersistence; -import org.apache.polaris.core.persistence.transactional.TreeMapMetaStore; -import org.apache.polaris.core.persistence.transactional.TreeMapTransactionalPersistenceImpl; import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.RepeatedTest; import org.junit.jupiter.api.Test; import org.mockito.Mockito; public class StorageCredentialCacheTest { private final PolarisDiagnostics diagServices = new PolarisDefaultDiagServiceImpl(); - private final PolarisCallContext callCtx; - private final StorageCredentialCacheConfig storageCredentialCacheConfig; - private final PolarisMetaStoreManager metaStoreManager; + private final RealmContext realmContext = () -> "testRealm"; + private final RealmConfig realmConfig = + new RealmConfigImpl(new PolarisConfigurationStore() {}, realmContext); + private final StorageCredentialsVendor storageCredentialsVendor; private StorageCredentialCache storageCredentialCache; public StorageCredentialCacheTest() { - // the entity store, use treemap implementation - TreeMapMetaStore store = new TreeMapMetaStore(diagServices); - // to interact with the metastore - TransactionalPersistence metaStore = - new TreeMapTransactionalPersistenceImpl( - diagServices, store, Mockito.mock(), RANDOM_SECRETS); - callCtx = new PolarisCallContext(() -> "testRealm", metaStore); - storageCredentialCacheConfig = () -> 10_000; - 
metaStoreManager = Mockito.mock(PolarisMetaStoreManager.class); - storageCredentialCache = newStorageCredentialCache(); + storageCredentialsVendor = Mockito.mock(StorageCredentialsVendor.class); + Mockito.when(storageCredentialsVendor.getRealmContext()).thenReturn(realmContext); + Mockito.when(storageCredentialsVendor.getRealmConfig()).thenReturn(realmConfig); } - private StorageCredentialCache newStorageCredentialCache() { - return new StorageCredentialCache(diagServices, storageCredentialCacheConfig); + @BeforeEach + void beforeEach() { + StorageCredentialCacheConfig storageCredentialCacheConfig = () -> 10_000; + storageCredentialCache = new StorageCredentialCache(diagServices, storageCredentialCacheConfig); } @Test public void testBadResult() { - storageCredentialCache = newStorageCredentialCache(); ScopedCredentialsResult badResult = new ScopedCredentialsResult( BaseResult.ReturnStatus.SUBSCOPE_CREDS_ERROR, "extra_error_info"); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -98,8 +89,7 @@ public void testBadResult() { Assertions.assertThatThrownBy( () -> storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path"), @@ -111,14 +101,10 @@ public void testBadResult() { @Test public void testCacheHit() { - storageCredentialCache = newStorageCredentialCache(); List mockedScopedCreds = getFakeScopedCreds(3, /* expireSoon= */ false); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -134,8 +120,7 @@ public void testCacheHit() { // add an item to the cache storageCredentialCache.getOrGenerateSubScopeCreds( - 
metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -145,8 +130,7 @@ public void testCacheHit() { // subscope for the same entity and same allowed locations, will hit the cache storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -156,15 +140,11 @@ public void testCacheHit() { } @RepeatedTest(10) - public void testCacheEvict() throws InterruptedException { - storageCredentialCache = newStorageCredentialCache(); + public void testCacheEvict() throws Exception { List mockedScopedCreds = getFakeScopedCreds(3, /* expireSoon= */ true); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -179,7 +159,7 @@ public void testCacheEvict() throws InterruptedException { PolarisEntity polarisEntity = new PolarisEntity(baseEntity); StorageCredentialCacheKey cacheKey = StorageCredentialCacheKey.of( - callCtx.getRealmContext().getRealmIdentifier(), + realmContext.getRealmIdentifier(), polarisEntity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -188,8 +168,7 @@ public void testCacheEvict() throws InterruptedException { // the entry will be evicted immediately because the token is expired storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -198,8 +177,7 @@ public void testCacheEvict() throws InterruptedException { Assertions.assertThat(storageCredentialCache.getIfPresent(cacheKey)).isNull(); storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path", 
"s3://bucket2/path"), @@ -208,8 +186,7 @@ public void testCacheEvict() throws InterruptedException { Assertions.assertThat(storageCredentialCache.getIfPresent(cacheKey)).isNull(); storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, polarisEntity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -220,14 +197,10 @@ public void testCacheEvict() throws InterruptedException { @Test public void testCacheGenerateNewEntries() { - storageCredentialCache = newStorageCredentialCache(); List mockedScopedCreds = getFakeScopedCreds(3, /* expireSoon= */ false); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -241,8 +214,7 @@ public void testCacheGenerateNewEntries() { // different catalog will generate new cache entries for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, entity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -259,8 +231,7 @@ public void testCacheGenerateNewEntries() { PolarisBaseEntity updateEntity = new PolarisBaseEntity.Builder(entity).internalPropertiesAsMap(internalMap).build(); storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, PolarisEntity.of(updateEntity), /* allowedListAction= */ true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -271,8 +242,7 @@ public void testCacheGenerateNewEntries() { // allowedListAction changed to different value FALSE, will generate new entry for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, entity, /* allowedListAction= */ false, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -283,8 +253,7 @@ public 
void testCacheGenerateNewEntries() { // different allowedWriteLocations, will generate new entry for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, entity, /* allowedListAction= */ false, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -300,8 +269,7 @@ public void testCacheGenerateNewEntries() { PolarisBaseEntity updateEntity = new PolarisBaseEntity.Builder(entity).internalPropertiesAsMap(internalMap).build(); storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, PolarisEntity.of(updateEntity), /* allowedListAction= */ false, Set.of("s3://differentbucket/path", "s3://bucket2/path"), @@ -313,15 +281,11 @@ public void testCacheGenerateNewEntries() { @Test public void testCacheNotAffectedBy() { - storageCredentialCache = newStorageCredentialCache(); List mockedScopedCreds = getFakeScopedCreds(3, /* expireSoon= */ false); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -333,8 +297,7 @@ public void testCacheNotAffectedBy() { List entityList = getPolarisEntities(); for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, entity, true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -346,8 +309,7 @@ public void testCacheNotAffectedBy() { // entity ID does not affect the cache for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, new PolarisEntity(new PolarisBaseEntity.Builder(entity).id(1234).build()), true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -359,8 +321,7 @@ public void testCacheNotAffectedBy() { // other property 
changes does not affect the cache for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, new PolarisEntity(new PolarisBaseEntity.Builder(entity).entityVersion(5).build()), true, Set.of("s3://bucket1/path", "s3://bucket2/path"), @@ -371,8 +332,7 @@ public void testCacheNotAffectedBy() { // order of the allowedReadLocations does not affect the cache for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, new PolarisEntity(new PolarisBaseEntity.Builder(entity).entityVersion(5).build()), true, Set.of("s3://bucket2/path", "s3://bucket1/path"), @@ -384,8 +344,7 @@ public void testCacheNotAffectedBy() { // order of the allowedWriteLocations does not affect the cache for (PolarisEntity entity : entityList) { storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, new PolarisEntity(new PolarisBaseEntity.Builder(entity).entityVersion(5).build()), true, Set.of("s3://bucket2/path", "s3://bucket1/path"), @@ -456,7 +415,6 @@ private static List getPolarisEntities() { @Test public void testExtraProperties() { - storageCredentialCache = newStorageCredentialCache(); ScopedCredentialsResult properties = new ScopedCredentialsResult( StorageAccessConfig.builder() @@ -465,10 +423,7 @@ public void testExtraProperties() { .put(StorageAccessProperty.AWS_PATH_STYLE_ACCESS, "true") .build()); Mockito.when( - metaStoreManager.getSubscopedCredsForEntity( - Mockito.any(), - Mockito.anyLong(), - Mockito.anyLong(), + storageCredentialsVendor.getSubscopedCredsForEntity( Mockito.any(), Mockito.anyBoolean(), Mockito.anySet(), @@ -479,8 +434,7 @@ public void testExtraProperties() { StorageAccessConfig config = storageCredentialCache.getOrGenerateSubScopeCreds( - metaStoreManager, - callCtx, + storageCredentialsVendor, entityList.get(0), true, 
Set.of("s3://bucket1/path", "s3://bucket2/path"), diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java index 89624418a6..e8f5402b1b 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalog.java @@ -1085,7 +1085,7 @@ private void validateNoLocationO realmConfig.getConfig(FeatureConfiguration.OPTIMIZED_SIBLING_CHECK); if (useOptimizedSiblingCheck) { Optional> directSiblingCheckResult = - getMetaStoreManager().hasOverlappingSiblings(callContext.getPolarisCallContext(), entity); + getMetaStoreManager().hasOverlappingSiblings(getCurrentPolarisContext(), entity); if (directSiblingCheckResult.isPresent()) { if (directSiblingCheckResult.get().isPresent()) { throw new org.apache.iceberg.exceptions.ForbiddenException( @@ -2080,12 +2080,7 @@ private FileIO loadFileIOForTableLike( Set storageActions) { StorageAccessConfig storageAccessConfig = storageAccessConfigProvider.getStorageAccessConfig( - callContext, - identifier, - readLocations, - storageActions, - Optional.empty(), - resolvedStorageEntity); + identifier, readLocations, storageActions, Optional.empty(), resolvedStorageEntity); // Reload fileIO based on table specific context FileIO fileIO = fileIOFactory.loadFileIO(storageAccessConfig, ioImplClassName, tableProperties); // ensure the new fileIO is closed when the catalog is closed @@ -2101,11 +2096,6 @@ private PolarisMetaStoreManager getMetaStoreManager() { return metaStoreManager; } - @VisibleForTesting - public void setFileIOFactory(FileIOFactory newFactory) { - this.fileIOFactory = newFactory; - } - @VisibleForTesting long getCatalogId() { // TODO: Properly handle initialization diff --git 
a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java index 1de129cacc..d2e48d2cc0 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/iceberg/IcebergCatalogHandler.java @@ -812,7 +812,6 @@ ALLOW_FEDERATED_CATALOGS_CREDENTIAL_VENDING, getResolvedCatalogEntity())) { StorageAccessConfig storageAccessConfig = storageAccessConfigProvider.getStorageAccessConfig( - callContext, tableIdentifier, tableLocations, actions, diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java index c132322f5d..39b2f9d5f5 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/DefaultFileIOFactory.java @@ -21,7 +21,7 @@ import com.google.common.annotations.VisibleForTesting; import io.smallrye.common.annotation.Identifier; import jakarta.annotation.Nonnull; -import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.context.RequestScoped; import jakarta.inject.Inject; import java.util.HashMap; import java.util.Map; @@ -36,7 +36,7 @@ *

This class acts as a translation layer between Polaris properties and the properties required * by Iceberg's {@link FileIO}. */ -@ApplicationScoped +@RequestScoped @Identifier("default") public class DefaultFileIOFactory implements FileIOFactory { diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java index b9bfbf97e9..50a9c68835 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOFactory.java @@ -19,7 +19,7 @@ package org.apache.polaris.service.catalog.io; import jakarta.annotation.Nonnull; -import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.context.RequestScoped; import java.util.Map; import org.apache.iceberg.io.FileIO; import org.apache.polaris.core.storage.StorageAccessConfig; @@ -27,7 +27,7 @@ /** * Interface for providing a way to construct FileIO objects, such as for reading/writing S3. * - *

Implementations are available via CDI as {@link ApplicationScoped @ApplicationScoped} beans. + *

Implementations are available via CDI as {@link RequestScoped @RequestScoped} beans. */ public interface FileIOFactory { diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java index 7d5a112bba..5dc657a601 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/FileIOUtil.java @@ -19,18 +19,9 @@ package org.apache.polaris.service.catalog.io; import java.util.Optional; -import java.util.Set; -import org.apache.iceberg.catalog.TableIdentifier; -import org.apache.polaris.core.config.FeatureConfiguration; -import org.apache.polaris.core.context.CallContext; import org.apache.polaris.core.entity.PolarisEntity; import org.apache.polaris.core.entity.PolarisEntityConstants; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; -import org.apache.polaris.core.storage.PolarisCredentialVendor; -import org.apache.polaris.core.storage.PolarisStorageActions; -import org.apache.polaris.core.storage.StorageAccessConfig; -import org.apache.polaris.core.storage.cache.StorageCredentialCache; -import org.apache.polaris.service.catalog.iceberg.IcebergCatalog; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -61,68 +52,4 @@ public static Optional findStorageInfoFromHierarchy( .findFirst(); return storageInfoEntity; } - - /** - * Refreshes or generates subscoped creds for accessing table storage based on the params. - * - *

Use cases: - * - *

    - *
  • In {@link IcebergCatalog}, subscoped credentials are generated or refreshed when the - * client sends a loadTable request to vend credentials. - *
  • In {@link DefaultFileIOFactory}, subscoped credentials are obtained to access the storage - * and read/write metadata JSON files. - *
- */ - public static StorageAccessConfig refreshAccessConfig( - CallContext callContext, - StorageCredentialCache storageCredentialCache, - PolarisCredentialVendor credentialVendor, - TableIdentifier tableIdentifier, - Set tableLocations, - Set storageActions, - PolarisEntity entity, - Optional refreshCredentialsEndpoint) { - - boolean skipCredentialSubscopingIndirection = - callContext - .getRealmConfig() - .getConfig(FeatureConfiguration.SKIP_CREDENTIAL_SUBSCOPING_INDIRECTION); - if (skipCredentialSubscopingIndirection) { - LOGGER - .atDebug() - .addKeyValue("tableIdentifier", tableIdentifier) - .log("Skipping generation of subscoped creds for table"); - return StorageAccessConfig.builder().build(); - } - - boolean allowList = - storageActions.contains(PolarisStorageActions.LIST) - || storageActions.contains(PolarisStorageActions.ALL); - Set writeLocations = - storageActions.contains(PolarisStorageActions.WRITE) - || storageActions.contains(PolarisStorageActions.DELETE) - || storageActions.contains(PolarisStorageActions.ALL) - ? 
tableLocations - : Set.of(); - StorageAccessConfig storageAccessConfig = - storageCredentialCache.getOrGenerateSubScopeCreds( - credentialVendor, - callContext.getPolarisCallContext(), - entity, - allowList, - tableLocations, - writeLocations, - refreshCredentialsEndpoint); - LOGGER - .atDebug() - .addKeyValue("tableIdentifier", tableIdentifier) - .addKeyValue("credentialKeys", storageAccessConfig.credentials().keySet()) - .addKeyValue("extraProperties", storageAccessConfig.extraProperties()) - .log("Loaded scoped credentials for table"); - if (storageAccessConfig.credentials().isEmpty()) { - LOGGER.debug("No credentials found for table"); - } - return storageAccessConfig; - } } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java index 80e62856ae..d6316c6e7a 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/catalog/io/StorageAccessConfigProvider.java @@ -20,17 +20,17 @@ package org.apache.polaris.service.catalog.io; import jakarta.annotation.Nonnull; -import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.context.RequestScoped; import jakarta.inject.Inject; import java.util.Optional; import java.util.Set; import org.apache.iceberg.catalog.TableIdentifier; -import org.apache.polaris.core.context.CallContext; +import org.apache.polaris.core.config.FeatureConfiguration; import org.apache.polaris.core.entity.PolarisEntity; -import org.apache.polaris.core.persistence.MetaStoreManagerFactory; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; import org.apache.polaris.core.storage.PolarisStorageActions; import org.apache.polaris.core.storage.StorageAccessConfig; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import 
org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -42,26 +42,25 @@ *

This provider decouples credential vending from catalog implementations, and should be the * primary entrypoint to get sub-scoped credentials for accessing table data. */ -@ApplicationScoped +@RequestScoped public class StorageAccessConfigProvider { private static final Logger LOGGER = LoggerFactory.getLogger(StorageAccessConfigProvider.class); private final StorageCredentialCache storageCredentialCache; - private final MetaStoreManagerFactory metaStoreManagerFactory; + private final StorageCredentialsVendor storageCredentialsVendor; @Inject public StorageAccessConfigProvider( StorageCredentialCache storageCredentialCache, - MetaStoreManagerFactory metaStoreManagerFactory) { + StorageCredentialsVendor storageCredentialsVendor) { this.storageCredentialCache = storageCredentialCache; - this.metaStoreManagerFactory = metaStoreManagerFactory; + this.storageCredentialsVendor = storageCredentialsVendor; } /** * Vends credentials for accessing table storage at explicit locations. * - * @param callContext the call context containing realm, principal, and security context * @param tableIdentifier the table identifier, used for logging and refresh endpoint construction * @param tableLocations set of storage location URIs to scope credentials to * @param storageActions the storage operations (READ, WRITE, LIST, DELETE) to scope credentials @@ -72,7 +71,6 @@ public StorageAccessConfigProvider( * config found */ public StorageAccessConfig getStorageAccessConfig( - @Nonnull CallContext callContext, @Nonnull TableIdentifier tableIdentifier, @Nonnull Set tableLocations, @Nonnull Set storageActions, @@ -91,14 +89,47 @@ public StorageAccessConfig getStorageAccessConfig( .log("Table entity has no storage configuration in its hierarchy"); return StorageAccessConfig.builder().supportsCredentialVending(false).build(); } - return FileIOUtil.refreshAccessConfig( - callContext, - storageCredentialCache, - metaStoreManagerFactory.getOrCreateMetaStoreManager(callContext.getRealmContext()), - 
tableIdentifier, - tableLocations, - storageActions, - storageInfo.get(), - refreshCredentialsEndpoint); + PolarisEntity storageInfoEntity = storageInfo.get(); + + boolean skipCredentialSubscopingIndirection = + storageCredentialsVendor + .getRealmConfig() + .getConfig(FeatureConfiguration.SKIP_CREDENTIAL_SUBSCOPING_INDIRECTION); + if (skipCredentialSubscopingIndirection) { + LOGGER + .atDebug() + .addKeyValue("tableIdentifier", tableIdentifier) + .log("Skipping generation of subscoped creds for table"); + return StorageAccessConfig.builder().build(); + } + + boolean allowList = + storageActions.contains(PolarisStorageActions.LIST) + || storageActions.contains(PolarisStorageActions.ALL); + Set writeLocations = + storageActions.contains(PolarisStorageActions.WRITE) + || storageActions.contains(PolarisStorageActions.DELETE) + || storageActions.contains(PolarisStorageActions.ALL) + ? tableLocations + : Set.of(); + StorageAccessConfig accessConfig = + storageCredentialCache.getOrGenerateSubScopeCreds( + storageCredentialsVendor, + storageInfoEntity, + allowList, + tableLocations, + writeLocations, + refreshCredentialsEndpoint); + + LOGGER + .atDebug() + .addKeyValue("tableIdentifier", tableIdentifier) + .addKeyValue("credentialKeys", accessConfig.credentials().keySet()) + .addKeyValue("extraProperties", accessConfig.extraProperties()) + .log("Loaded scoped credentials for table"); + if (accessConfig.credentials().isEmpty()) { + LOGGER.debug("No credentials found for table"); + } + return accessConfig; } } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/config/ServiceProducers.java b/runtime/service/src/main/java/org/apache/polaris/service/config/ServiceProducers.java index bd03c022b9..13768f2ba3 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/config/ServiceProducers.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/config/ServiceProducers.java @@ -59,6 +59,7 @@ import 
org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.core.secrets.UserSecretsManager; import org.apache.polaris.core.secrets.UserSecretsManagerFactory; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.apache.polaris.core.storage.cache.StorageCredentialCacheConfig; import org.apache.polaris.service.auth.AuthenticationConfiguration; @@ -220,6 +221,7 @@ public RealmContextResolver realmContextResolver( } @Produces + @RequestScoped public FileIOFactory fileIOFactory( FileIOConfiguration config, @Any Instance fileIOFactories) { return fileIOFactories.select(Identifier.Literal.of(config.type())).get(); @@ -246,6 +248,13 @@ public PolarisMetaStoreManager polarisMetaStoreManager( return metaStoreManagerFactory.getOrCreateMetaStoreManager(realmContext); } + @Produces + @RequestScoped + public StorageCredentialsVendor storageCredentialsVendor( + PolarisMetaStoreManager metaStoreManager, CallContext callContext) { + return new StorageCredentialsVendor(metaStoreManager, callContext); + } + @Produces public UserSecretsManagerFactory userSecretsManagerFactory( SecretsManagerConfiguration config, diff --git a/runtime/service/src/main/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandler.java b/runtime/service/src/main/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandler.java index fdb4ef5e0b..67dcacb9d0 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandler.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/task/BatchFileCleanupTaskHandler.java @@ -53,7 +53,7 @@ public boolean handleTask(TaskEntity task, CallContext callContext) { BatchFileCleanupTask cleanupTask = task.readData(BatchFileCleanupTask.class); TableIdentifier tableId = cleanupTask.tableId(); List batchFiles = cleanupTask.batchFiles(); - try (FileIO authorizedFileIO = fileIOSupplier.apply(task, 
tableId, callContext)) { + try (FileIO authorizedFileIO = fileIOSupplier.apply(task, tableId)) { List validFiles = batchFiles.stream().filter(file -> TaskUtils.exists(file, authorizedFileIO)).toList(); if (validFiles.isEmpty()) { diff --git a/runtime/service/src/main/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandler.java b/runtime/service/src/main/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandler.java index f71adc2bc2..7d9bc095f6 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandler.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/task/ManifestFileCleanupTaskHandler.java @@ -61,7 +61,7 @@ public boolean canHandleTask(TaskEntity task) { public boolean handleTask(TaskEntity task, CallContext callContext) { ManifestCleanupTask cleanupTask = task.readData(ManifestCleanupTask.class); TableIdentifier tableId = cleanupTask.tableId(); - try (FileIO authorizedFileIO = fileIOSupplier.apply(task, tableId, callContext)) { + try (FileIO authorizedFileIO = fileIOSupplier.apply(task, tableId)) { ManifestFile manifestFile = TaskUtils.decodeManifestFileData(cleanupTask.manifestFileData()); return cleanUpManifestFile(manifestFile, authorizedFileIO, tableId); } diff --git a/runtime/service/src/main/java/org/apache/polaris/service/task/TableCleanupTaskHandler.java b/runtime/service/src/main/java/org/apache/polaris/service/task/TableCleanupTaskHandler.java index 777d9bc8db..54976dead0 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/task/TableCleanupTaskHandler.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/task/TableCleanupTaskHandler.java @@ -93,8 +93,7 @@ public boolean handleTask(TaskEntity cleanupTask, CallContext callContext) { // It's likely the cleanupTask has already been completed, but wasn't dropped successfully. 
// Log a // warning and move on - try (FileIO fileIO = - fileIOSupplier.apply(cleanupTask, tableEntity.getTableIdentifier(), callContext)) { + try (FileIO fileIO = fileIOSupplier.apply(cleanupTask, tableEntity.getTableIdentifier())) { if (!TaskUtils.exists(tableEntity.getMetadataLocation(), fileIO)) { LOGGER .atWarn() diff --git a/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java b/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java index b4c31d6921..cddb671473 100644 --- a/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java +++ b/runtime/service/src/main/java/org/apache/polaris/service/task/TaskFileIOSupplier.java @@ -18,7 +18,7 @@ */ package org.apache.polaris.service.task; -import jakarta.enterprise.context.ApplicationScoped; +import jakarta.enterprise.context.RequestScoped; import jakarta.inject.Inject; import java.util.HashMap; import java.util.List; @@ -28,7 +28,6 @@ import org.apache.iceberg.CatalogProperties; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.io.FileIO; -import org.apache.polaris.core.context.CallContext; import org.apache.polaris.core.entity.PolarisTaskConstants; import org.apache.polaris.core.entity.TaskEntity; import org.apache.polaris.core.persistence.PolarisResolvedPathWrapper; @@ -38,7 +37,7 @@ import org.apache.polaris.service.catalog.io.FileIOFactory; import org.apache.polaris.service.catalog.io.StorageAccessConfigProvider; -@ApplicationScoped +@RequestScoped public class TaskFileIOSupplier { private final FileIOFactory fileIOFactory; private final StorageAccessConfigProvider accessConfigProvider; @@ -50,8 +49,7 @@ public TaskFileIOSupplier( this.accessConfigProvider = storageAccessConfigProvider; } - public FileIO apply(TaskEntity task, TableIdentifier identifier, CallContext callContext) { - + public FileIO apply(TaskEntity task, TableIdentifier identifier) { Map internalProperties = 
task.getInternalPropertiesAsMap(); Map properties = new HashMap<>(internalProperties); @@ -64,7 +62,7 @@ public FileIO apply(TaskEntity task, TableIdentifier identifier, CallContext cal new PolarisResolvedPathWrapper(List.of(resolvedTaskEntity)); StorageAccessConfig storageAccessConfig = accessConfigProvider.getStorageAccessConfig( - callContext, identifier, locations, storageActions, Optional.empty(), resolvedPath); + identifier, locations, storageActions, Optional.empty(), resolvedPath); String ioImpl = properties.getOrDefault( diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java index 5cda9c7981..c2d0e997b6 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/generic/AbstractPolarisGenericTableCatalogTest.java @@ -60,6 +60,7 @@ import org.apache.polaris.core.secrets.UserSecretsManagerFactory; import org.apache.polaris.core.storage.PolarisStorageIntegration; import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; import org.apache.polaris.core.storage.cache.StorageCredentialCache; @@ -156,8 +157,10 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); + StorageCredentialsVendor storageCredentialsVendor = + new StorageCredentialsVendor(metaStoreManager, polarisContext); storageAccessConfigProvider = - new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + 
new StorageAccessConfigProvider(storageCredentialCache, storageCredentialsVendor); PrincipalEntity rootPrincipal = metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java index 3e7cc19850..e9a0b2e777 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogTest.java @@ -130,6 +130,7 @@ import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider; import org.apache.polaris.core.storage.StorageAccessConfig; import org.apache.polaris.core.storage.StorageAccessProperty; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; import org.apache.polaris.core.storage.cache.StorageCredentialCache; @@ -290,8 +291,11 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); + StorageCredentialsVendor storageCredentialsVendor = + new StorageCredentialsVendor(metaStoreManager, polarisContext); storageAccessConfigProvider = - new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + new StorageAccessConfigProvider(storageCredentialCache, storageCredentialsVendor); + EntityCache entityCache = createEntityCache(diagServices, realmConfig, metaStoreManager); resolverFactory = (principal, referenceCatalogName) -> @@ -1915,7 +1919,7 @@ public void testDropTableWithPurge() { .containsEntry(StorageAccessProperty.AWS_TOKEN.getPropertyName(), SESSION_TOKEN); FileIO fileIO = new 
TaskFileIOSupplier(new DefaultFileIOFactory(), storageAccessConfigProvider) - .apply(taskEntity, TABLE, polarisContext); + .apply(taskEntity, TABLE); Assertions.assertThat(fileIO).isNotNull().isInstanceOf(ExceptionMappingFileIO.class); Assertions.assertThat(((ExceptionMappingFileIO) fileIO).getInnerIo()) .isInstanceOf(InMemoryFileIO.class); diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java index f8468d6bf1..7dd45d1808 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/iceberg/AbstractIcebergCatalogViewTest.java @@ -53,6 +53,7 @@ import org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.core.secrets.UserSecretsManager; import org.apache.polaris.core.secrets.UserSecretsManagerFactory; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.apache.polaris.service.admin.PolarisAdminService; import org.apache.polaris.service.catalog.PolarisPassthroughResolutionView; @@ -162,8 +163,11 @@ public void before(TestInfo testInfo) { metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); + StorageCredentialsVendor storageCredentialsVendor = + new StorageCredentialsVendor(metaStoreManager, polarisContext); storageAccessConfigProvider = - new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + new StorageAccessConfigProvider(storageCredentialCache, storageCredentialsVendor); + PrincipalEntity rootPrincipal = metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); PolarisPrincipal authenticatedRoot = 
PolarisPrincipal.of(rootPrincipal, Set.of()); diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java index 673e0f2923..09268cf2c4 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/io/FileIOFactoryTest.java @@ -171,7 +171,7 @@ public void testLoadFileIOForCleanupTask(String scheme) { FileIO fileIO = new TaskFileIOSupplier( testServices.fileIOFactory(), testServices.storageAccessConfigProvider()) - .apply(taskEntity, TABLE, callContext); + .apply(taskEntity, TABLE); Assertions.assertThat(fileIO).isNotNull().isInstanceOf(ExceptionMappingFileIO.class); Assertions.assertThat(((ExceptionMappingFileIO) fileIO).getInnerIo()) .isInstanceOf(InMemoryFileIO.class); diff --git a/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java b/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java index 3ee2faf340..eba010e11a 100644 --- a/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java +++ b/runtime/service/src/test/java/org/apache/polaris/service/catalog/policy/AbstractPolicyCatalogTest.java @@ -72,6 +72,7 @@ import org.apache.polaris.core.secrets.UserSecretsManagerFactory; import org.apache.polaris.core.storage.PolarisStorageIntegration; import org.apache.polaris.core.storage.PolarisStorageIntegrationProvider; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.aws.AwsCredentialsStorageIntegration; import org.apache.polaris.core.storage.aws.AwsStorageConfigurationInfo; import org.apache.polaris.core.storage.cache.StorageCredentialCache; @@ -177,8 +178,10 @@ public void before(TestInfo testInfo) { 
metaStoreManagerFactory.getOrCreateSession(realmContext), configurationStore); realmConfig = polarisContext.getRealmConfig(); + StorageCredentialsVendor storageCredentialsVendor = + new StorageCredentialsVendor(metaStoreManager, polarisContext); storageAccessConfigProvider = - new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + new StorageAccessConfigProvider(storageCredentialCache, storageCredentialsVendor); PrincipalEntity rootPrincipal = metaStoreManager.findRootPrincipal(polarisContext).orElseThrow(); diff --git a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java index 6041d2c489..4c910ffbd6 100644 --- a/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java +++ b/runtime/service/src/testFixtures/java/org/apache/polaris/service/TestServices.java @@ -60,6 +60,7 @@ import org.apache.polaris.core.persistence.resolver.ResolverFactory; import org.apache.polaris.core.secrets.UserSecretsManager; import org.apache.polaris.core.secrets.UserSecretsManagerFactory; +import org.apache.polaris.core.storage.StorageCredentialsVendor; import org.apache.polaris.core.storage.cache.StorageCredentialCache; import org.apache.polaris.core.storage.cache.StorageCredentialCacheConfig; import org.apache.polaris.service.admin.PolarisAdminService; @@ -273,8 +274,10 @@ public String getAuthenticationScheme() { PolarisCredentialManager credentialManager = new DefaultPolarisCredentialManager(realmContext, mockCredentialVendors); + StorageCredentialsVendor storageCredentialsVendor = + new StorageCredentialsVendor(metaStoreManager, callContext); StorageAccessConfigProvider storageAccessConfigProvider = - new StorageAccessConfigProvider(storageCredentialCache, metaStoreManagerFactory); + new StorageAccessConfigProvider(storageCredentialCache, storageCredentialsVendor); FileIOFactory fileIOFactory = 
fileIOFactorySupplier.get(); TaskExecutor taskExecutor = Mockito.mock(TaskExecutor.class); From 16e4649b57706b34f50da9df155257719fa9141d Mon Sep 17 00:00:00 2001 From: Dmitri Bourlatchkov Date: Tue, 11 Nov 2025 13:21:28 -0500 Subject: [PATCH 13/14] Increase javadoc visibility in `nosql/realms` (#3029) This is to fix javadoc error: `No public or protected classes found to document` --- persistence/nosql/realms/impl/build.gradle.kts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/persistence/nosql/realms/impl/build.gradle.kts b/persistence/nosql/realms/impl/build.gradle.kts index 2c5c360ec7..99e6dcda23 100644 --- a/persistence/nosql/realms/impl/build.gradle.kts +++ b/persistence/nosql/realms/impl/build.gradle.kts @@ -53,4 +53,7 @@ dependencies { testCompileOnly(libs.jakarta.enterprise.cdi.api) } -tasks.withType { isFailOnError = false } +tasks.withType { + isFailOnError = false + options.memberLevel = JavadocMemberLevel.PACKAGE +} From e9ba76d6b56bef08840ffdd4ba1c62a2d3f52da9 Mon Sep 17 00:00:00 2001 From: Yong Date: Tue, 11 Nov 2025 22:13:38 -0600 Subject: [PATCH 14/14] Sorted on import statements --- client/python/generate_clients.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/python/generate_clients.py b/client/python/generate_clients.py index ebfdc0538f..0c7c1b5af0 100644 --- a/client/python/generate_clients.py +++ b/client/python/generate_clients.py @@ -350,7 +350,7 @@ def fix_catalog_models_init() -> None: # Write the new __init__.py with open(init_py, "w") as f: - f.write("\n".join(imports)) + f.write("\n".join(sorted(imports))) logger.info("Catalog models __init__.py fixed.")