diff --git a/api/src/main/java/org/apache/iceberg/exceptions/UnprocessableEntityException.java b/api/src/main/java/org/apache/iceberg/exceptions/UnprocessableEntityException.java new file mode 100644 index 000000000000..9c5e0f8852bd --- /dev/null +++ b/api/src/main/java/org/apache/iceberg/exceptions/UnprocessableEntityException.java @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.exceptions; + +import com.google.errorprone.annotations.FormatMethod; + +/** + * REST exception thrown when a request is well-formed but cannot be applied. + *

+ * For example, this is used when a property update requests that properties are simultaneously set and removed. + */ +public class UnprocessableEntityException extends RESTException { + @FormatMethod + public UnprocessableEntityException(String message, Object... args) { + super(message, args); + } +} diff --git a/core/src/main/java/org/apache/iceberg/BaseTransaction.java b/core/src/main/java/org/apache/iceberg/BaseTransaction.java index ad4639c39e93..e63f7bf7e741 100644 --- a/core/src/main/java/org/apache/iceberg/BaseTransaction.java +++ b/core/src/main/java/org/apache/iceberg/BaseTransaction.java @@ -49,7 +49,7 @@ import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS; import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT; -class BaseTransaction implements Transaction { +public class BaseTransaction implements Transaction { private static final Logger LOG = LoggerFactory.getLogger(BaseTransaction.class); enum TransactionType { @@ -90,6 +90,14 @@ public Table table() { return transactionTable; } + public TableMetadata startMetadata() { + return current; + } + + public TableOperations underlyingOps() { + return ops; + } + private void checkLastOperationCommitted(String operation) { Preconditions.checkState(hasLastOpCommitted, "Cannot create new %s: last operation has not committed", operation); diff --git a/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java b/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java new file mode 100644 index 000000000000..f1267ba6d45d --- /dev/null +++ b/core/src/main/java/org/apache/iceberg/rest/CatalogHandlers.java @@ -0,0 +1,314 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.rest; + +import java.time.OffsetDateTime; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import org.apache.iceberg.BaseTable; +import org.apache.iceberg.BaseTransaction; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.SortOrder; +import org.apache.iceberg.Table; +import org.apache.iceberg.TableMetadata; +import org.apache.iceberg.TableOperations; +import org.apache.iceberg.Transaction; +import org.apache.iceberg.catalog.Catalog; +import org.apache.iceberg.catalog.Namespace; +import org.apache.iceberg.catalog.SupportsNamespaces; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.exceptions.AlreadyExistsException; +import org.apache.iceberg.exceptions.CommitFailedException; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.rest.requests.CreateNamespaceRequest; +import org.apache.iceberg.rest.requests.CreateTableRequest; +import org.apache.iceberg.rest.requests.UpdateNamespacePropertiesRequest; +import org.apache.iceberg.rest.requests.UpdateTableRequest; +import 
org.apache.iceberg.rest.responses.CreateNamespaceResponse; +import org.apache.iceberg.rest.responses.DropNamespaceResponse; +import org.apache.iceberg.rest.responses.DropTableResponse; +import org.apache.iceberg.rest.responses.GetNamespaceResponse; +import org.apache.iceberg.rest.responses.ListNamespacesResponse; +import org.apache.iceberg.rest.responses.ListTablesResponse; +import org.apache.iceberg.rest.responses.LoadTableResponse; +import org.apache.iceberg.rest.responses.UpdateNamespacePropertiesResponse; +import org.apache.iceberg.util.Tasks; + +import static org.apache.iceberg.TableProperties.COMMIT_MAX_RETRY_WAIT_MS_DEFAULT; +import static org.apache.iceberg.TableProperties.COMMIT_MIN_RETRY_WAIT_MS_DEFAULT; +import static org.apache.iceberg.TableProperties.COMMIT_NUM_RETRIES_DEFAULT; +import static org.apache.iceberg.TableProperties.COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT; + +public class CatalogHandlers { + private static final Schema EMPTY_SCHEMA = new Schema(); + + private CatalogHandlers() { + } + + /** + * Exception used to avoid retrying commits when assertions fail. + *

+ * When a REST assertion fails, it will throw CommitFailedException to send back to the client. But the assertion + * checks happen in the block that is retried if {@link TableOperations#commit(TableMetadata, TableMetadata)} throws + * CommitFailedException. This is used to avoid retries for assertion failures, which are unwrapped and rethrown + * outside of the commit loop. + */ + private static class ValidationFailureException extends RuntimeException { + private final CommitFailedException wrapped; + + private ValidationFailureException(CommitFailedException cause) { + super(cause); + this.wrapped = cause; + } + + public CommitFailedException wrapped() { + return wrapped; + } + } + + public static ListNamespacesResponse listNamespaces(SupportsNamespaces catalog, Namespace parent) { + List results; + if (parent.isEmpty()) { + results = catalog.listNamespaces(); + } else { + results = catalog.listNamespaces(parent); + } + + return ListNamespacesResponse.builder().addAll(results).build(); + } + + public static CreateNamespaceResponse createNamespace(SupportsNamespaces catalog, CreateNamespaceRequest request) { + Namespace namespace = request.namespace(); + catalog.createNamespace(namespace, request.properties()); + return CreateNamespaceResponse.builder() + .withNamespace(namespace) + .setProperties(catalog.loadNamespaceMetadata(namespace)) + .build(); + } + + public static GetNamespaceResponse loadNamespace(SupportsNamespaces catalog, Namespace namespace) { + Map properties = catalog.loadNamespaceMetadata(namespace); + return GetNamespaceResponse.builder() + .withNamespace(namespace) + .setProperties(properties) + .build(); + } + + public static DropNamespaceResponse dropNamespace(SupportsNamespaces catalog, Namespace namespace) { + boolean dropped = catalog.dropNamespace(namespace); + return DropNamespaceResponse.builder() + .dropped(dropped) + .build(); + } + + public static UpdateNamespacePropertiesResponse updateNamespaceProperties( + SupportsNamespaces 
catalog, Namespace namespace, UpdateNamespacePropertiesRequest request) { + request.validate(); + + Set removals = Sets.newHashSet(request.removals()); + Map updates = request.updates(); + + Map startProperties = catalog.loadNamespaceMetadata(namespace); + Set missing = Sets.difference(removals, startProperties.keySet()); + + if (!updates.isEmpty()) { + catalog.setProperties(namespace, updates); + } + + if (!removals.isEmpty()) { + // remove the original set just in case there was an update just after loading properties + catalog.removeProperties(namespace, removals); + } + + return UpdateNamespacePropertiesResponse.builder() + .addMissing(missing) + .addUpdated(updates.keySet()) + .addRemoved(Sets.difference(removals, missing)) + .build(); + } + + public static ListTablesResponse listTables(Catalog catalog, Namespace namespace) { + List idents = catalog.listTables(namespace); + return ListTablesResponse.builder().addAll(idents).build(); + } + + public static LoadTableResponse stageTableCreate(Catalog catalog, Namespace namespace, CreateTableRequest request) { + request.validate(); + + TableIdentifier ident = TableIdentifier.of(namespace, request.name()); + if (catalog.tableExists(ident)) { + throw new AlreadyExistsException("Table already exists: %s", ident); + } + + Map properties = Maps.newHashMap(); + properties.put("created-at", OffsetDateTime.now().toString()); + properties.putAll(request.properties()); + + TableMetadata metadata = TableMetadata.newTableMetadata( + request.schema(), + request.spec() != null ? request.spec() : PartitionSpec.unpartitioned(), + request.writeOrder() != null ? 
request.writeOrder() : SortOrder.unsorted(), + request.location(), + properties); + + return LoadTableResponse.builder() + .withTableMetadata(metadata) + .build(); + } + + public static LoadTableResponse createTable(Catalog catalog, Namespace namespace, CreateTableRequest request) { + request.validate(); + + TableIdentifier ident = TableIdentifier.of(namespace, request.name()); + Table table = catalog.buildTable(ident, request.schema()) + .withLocation(request.location()) + .withPartitionSpec(request.spec()) + .withSortOrder(request.writeOrder()) + .withProperties(request.properties()) + .create(); + + if (table instanceof BaseTable) { + return LoadTableResponse.builder() + .withTableMetadata(((BaseTable) table).operations().current()) + .build(); + } + + throw new IllegalStateException("Cannot wrap catalog that does not produce BaseTable"); + } + + public static DropTableResponse dropTable(Catalog catalog, TableIdentifier ident) { + boolean dropped = catalog.dropTable(ident); + return DropTableResponse.builder().dropped(dropped).build(); + } + + public static LoadTableResponse loadTable(Catalog catalog, TableIdentifier ident) { + Table table = catalog.loadTable(ident); + + if (table instanceof BaseTable) { + return LoadTableResponse.builder() + .withTableMetadata(((BaseTable) table).operations().current()) + .build(); + } + + throw new IllegalStateException("Cannot wrap catalog that does not produce BaseTable"); + } + + public static LoadTableResponse updateTable(Catalog catalog, TableIdentifier ident, UpdateTableRequest request) { + TableMetadata finalMetadata; + if (isCreate(request)) { + // this is a hacky way to get TableOperations for an uncommitted table + Transaction transaction = catalog.buildTable(ident, EMPTY_SCHEMA).createOrReplaceTransaction(); + if (transaction instanceof BaseTransaction) { + BaseTransaction baseTransaction = (BaseTransaction) transaction; + finalMetadata = create(baseTransaction.underlyingOps(), baseTransaction.startMetadata(), 
request); + } else { + throw new IllegalStateException("Cannot wrap catalog that does not produce BaseTransaction"); + } + + } else { + Table table = catalog.loadTable(ident); + if (table instanceof BaseTable) { + TableOperations ops = ((BaseTable) table).operations(); + finalMetadata = commit(ops, request); + } else { + throw new IllegalStateException("Cannot wrap catalog that does not produce BaseTable"); + } + } + + return LoadTableResponse.builder() + .withTableMetadata(finalMetadata) + .build(); + } + + private static boolean isCreate(UpdateTableRequest request) { + boolean isCreate = request.requirements().stream() + .anyMatch(UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist.class::isInstance); + + if (isCreate) { + List invalidRequirements = request.requirements().stream() + .filter(req -> !(req instanceof UpdateTableRequest.UpdateRequirement.AssertTableDoesNotExist)) + .collect(Collectors.toList()); + Preconditions.checkArgument(invalidRequirements.isEmpty(), + "Invalid create requirements: %s", invalidRequirements); + } + + return isCreate; + } + + private static TableMetadata create(TableOperations ops, TableMetadata start, UpdateTableRequest request) { + TableMetadata.Builder builder = TableMetadata.buildFrom(start); + + // the only valid requirement is that the table will be created + request.updates().forEach(update -> update.applyTo(builder)); + + // create transactions do not retry. 
if the table exists, retrying is not a solution + ops.commit(null, builder.build()); + + return ops.current(); + } + + private static TableMetadata commit(TableOperations ops, UpdateTableRequest request) { + AtomicBoolean isRetry = new AtomicBoolean(false); + try { + Tasks.foreach(ops) + .retry(COMMIT_NUM_RETRIES_DEFAULT) + .exponentialBackoff( + COMMIT_MIN_RETRY_WAIT_MS_DEFAULT, COMMIT_MAX_RETRY_WAIT_MS_DEFAULT, + COMMIT_TOTAL_RETRY_TIME_MS_DEFAULT, 2.0 /* exponential */) + .onlyRetryOn(CommitFailedException.class) + .run(taskOps -> { + TableMetadata base = isRetry.get() ? taskOps.refresh() : taskOps.current(); + isRetry.set(true); + + // validate requirements + try { + request.requirements().forEach(requirement -> requirement.validate(base)); + } catch (CommitFailedException e) { + // wrap and rethrow outside of tasks to avoid unnecessary retry + throw new ValidationFailureException(e); + } + + // apply changes + TableMetadata.Builder metadataBuilder = TableMetadata.buildFrom(base); + request.updates().forEach(update -> update.applyTo(metadataBuilder)); + + TableMetadata updated = metadataBuilder.build(); + if (updated.changes().isEmpty()) { + // do not commit if the metadata has not changed + return; + } + + // commit + taskOps.commit(base, updated); + }); + + } catch (ValidationFailureException e) { + throw e.wrapped(); + } + + return ops.current(); + } +} diff --git a/core/src/main/java/org/apache/iceberg/rest/RESTCatalog.java b/core/src/main/java/org/apache/iceberg/rest/RESTCatalog.java new file mode 100644 index 000000000000..66162c4d590e --- /dev/null +++ b/core/src/main/java/org/apache/iceberg/rest/RESTCatalog.java @@ -0,0 +1,417 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.rest; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Function; +import org.apache.hadoop.conf.Configuration; +import org.apache.iceberg.BaseTable; +import org.apache.iceberg.CatalogProperties; +import org.apache.iceberg.CatalogUtil; +import org.apache.iceberg.MetadataUpdate; +import org.apache.iceberg.PartitionSpec; +import org.apache.iceberg.Schema; +import org.apache.iceberg.SortOrder; +import org.apache.iceberg.Table; +import org.apache.iceberg.TableMetadata; +import org.apache.iceberg.Transaction; +import org.apache.iceberg.Transactions; +import org.apache.iceberg.catalog.Catalog; +import org.apache.iceberg.catalog.Namespace; +import org.apache.iceberg.catalog.SupportsNamespaces; +import org.apache.iceberg.catalog.TableIdentifier; +import org.apache.iceberg.exceptions.NamespaceNotEmptyException; +import org.apache.iceberg.exceptions.NoSuchNamespaceException; +import org.apache.iceberg.exceptions.NoSuchTableException; +import org.apache.iceberg.hadoop.Configurable; +import org.apache.iceberg.io.FileIO; +import org.apache.iceberg.io.ResolvingFileIO; +import org.apache.iceberg.relocated.com.google.common.base.Preconditions; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; +import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; +import 
org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.rest.requests.CreateNamespaceRequest; +import org.apache.iceberg.rest.requests.CreateTableRequest; +import org.apache.iceberg.rest.requests.UpdateNamespacePropertiesRequest; +import org.apache.iceberg.rest.responses.CreateNamespaceResponse; +import org.apache.iceberg.rest.responses.DropNamespaceResponse; +import org.apache.iceberg.rest.responses.DropTableResponse; +import org.apache.iceberg.rest.responses.GetNamespaceResponse; +import org.apache.iceberg.rest.responses.ListNamespacesResponse; +import org.apache.iceberg.rest.responses.ListTablesResponse; +import org.apache.iceberg.rest.responses.LoadTableResponse; +import org.apache.iceberg.rest.responses.UpdateNamespacePropertiesResponse; +import org.apache.iceberg.util.Pair; + +public class RESTCatalog implements Catalog, SupportsNamespaces, Configurable { + private final Function, RESTClient> clientBuilder; + private RESTClient client = null; + private String catalogName = null; + private Map properties = null; + private Object conf = null; + private FileIO io = null; + + RESTCatalog(Function, RESTClient> clientBuilder) { + this.clientBuilder = clientBuilder; + } + + @Override + public void initialize(String name, Map props) { + this.client = clientBuilder.apply(props); + this.catalogName = name; + this.properties = ImmutableMap.copyOf(props); + String ioImpl = props.get(CatalogProperties.FILE_IO_IMPL); + this.io = CatalogUtil.loadFileIO(ioImpl != null ? 
ioImpl : ResolvingFileIO.class.getName(), props, conf); + } + + @Override + public void setConf(Configuration newConf) { + this.conf = newConf; + } + + @Override + public String name() { + return catalogName; + } + + @Override + public List listTables(Namespace namespace) { + String ns = RESTUtil.urlEncode(namespace); + ListTablesResponse response = client + .get("v1/namespaces/" + ns + "/tables", ListTablesResponse.class, ErrorHandlers.namespaceErrorHandler()); + return response.identifiers(); + } + + @Override + public boolean dropTable(TableIdentifier identifier, boolean purge) { + String tablePath = tablePath(identifier); + // TODO: support purge flagN + DropTableResponse response = client.delete( + tablePath, DropTableResponse.class, ErrorHandlers.tableErrorHandler()); + return response.isDropped(); + } + + @Override + public void renameTable(TableIdentifier from, TableIdentifier to) { + + } + + private LoadTableResponse loadInternal(TableIdentifier identifier) { + String tablePath = tablePath(identifier); + return client.get(tablePath, LoadTableResponse.class, ErrorHandlers.tableErrorHandler()); + } + + @Override + public Table loadTable(TableIdentifier identifier) { + LoadTableResponse response = loadInternal(identifier); + Pair clients = tableClients(response.config()); + + return new BaseTable( + new RESTTableOperations(clients.first(), tablePath(identifier), clients.second(), response.tableMetadata()), + fullTableName(identifier)); + } + + @Override + public void createNamespace(Namespace namespace, Map metadata) { + CreateNamespaceRequest request = CreateNamespaceRequest.builder() + .withNamespace(namespace) + .setProperties(metadata) + .build(); + + // for now, ignore the response because there is no way to return it + client.post("v1/namespaces", request, CreateNamespaceResponse.class, ErrorHandlers.namespaceErrorHandler()); + } + + @Override + public List listNamespaces(Namespace namespace) throws NoSuchNamespaceException { + 
Preconditions.checkArgument(namespace.isEmpty(), "Cannot list namespaces under parent: %s", namespace); + // String joined = NULL.join(namespace.levels()); + ListNamespacesResponse response = client + .get("v1/namespaces", ListNamespacesResponse.class, ErrorHandlers.namespaceErrorHandler()); + return response.namespaces(); + } + + @Override + public Map loadNamespaceMetadata(Namespace namespace) throws NoSuchNamespaceException { + String ns = RESTUtil.urlEncode(namespace); + // TODO: rename to LoadNamespaceResponse? + GetNamespaceResponse response = client + .get("v1/namespaces/" + ns, GetNamespaceResponse.class, ErrorHandlers.namespaceErrorHandler()); + return response.properties(); + } + + @Override + public boolean dropNamespace(Namespace namespace) throws NamespaceNotEmptyException { + String ns = RESTUtil.urlEncode(namespace); + DropNamespaceResponse response = client + .delete("v1/namespaces/" + ns, DropNamespaceResponse.class, ErrorHandlers.namespaceErrorHandler()); + return response.isDropped(); + } + + @Override + public boolean setProperties(Namespace namespace, Map props) throws NoSuchNamespaceException { + String ns = RESTUtil.urlEncode(namespace); + UpdateNamespacePropertiesRequest request = UpdateNamespacePropertiesRequest.builder() + .updateAll(props) + .build(); + + UpdateNamespacePropertiesResponse response = client.post( + "v1/namespaces/" + ns + "/properties", request, UpdateNamespacePropertiesResponse.class, + ErrorHandlers.namespaceErrorHandler()); + + return !response.updated().isEmpty(); + } + + @Override + public boolean removeProperties(Namespace namespace, Set props) throws NoSuchNamespaceException { + String ns = RESTUtil.urlEncode(namespace); + UpdateNamespacePropertiesRequest request = UpdateNamespacePropertiesRequest.builder() + .removeAll(props) + .build(); + + UpdateNamespacePropertiesResponse response = client.post( + "v1/namespaces/" + ns + "/properties", request, UpdateNamespacePropertiesResponse.class, + 
ErrorHandlers.namespaceErrorHandler()); + + return !response.removed().isEmpty(); + } + + @Override + public TableBuilder buildTable(TableIdentifier identifier, Schema schema) { + return new Builder(identifier, schema); + } + + private class Builder implements TableBuilder { + private final TableIdentifier ident; + private final Schema schema; + private final ImmutableMap.Builder propertiesBuilder = ImmutableMap.builder(); + private PartitionSpec spec = null; + private SortOrder writeOrder = null; + private String location = null; + + private Builder(TableIdentifier ident, Schema schema) { + this.ident = ident; + this.schema = schema; + } + + @Override + public TableBuilder withPartitionSpec(PartitionSpec tableSpec) { + this.spec = tableSpec; + return this; + } + + @Override + public TableBuilder withSortOrder(SortOrder tableWriteOrder) { + this.writeOrder = tableWriteOrder; + return this; + } + + @Override + public TableBuilder withLocation(String tableLocation) { + this.location = tableLocation; + return this; + } + + @Override + public TableBuilder withProperties(Map props) { + this.propertiesBuilder.putAll(props); + return this; + } + + @Override + public TableBuilder withProperty(String key, String value) { + this.propertiesBuilder.put(key, value); + return this; + } + + @Override + public Table create() { + String ns = RESTUtil.urlEncode(ident.namespace()); + CreateTableRequest request = CreateTableRequest.builder() + .withName(ident.name()) + .withSchema(schema) + .withPartitionSpec(spec) + .withWriteOrder(writeOrder) + .withLocation(location) + .setProperties(propertiesBuilder.build()) + .build(); + + LoadTableResponse response = client.post( + "v1/namespaces/" + ns + "/tables", request, LoadTableResponse.class, ErrorHandlers.tableErrorHandler()); + + String tablePath = tablePath(ident); + Pair clients = tableClients(response.config()); + + return new BaseTable( + new RESTTableOperations(clients.first(), tablePath, clients.second(), 
response.tableMetadata()), + fullTableName(ident)); + } + + @Override + public Transaction createTransaction() { + LoadTableResponse response = stageCreate(); + String fullName = fullTableName(ident); + + String tablePath = tablePath(ident); + Pair clients = tableClients(response.config()); + TableMetadata meta = response.tableMetadata(); + + RESTTableOperations ops = new RESTTableOperations( + clients.first(), tablePath, clients.second(), + RESTTableOperations.UpdateType.CREATE, createChanges(meta), meta); + + return Transactions.createTableTransaction(fullName, ops, meta); + } + + @Override + public Transaction replaceTransaction() { + LoadTableResponse response = loadInternal(ident); + String fullName = fullTableName(ident); + + String tablePath = tablePath(ident); + Pair clients = tableClients(response.config()); + TableMetadata base = response.tableMetadata(); + + Map tableProperties = propertiesBuilder.build(); + TableMetadata replacement = base.buildReplacement( + schema, + spec != null ? spec : PartitionSpec.unpartitioned(), + writeOrder != null ? writeOrder : SortOrder.unsorted(), + location != null ? 
location : base.location(), + tableProperties); + + ImmutableList.Builder changes = ImmutableList.builder(); + + if (replacement.changes().stream().noneMatch(MetadataUpdate.SetCurrentSchema.class::isInstance)) { + // ensure there is a change to set the current schema + changes.add(new MetadataUpdate.SetCurrentSchema(replacement.currentSchemaId())); + } + + if (replacement.changes().stream().noneMatch(MetadataUpdate.SetDefaultPartitionSpec.class::isInstance)) { + // ensure there is a change to set the default spec + changes.add(new MetadataUpdate.SetDefaultPartitionSpec(replacement.defaultSpecId())); + } + + if (replacement.changes().stream().noneMatch(MetadataUpdate.SetDefaultSortOrder.class::isInstance)) { + // ensure there is a change to set the default sort order + changes.add(new MetadataUpdate.SetDefaultSortOrder(replacement.defaultSortOrderId())); + } + + RESTTableOperations ops = new RESTTableOperations( + clients.first(), tablePath, clients.second(), + RESTTableOperations.UpdateType.REPLACE, changes.build(), base); + + return Transactions.replaceTableTransaction(fullName, ops, replacement); + } + + @Override + public Transaction createOrReplaceTransaction() { + // return a create or a replace transaction, depending on whether the table exists + // deciding whether to create or replace can't be determined on the service because schema field IDs are assigned + // at this point and then used in data and metadata files. 
because create and replace will assign different + // field IDs, they must be determined before any writes occur + try { + return replaceTransaction(); + } catch (NoSuchTableException e) { + return createTransaction(); + } + } + + private LoadTableResponse stageCreate() { + String ns = RESTUtil.urlEncode(ident.namespace()); + Map tableProperties = propertiesBuilder.build(); + + CreateTableRequest request = CreateTableRequest.builder() + .withName(ident.name()) + .withSchema(schema) + .withPartitionSpec(spec) + .withWriteOrder(writeOrder) + .withLocation(location) + .setProperties(tableProperties) + .build(); + + // TODO: will this be a specific route or a modified create? + return client.post( + "v1/namespaces/" + ns + "/stageCreate", request, LoadTableResponse.class, ErrorHandlers.tableErrorHandler()); + } + } + + private static List createChanges(TableMetadata meta) { + ImmutableList.Builder changes = ImmutableList.builder(); + + Schema schema = meta.schema(); + changes.add(new MetadataUpdate.AddSchema(schema, schema.highestFieldId())); + changes.add(new MetadataUpdate.SetCurrentSchema(-1)); + + PartitionSpec spec = meta.spec(); + if (spec != null && spec.isPartitioned()) { + changes.add(new MetadataUpdate.AddPartitionSpec(spec)); + changes.add(new MetadataUpdate.SetDefaultPartitionSpec(-1)); + } + + SortOrder order = meta.sortOrder(); + if (order != null && order.isSorted()) { + changes.add(new MetadataUpdate.AddSortOrder(order)); + changes.add(new MetadataUpdate.SetDefaultSortOrder(-1)); + } + + String location = meta.location(); + if (location != null) { + changes.add(new MetadataUpdate.SetLocation(location)); + } + + Map properties = meta.properties(); + if (properties != null && !properties.isEmpty()) { + changes.add(new MetadataUpdate.SetProperties(properties)); + } + + return changes.build(); + } + + private String fullTableName(TableIdentifier ident) { + return String.format("%s.%s", catalogName, ident); + } + + private static String 
tablePath(TableIdentifier ident) { + return "v1/namespaces/" + RESTUtil.urlEncode(ident.namespace()) + "/tables/" + ident.name(); + } + + private Map fullConf(Map config) { + Map fullConf = Maps.newHashMap(properties); + fullConf.putAll(config); + return fullConf; + } + + private Pair tableClients(Map config) { + if (config.isEmpty()) { + return Pair.of(client, io); // reuse client and io since config is the same + } + + Map fullConf = fullConf(config); + String ioImpl = fullConf.get(CatalogProperties.FILE_IO_IMPL); + FileIO tableIO = CatalogUtil.loadFileIO( + ioImpl != null ? ioImpl : ResolvingFileIO.class.getName(), fullConf, this.conf); + RESTClient tableClient = clientBuilder.apply(fullConf); + + return Pair.of(tableClient, tableIO); + } +} diff --git a/core/src/main/java/org/apache/iceberg/rest/RESTTableOperations.java b/core/src/main/java/org/apache/iceberg/rest/RESTTableOperations.java new file mode 100644 index 000000000000..9e3938ff656f --- /dev/null +++ b/core/src/main/java/org/apache/iceberg/rest/RESTTableOperations.java @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
package org.apache.iceberg.rest;

import java.util.List;
import java.util.Objects;
import java.util.function.Consumer;
import org.apache.iceberg.LocationProviders;
import org.apache.iceberg.MetadataUpdate;
import org.apache.iceberg.TableMetadata;
import org.apache.iceberg.TableOperations;
import org.apache.iceberg.TableProperties;
import org.apache.iceberg.encryption.EncryptionManager;
import org.apache.iceberg.io.FileIO;
import org.apache.iceberg.io.LocationProvider;
import org.apache.iceberg.relocated.com.google.common.base.Preconditions;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList;
import org.apache.iceberg.relocated.com.google.common.collect.Lists;
import org.apache.iceberg.rest.requests.UpdateTableRequest;
import org.apache.iceberg.rest.responses.ErrorResponse;
import org.apache.iceberg.rest.responses.LoadTableResponse;

/**
 * {@link TableOperations} implementation that refreshes and commits table metadata through a
 * REST endpoint.
 * <p>
 * NOTE(review): generic type parameters in this class were stripped by a mangled extraction
 * and have been restored from usage ({@code List<MetadataUpdate>}, {@code Consumer<ErrorResponse>}).
 */
class RESTTableOperations implements TableOperations {
  private static final String METADATA_FOLDER_NAME = "metadata";

  /**
   * How the next commit is sent to the service: CREATE for a staged create transaction,
   * REPLACE for a replace transaction, SIMPLE for a normal commit against existing metadata.
   */
  enum UpdateType {
    CREATE,
    REPLACE,
    SIMPLE
  }

  private final RESTClient client;
  private final String path;
  private final FileIO io;
  // changes accumulated before the first commit of a create/replace transaction
  private final List<MetadataUpdate> createChanges;
  // the metadata a replace transaction started from; used instead of a refreshed base
  private final TableMetadata replaceBase;
  private UpdateType updateType;
  private TableMetadata current;

  RESTTableOperations(RESTClient client, String path, FileIO io, TableMetadata current) {
    this(client, path, io, UpdateType.SIMPLE, Lists.newArrayList(), current);
  }

  RESTTableOperations(RESTClient client, String path, FileIO io, UpdateType updateType,
                      List<MetadataUpdate> createChanges, TableMetadata current) {
    this.client = client;
    this.path = path;
    this.io = io;
    this.updateType = updateType;
    this.createChanges = createChanges;
    this.replaceBase = current;
    // a staged create has no committed metadata yet, so current starts as null
    if (updateType == UpdateType.CREATE) {
      this.current = null;
    } else {
      this.current = current;
    }
  }

  @Override
  public TableMetadata current() {
    return current;
  }

  @Override
  public TableMetadata refresh() {
    return updateCurrentMetadata(client.get(path, LoadTableResponse.class, ErrorHandlers.tableErrorHandler()));
  }

  @Override
  public void commit(TableMetadata base, TableMetadata metadata) {
    UpdateTableRequest.Builder requestBuilder;
    List<MetadataUpdate> baseChanges;
    Consumer<ErrorResponse> errorHandler;
    switch (updateType) {
      case CREATE:
        Preconditions.checkState(base == null, "Invalid base metadata for create transaction, expected null: %s", base);
        requestBuilder = UpdateTableRequest.builderForCreate();
        baseChanges = createChanges;
        errorHandler = ErrorHandlers.tableErrorHandler(); // throws NoSuchTableException
        break;

      case REPLACE:
        Preconditions.checkState(base != null, "Invalid base metadata: null");
        // use the original replace base metadata because the transaction will refresh
        requestBuilder = UpdateTableRequest.builderForReplace(replaceBase);
        baseChanges = createChanges;
        errorHandler = ErrorHandlers.tableCommitHandler();
        break;

      case SIMPLE:
        Preconditions.checkState(base != null, "Invalid base metadata: null");
        requestBuilder = UpdateTableRequest.builderFor(base);
        baseChanges = ImmutableList.of();
        errorHandler = ErrorHandlers.tableCommitHandler();
        break;

      default:
        throw new UnsupportedOperationException(String.format("Update type %s is not supported", updateType));
    }

    // transaction changes first, then the changes from this commit's metadata
    baseChanges.forEach(requestBuilder::update);
    metadata.changes().forEach(requestBuilder::update);
    UpdateTableRequest request = requestBuilder.build();

    // the error handler will throw necessary exceptions like CommitFailedException and UnknownCommitStateException
    // TODO: ensure that the HTTP client lib passes HTTP client errors to the error handler
    LoadTableResponse response = client.post(path, request, LoadTableResponse.class, errorHandler);

    // all future commits should be simple commits
    this.updateType = UpdateType.SIMPLE;

    updateCurrentMetadata(response);
  }

  @Override
  public FileIO io() {
    return io;
  }

  /**
   * Updates {@code current} from a service response when the metadata location changed,
   * and returns the (possibly unchanged) current metadata.
   */
  private TableMetadata updateCurrentMetadata(LoadTableResponse response) {
    // LoadTableResponse is used to deserialize the response, but config is not allowed by the REST spec so it can be
    // safely ignored. there is no requirement to update config on refresh or commit.
    if (current == null || !Objects.equals(current.metadataFileLocation(), response.metadataLocation())) {
      this.current = response.tableMetadata();
    }

    return current;
  }

  /**
   * Resolves a metadata file location for the given metadata, honoring
   * {@code write.metadata.path} when set and falling back to {@code <location>/metadata/}.
   */
  private static String metadataFileLocation(TableMetadata metadata, String filename) {
    String metadataLocation = metadata.properties()
        .get(TableProperties.WRITE_METADATA_LOCATION);

    if (metadataLocation != null) {
      return String.format("%s/%s", metadataLocation, filename);
    } else {
      return String.format("%s/%s/%s", metadata.location(), METADATA_FOLDER_NAME, filename);
    }
  }

  @Override
  public String metadataFileLocation(String filename) {
    return metadataFileLocation(current(), filename);
  }

  @Override
  public LocationProvider locationProvider() {
    return LocationProviders.locationsFor(current().location(), current().properties());
  }

  @Override
  public TableOperations temp(TableMetadata uncommittedMetadata) {
    // read-only view over uncommitted metadata; refresh and commit are not supported
    return new TableOperations() {
      @Override
      public TableMetadata current() {
        return uncommittedMetadata;
      }

      @Override
      public TableMetadata refresh() {
        throw new UnsupportedOperationException("Cannot call refresh on temporary table operations");
      }

      @Override
      public void commit(TableMetadata base, TableMetadata metadata) {
        throw new UnsupportedOperationException("Cannot call commit on temporary table operations");
      }

      @Override
      public String metadataFileLocation(String fileName) {
        return RESTTableOperations.metadataFileLocation(uncommittedMetadata, fileName);
      }

      @Override
      public LocationProvider locationProvider() {
        return LocationProviders.locationsFor(uncommittedMetadata.location(), uncommittedMetadata.properties());
      }

      @Override
      public FileIO io() {
        return RESTTableOperations.this.io();
      }

      @Override
      public EncryptionManager encryption() {
        return RESTTableOperations.this.encryption();
      }

      @Override
      public long newSnapshotId() {
        return RESTTableOperations.this.newSnapshotId();
      }
    };
  }
}
@@ -48,7 +51,13 @@ private UpdateNamespacePropertiesRequest(List removals, Map commonKeys = Sets.intersection(updates().keySet(), Sets.newHashSet(removals())); + if (!commonKeys.isEmpty()) { + throw new UnprocessableEntityException( + "Invalid namespace update, cannot simultaneously set and remove keys: %s", commonKeys); + } + return this; } diff --git a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java index 5171849c8c57..b9d5c0717cff 100644 --- a/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java +++ b/core/src/test/java/org/apache/iceberg/catalog/CatalogTests.java @@ -1207,7 +1207,7 @@ public void testCreateOrReplaceTransactionConcurrentCreate() { catalog.buildTable(TABLE, OTHER_SCHEMA).create(); AssertHelpers.assertThrows("Should fail because table was created concurrently", - CommitFailedException.class, "Table already exists", createOrReplace::commitTransaction); + AlreadyExistsException.class, "Table already exists", createOrReplace::commitTransaction); // validate the concurrently created table is unmodified Table table = catalog.loadTable(TABLE); diff --git a/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java b/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java new file mode 100644 index 000000000000..32791e7626e4 --- /dev/null +++ b/core/src/test/java/org/apache/iceberg/rest/RESTCatalogAdapter.java @@ -0,0 +1,340 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
package org.apache.iceberg.rest;

import java.io.Closeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import org.apache.iceberg.catalog.Catalog;
import org.apache.iceberg.catalog.Namespace;
import org.apache.iceberg.catalog.SupportsNamespaces;
import org.apache.iceberg.catalog.TableIdentifier;
import org.apache.iceberg.exceptions.AlreadyExistsException;
import org.apache.iceberg.exceptions.CommitFailedException;
import org.apache.iceberg.exceptions.CommitStateUnknownException;
import org.apache.iceberg.exceptions.ForbiddenException;
import org.apache.iceberg.exceptions.NamespaceNotEmptyException;
import org.apache.iceberg.exceptions.NoSuchIcebergTableException;
import org.apache.iceberg.exceptions.NoSuchNamespaceException;
import org.apache.iceberg.exceptions.NoSuchTableException;
import org.apache.iceberg.exceptions.NotAuthorizedException;
import org.apache.iceberg.exceptions.RESTException;
import org.apache.iceberg.exceptions.UnprocessableEntityException;
import org.apache.iceberg.exceptions.ValidationException;
import org.apache.iceberg.relocated.com.google.common.base.Splitter;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.apache.iceberg.rest.requests.CreateNamespaceRequest;
import org.apache.iceberg.rest.requests.CreateTableRequest;
import org.apache.iceberg.rest.requests.UpdateNamespacePropertiesRequest;
import org.apache.iceberg.rest.requests.UpdateTableRequest;
import org.apache.iceberg.rest.responses.ErrorResponse;
import org.apache.iceberg.util.Pair;

/**
 * Adaptor class to translate REST requests into {@link Catalog} API calls.
 * <p>
 * NOTE(review): generic type parameters in this class were stripped by a mangled extraction
 * and have been restored from usage; the private field {@code requriedLength} was also
 * renamed to {@code requiredLength}.
 */
public class RESTCatalogAdapter implements RESTClient {
  private static final Splitter SLASH = Splitter.on('/');

  // maps catalog exceptions to the HTTP status code reported to the client
  private static final Map<Class<? extends Exception>, Integer> EXCEPTION_ERROR_CODES = ImmutableMap
      .<Class<? extends Exception>, Integer>builder()
      .put(IllegalArgumentException.class, 400)
      .put(ValidationException.class, 400)
      .put(NamespaceNotEmptyException.class, 400) // TODO: should this be more specific?
      .put(NotAuthorizedException.class, 401)
      .put(ForbiddenException.class, 403)
      .put(NoSuchNamespaceException.class, 404)
      .put(NoSuchTableException.class, 404)
      .put(NoSuchIcebergTableException.class, 404)
      .put(UnsupportedOperationException.class, 406)
      .put(AlreadyExistsException.class, 409)
      .put(CommitFailedException.class, 409)
      .put(UnprocessableEntityException.class, 422)
      .put(CommitStateUnknownException.class, 500)
      .build();

  private final Catalog catalog;
  // null when the wrapped catalog does not support namespace operations
  private final SupportsNamespaces asNamespaceCatalog;

  public RESTCatalogAdapter(Catalog catalog) {
    this.catalog = catalog;
    this.asNamespaceCatalog = catalog instanceof SupportsNamespaces ? (SupportsNamespaces) catalog : null;
  }

  private enum HTTPMethod {
    GET,
    HEAD,
    POST,
    DELETE
  }

  /**
   * Known REST routes; each value parses its path pattern into literal segments
   * (requirements) and {name} segments (variables) keyed by position.
   */
  private enum Route {
    CONFIG(HTTPMethod.GET, "v1/config"),
    LIST_NAMESPACES(HTTPMethod.GET, "v1/namespaces"),
    CREATE_NAMESPACE(HTTPMethod.POST, "v1/namespaces"),
    LOAD_NAMESPACE(HTTPMethod.GET, "v1/namespaces/{namespace}"),
    DROP_NAMESPACE(HTTPMethod.DELETE, "v1/namespaces/{namespace}"),
    UPDATE_NAMESPACE(HTTPMethod.POST, "v1/namespaces/{namespace}/properties"),
    LIST_TABLES(HTTPMethod.GET, "v1/namespaces/{namespace}/tables"),
    CREATE_TABLE(HTTPMethod.POST, "v1/namespaces/{namespace}/tables"),
    STAGE_CREATE_TABLE(HTTPMethod.POST, "v1/namespaces/{namespace}/stageCreate"),
    LOAD_TABLE(HTTPMethod.GET, "v1/namespaces/{namespace}/tables/{table}"),
    UPDATE_TABLE(HTTPMethod.POST, "v1/namespaces/{namespace}/tables/{table}"),
    DROP_TABLE(HTTPMethod.DELETE, "v1/namespaces/{namespace}/tables/{table}");

    private final HTTPMethod method;
    private final int requiredLength;
    private final Map<Integer, String> requirements;
    private final Map<Integer, String> variables;

    Route(HTTPMethod method, String pattern) {
      this.method = method;

      // parse the pattern into requirements and variables
      List<String> parts = SLASH.splitToList(pattern);
      ImmutableMap.Builder<Integer, String> requirementsBuilder = ImmutableMap.builder();
      ImmutableMap.Builder<Integer, String> variablesBuilder = ImmutableMap.builder();
      for (int pos = 0; pos < parts.size(); pos += 1) {
        String part = parts.get(pos);
        if (part.startsWith("{") && part.endsWith("}")) {
          variablesBuilder.put(pos, part.substring(1, part.length() - 1));
        } else {
          requirementsBuilder.put(pos, part);
        }
      }

      this.requiredLength = parts.size();
      this.requirements = requirementsBuilder.build();
      this.variables = variablesBuilder.build();
    }

    /** Returns whether the request method and path segments match this route's pattern. */
    private boolean matches(HTTPMethod requestMethod, List<String> requestPath) {
      return method == requestMethod && requiredLength == requestPath.size() &&
          requirements.entrySet().stream().allMatch(
              requirement -> requirement.getValue().equalsIgnoreCase(requestPath.get(requirement.getKey())));
    }

    /** Extracts the {name} path variables from a matching request path. */
    private Map<String, String> variables(List<String> requestPath) {
      ImmutableMap.Builder<String, String> vars = ImmutableMap.builder();
      variables.forEach((key, value) -> vars.put(value, requestPath.get(key)));
      return vars.build();
    }

    /** Returns the matching route and its path variables, or null when no route matches. */
    public static Pair<Route, Map<String, String>> from(HTTPMethod method, String path) {
      List<String> parts = SLASH.splitToList(path);
      for (Route candidate : Route.values()) {
        if (candidate.matches(method, parts)) {
          return Pair.of(candidate, candidate.variables(parts));
        }
      }

      return null;
    }
  }

  /**
   * Dispatches a matched route to the corresponding {@link CatalogHandlers} call.
   *
   * @return the handler's response cast to responseType, or null when there is no handler
   *         for the route (translated to a 400 by {@link #execute})
   */
  public <T> T handleRequest(Route route, Map<String, String> vars, Object body, Class<T> responseType) {
    switch (route) {
      case CONFIG:
        // TODO: use the correct response object
        return castResponse(responseType, ImmutableMap.of());

      case LIST_NAMESPACES:
        if (asNamespaceCatalog != null) {
          // TODO: support parent namespace from query params
          return castResponse(responseType, CatalogHandlers.listNamespaces(asNamespaceCatalog, Namespace.empty()));
        }
        break;

      case CREATE_NAMESPACE:
        if (asNamespaceCatalog != null) {
          CreateNamespaceRequest request = castRequest(CreateNamespaceRequest.class, body);
          return castResponse(responseType, CatalogHandlers.createNamespace(asNamespaceCatalog, request));
        }
        break;

      case LOAD_NAMESPACE:
        if (asNamespaceCatalog != null) {
          Namespace namespace = namespaceFromPathVars(vars);
          return castResponse(responseType, CatalogHandlers.loadNamespace(asNamespaceCatalog, namespace));
        }
        break;

      case DROP_NAMESPACE:
        if (asNamespaceCatalog != null) {
          Namespace namespace = namespaceFromPathVars(vars);
          return castResponse(responseType, CatalogHandlers.dropNamespace(asNamespaceCatalog, namespace));
        }
        break;

      case UPDATE_NAMESPACE:
        if (asNamespaceCatalog != null) {
          Namespace namespace = namespaceFromPathVars(vars);
          UpdateNamespacePropertiesRequest request = castRequest(UpdateNamespacePropertiesRequest.class, body);
          return castResponse(responseType,
              CatalogHandlers.updateNamespaceProperties(asNamespaceCatalog, namespace, request));
        }
        break;

      case LIST_TABLES: {
        Namespace namespace = namespaceFromPathVars(vars);
        return castResponse(responseType, CatalogHandlers.listTables(catalog, namespace));
      }

      case CREATE_TABLE: {
        Namespace namespace = namespaceFromPathVars(vars);
        CreateTableRequest request = castRequest(CreateTableRequest.class, body);
        return castResponse(responseType, CatalogHandlers.createTable(catalog, namespace, request));
      }

      case STAGE_CREATE_TABLE: {
        Namespace namespace = namespaceFromPathVars(vars);
        CreateTableRequest request = castRequest(CreateTableRequest.class, body);
        return castResponse(responseType, CatalogHandlers.stageTableCreate(catalog, namespace, request));
      }

      case DROP_TABLE: {
        TableIdentifier ident = identFromPathVars(vars);
        return castResponse(responseType, CatalogHandlers.dropTable(catalog, ident));
      }

      case LOAD_TABLE: {
        TableIdentifier ident = identFromPathVars(vars);
        return castResponse(responseType, CatalogHandlers.loadTable(catalog, ident));
      }

      case UPDATE_TABLE: {
        TableIdentifier ident = identFromPathVars(vars);
        UpdateTableRequest request = castRequest(UpdateTableRequest.class, body);
        return castResponse(responseType, CatalogHandlers.updateTable(catalog, ident, request));
      }

      default:
    }

    return null; // will be converted to a 400
  }

  /**
   * Routes and executes a request; catalog exceptions are converted to {@link ErrorResponse}
   * and passed to the error handler, mirroring a real HTTP client.
   */
  public <T> T execute(HTTPMethod method, String path, Object body, Class<T> responseType,
                       Consumer<ErrorResponse> errorHandler) {
    ErrorResponse.Builder errorBuilder = ErrorResponse.builder();
    Pair<Route, Map<String, String>> routeAndVars = Route.from(method, path);
    if (routeAndVars != null) {
      try {
        T response = handleRequest(routeAndVars.first(), routeAndVars.second(), body, responseType);
        if (response != null) {
          return response;
        }

        // if a response was not returned, there was no handler for the route
        errorBuilder
            .responseCode(400)
            .withType("BadRequestException")
            .withMessage(String.format("No handler for request: %s %s", method, path));

      } catch (RuntimeException e) {
        configureResponseFromException(e, errorBuilder);
      }

    } else {
      errorBuilder
          .responseCode(400)
          .withType("BadRequestException")
          .withMessage(String.format("No route for request: %s %s", method, path));
    }

    ErrorResponse error = errorBuilder.build();
    errorHandler.accept(error);

    // if the error handler doesn't throw an exception, throw a generic one
    throw new RESTException("Unhandled error: %s", error);
  }

  @Override
  public <T> T delete(String path, Class<T> responseType, Consumer<ErrorResponse> errorHandler) {
    return execute(HTTPMethod.DELETE, path, null, responseType, errorHandler);
  }

  @Override
  public <T> T post(String path, Object body, Class<T> responseType, Consumer<ErrorResponse> errorHandler) {
    return execute(HTTPMethod.POST, path, body, responseType, errorHandler);
  }

  @Override
  public <T> T get(String path, Class<T> responseType, Consumer<ErrorResponse> errorHandler) {
    return execute(HTTPMethod.GET, path, null, responseType, errorHandler);
  }

  @Override
  public <T> T head(String path, Consumer<ErrorResponse> errorHandler) {
    return execute(HTTPMethod.HEAD, path, null, null, errorHandler);
  }

  @Override
  public void close() throws IOException {
    if (catalog instanceof Closeable) {
      ((Closeable) catalog).close();
    }
  }

  private static class BadResponseType extends RuntimeException {
    private BadResponseType(Class<?> responseType, Object response) {
      super(String.format("Invalid response object, not a %s: %s", responseType.getName(), response));
    }
  }

  private static class BadRequestType extends RuntimeException {
    private BadRequestType(Class<?> requestType, Object request) {
      super(String.format("Invalid request object, not a %s: %s", requestType.getName(), request));
    }
  }

  /** Casts the request body to the expected request type or fails with a 400-style error. */
  public static <T> T castRequest(Class<T> requestType, Object request) {
    if (requestType.isInstance(request)) {
      return requestType.cast(request);
    }

    throw new BadRequestType(requestType, request);
  }

  /** Casts a handler result to the expected response type or fails with a 500-style error. */
  public static <T> T castResponse(Class<T> responseType, Object response) {
    if (responseType.isInstance(response)) {
      return responseType.cast(response);
    }

    throw new BadResponseType(responseType, response);
  }

  /** Fills an error response builder from an exception; unmapped types default to 500. */
  public static void configureResponseFromException(Exception exc, ErrorResponse.Builder errorBuilder) {
    errorBuilder
        .responseCode(EXCEPTION_ERROR_CODES.getOrDefault(exc.getClass(), 500))
        .withType(exc.getClass().getSimpleName())
        .withMessage(exc.getMessage());
  }

  private static Namespace namespaceFromPathVars(Map<String, String> pathVars) {
    return RESTUtil.urlDecode(pathVars.get("namespace"));
  }

  private static TableIdentifier identFromPathVars(Map<String, String> pathVars) {
    return TableIdentifier.of(namespaceFromPathVars(pathVars), pathVars.get("table"));
  }
}
package org.apache.iceberg.rest;

import java.io.File;
import java.io.IOException;
import java.util.Map;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.iceberg.CatalogProperties;
import org.apache.iceberg.catalog.CatalogTests;
import org.apache.iceberg.jdbc.JdbcCatalog;
import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

/**
 * Runs the shared {@link CatalogTests} suite against a {@link RESTCatalog} whose requests are
 * served in-process by a {@link RESTCatalogAdapter} wrapping a JDBC (SQLite in-memory) catalog.
 * <p>
 * NOTE(review): the stripped generic parameter on the superclass was restored as
 * {@code CatalogTests<RESTCatalog>}, matching the {@code catalog()} override below.
 */
public class TestRESTCatalog extends CatalogTests<RESTCatalog> {
  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  private RESTCatalog restCatalog;

  @Before
  public void createCatalog() throws IOException {
    File warehouse = temp.newFolder();
    Configuration conf = new Configuration();

    JdbcCatalog backendCatalog = new JdbcCatalog();
    backendCatalog.setConf(conf);
    // a random suffix keeps each test's in-memory SQLite database isolated
    Map<String, String> backendCatalogProperties = ImmutableMap.of(
        CatalogProperties.WAREHOUSE_LOCATION, warehouse.getAbsolutePath(),
        CatalogProperties.URI, "jdbc:sqlite:file::memory:?ic" + UUID.randomUUID().toString().replace("-", ""),
        JdbcCatalog.PROPERTY_PREFIX + "username", "user",
        JdbcCatalog.PROPERTY_PREFIX + "password", "password");
    backendCatalog.initialize("backend", backendCatalogProperties);

    RESTCatalogAdapter adaptor = new RESTCatalogAdapter(backendCatalog);

    // the adapter stands in for an HTTP client, so no server is started
    this.restCatalog = new RESTCatalog((config) -> adaptor);
    restCatalog.setConf(conf);
    restCatalog.initialize("prod", ImmutableMap.of());
  }

  @Override
  protected RESTCatalog catalog() {
    return restCatalog;
  }

  @Override
  protected boolean supportsNamespaceProperties() {
    return true;
  }

  @Override
  protected boolean supportsServerSideRetry() {
    return true;
  }
}