Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@

package org.apache.hadoop.ozone.container.keyvalue;

import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil.isSameSchemaVersion;

import com.fasterxml.jackson.databind.JsonNode;
Expand All @@ -36,6 +35,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
import org.apache.hadoop.hdds.server.JsonUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.impl.ContainerData;
Expand Down Expand Up @@ -500,9 +500,9 @@ static PendingDelete countPendingDeletesSchemaV2(
Table<Long, DeletedBlocksTransaction> delTxTable =
schemaTwoStore.getDeleteTransactionTable();

try (Table.KeyValueIterator<Long, DeletedBlocksTransaction> iterator = delTxTable.iterator(VALUE_ONLY)) {
try (TableIterator<Long, DeletedBlocksTransaction> iterator = delTxTable.valueIterator()) {
while (iterator.hasNext()) {
DeletedBlocksTransaction txn = iterator.next().getValue();
final DeletedBlocksTransaction txn = iterator.next();
final List<Long> localIDs = txn.getLocalIDList();
// In schema 2, pending delete blocks are stored in the
// transaction object. Since the actual blocks still exist in the
Expand Down Expand Up @@ -543,10 +543,10 @@ static PendingDelete countPendingDeletesSchemaV3(
KeyValueContainerData containerData) throws IOException {
long pendingDeleteBlockCountTotal = 0;
long pendingDeleteBytes = 0;
try (Table.KeyValueIterator<String, DeletedBlocksTransaction> iter
= store.getDeleteTransactionTable().iterator(containerData.containerPrefix(), VALUE_ONLY)) {
while (iter.hasNext()) {
DeletedBlocksTransaction delTx = iter.next().getValue();
try (TableIterator<String, DeletedBlocksTransaction> iterator
= store.getDeleteTransactionTable().valueIterator(containerData.containerPrefix())) {
while (iterator.hasNext()) {
final DeletedBlocksTransaction delTx = iterator.next();
final List<Long> localIDs = delTx.getLocalIDList();
pendingDeleteBlockCountTotal += localIDs.size();
pendingDeleteBytes += computePendingDeleteBytes(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@

package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;

import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2;
import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V3;
Expand All @@ -39,6 +38,7 @@
import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.helpers.BlockDeletingServiceMetrics;
Expand Down Expand Up @@ -285,7 +285,7 @@ public ContainerBackgroundTaskResult deleteViaSchema2(
Table<Long, DeletedBlocksTransaction> deleteTxns =
((DeleteTransactionStore<Long>) meta.getStore())
.getDeleteTransactionTable();
try (Table.KeyValueIterator<Long, DeletedBlocksTransaction> iterator = deleteTxns.iterator(VALUE_ONLY)) {
try (TableIterator<Long, DeletedBlocksTransaction> iterator = deleteTxns.valueIterator()) {
return deleteViaTransactionStore(
iterator, meta,
container, dataDir, startTime, schema2Deleter);
Expand All @@ -304,16 +304,16 @@ public ContainerBackgroundTaskResult deleteViaSchema3(
Table<String, DeletedBlocksTransaction> deleteTxns =
((DeleteTransactionStore<String>) meta.getStore())
.getDeleteTransactionTable();
try (Table.KeyValueIterator<String, DeletedBlocksTransaction> iterator
= deleteTxns.iterator(containerData.containerPrefix(), VALUE_ONLY)) {
try (TableIterator<String, DeletedBlocksTransaction> iterator
= deleteTxns.valueIterator(containerData.containerPrefix())) {
return deleteViaTransactionStore(
iterator, meta,
container, dataDir, startTime, schema3Deleter);
}
}

private ContainerBackgroundTaskResult deleteViaTransactionStore(
Table.KeyValueIterator<?, DeletedBlocksTransaction> iter, DBHandle meta, Container container, File dataDir,
TableIterator<?, DeletedBlocksTransaction> iter, DBHandle meta, Container container, File dataDir,
long startTime, Deleter deleter) throws IOException {
ContainerBackgroundTaskResult crr = new ContainerBackgroundTaskResult();
if (!checkDataDir(dataDir)) {
Expand All @@ -331,7 +331,7 @@ private ContainerBackgroundTaskResult deleteViaTransactionStore(
List<DeletedBlocksTransaction> delBlocks = new ArrayList<>();
int numBlocks = 0;
while (iter.hasNext() && (numBlocks < blocksToDelete)) {
DeletedBlocksTransaction delTx = iter.next().getValue();
final DeletedBlocksTransaction delTx = iter.next();
numBlocks += delTx.getLocalIDList().size();
delBlocks.add(delTx);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@

package org.apache.hadoop.ozone.container.ozoneimpl;

import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.KEY_ONLY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_WORKERS;
Expand Down Expand Up @@ -65,7 +64,7 @@
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.container.checksum.ContainerChecksumTreeManager;
import org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage;
Expand Down Expand Up @@ -352,10 +351,10 @@ public void buildContainerSet() throws IOException {
for (Thread volumeThread : volumeThreads) {
volumeThread.join();
}
try (Table.KeyValueIterator<ContainerID, String> itr = containerSet.getContainerIdsTable().iterator(KEY_ONLY)) {
try (TableIterator<ContainerID, ContainerID> itr = containerSet.getContainerIdsTable().keyIterator()) {
final Map<ContainerID, Long> containerIds = new HashMap<>();
while (itr.hasNext()) {
containerIds.put(itr.next().getKey(), 0L);
containerIds.put(itr.next(), 0L);
}
containerSet.buildMissingContainerSetAndValidate(containerIds, ContainerID::getId);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ abstract class RDBStoreAbstractIterator<RAW>
this.rocksDBIterator = iterator;
this.rocksDBTable = table;
this.prefix = prefix;
this.type = this.prefix == null ? type : type.addKey(); // it has to read key for matching prefix.
this.type = type;
}

Type getType() {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,8 @@ class RDBStoreCodecBufferIterator extends RDBStoreAbstractIterator<CodecBuffer>
final String name = table != null ? table.getName() : null;
this.keyBuffer = new Buffer(
new CodecBuffer.Capacity(name + "-iterator-key", 1 << 10),
getType().readKey() ? buffer -> getRocksDBIterator().get().key(buffer) : null);
// it has to read key for matching prefix.
getType().readKey() || prefix != null ? buffer -> getRocksDBIterator().get().key(buffer) : null);
this.valueBuffer = new Buffer(
new CodecBuffer.Capacity(name + "-iterator-value", 4 << 10),
getType().readValue() ? buffer -> getRocksDBIterator().get().value(buffer) : null);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -152,25 +152,51 @@ default KeyValueIterator<KEY, VALUE> iterator(KEY prefix) throws RocksDatabaseEx
return iterator(prefix, KeyValueIterator.Type.KEY_AND_VALUE);
}

/**
 * Iterate the entire table (no prefix filtering).
 * The same as calling {@code iterator(null, type)}.
 *
 * @param type Specify whether key and/or value are required.
 * @return an iterator over all elements of this table.
 */
default KeyValueIterator<KEY, VALUE> iterator(KeyValueIterator.Type type)
    throws RocksDatabaseException, CodecException {
  final KEY noPrefix = null;
  return iterator(noPrefix, type);
}

/**
 * Iterate the elements in this table.
 * <p>
 * Note that using a more restrictive type may improve performance
 * since the unrequired data may not be read from the DB.
 * <p>
 * Note also that, when the prefix is non-empty,
 * using a non-key type may not improve performance
 * since it has to read keys for matching the prefix.
 *
 * @param prefix The prefix of the elements to be iterated.
 * @param type Specify whether key and/or value are required.
 * @return an iterator.
 */
KeyValueIterator<KEY, VALUE> iterator(KEY prefix, KeyValueIterator.Type type)
    throws RocksDatabaseException, CodecException;

/**
 * Iterate only the keys of this table, restricted to the given prefix.
 * Uses {@link KeyValueIterator.Type#KEY_ONLY} so values need not be read from the DB.
 *
 * @param prefix The prefix of the elements to be iterated.
 * @return a key-only iterator.
 */
default TableIterator<KEY, KEY> keyIterator(KEY prefix) throws RocksDatabaseException, CodecException {
  return TableIterator.convert(
      iterator(prefix, KeyValueIterator.Type.KEY_ONLY), KeyValue::getKey);
}

/**
 * Iterate only the keys of this table, without prefix filtering.
 * The same as calling {@code keyIterator(null)}.
 *
 * @return a key-only iterator over the whole table.
 */
default TableIterator<KEY, KEY> keyIterator() throws RocksDatabaseException, CodecException {
  final KEY noPrefix = null;
  return keyIterator(noPrefix);
}

/**
 * Iterate only the values of this table, restricted to the given prefix.
 * Uses {@link KeyValueIterator.Type#VALUE_ONLY} so only the data needed is read.
 *
 * @param prefix The prefix of the elements to be iterated.
 * @return a value-only iterator.
 */
default TableIterator<KEY, VALUE> valueIterator(KEY prefix) throws RocksDatabaseException, CodecException {
  return TableIterator.convert(
      iterator(prefix, KeyValueIterator.Type.VALUE_ONLY), KeyValue::getValue);
}

/**
 * Iterate only the values of this table, without prefix filtering.
 * The same as calling {@code valueIterator(null)}.
 *
 * @return a value-only iterator over the whole table.
 */
default TableIterator<KEY, VALUE> valueIterator() throws RocksDatabaseException, CodecException {
  final KEY noPrefix = null;
  return valueIterator(noPrefix);
}

/**
* Returns the Name of this Table.
* @return - Table Name.
Expand Down Expand Up @@ -343,8 +369,8 @@ public boolean equals(Object obj) {
return false;
}
final KeyValue<?, ?> that = (KeyValue<?, ?>) obj;
return this.getKey().equals(that.getKey())
&& this.getValue().equals(that.getValue());
return Objects.equals(this.getKey(), that.getKey())
&& Objects.equals(this.getValue(), that.getValue());
}

@Override
Expand Down Expand Up @@ -383,10 +409,6 @@ boolean readKey() {
boolean readValue() {
return (this.ordinal() & VALUE_ONLY.ordinal()) != 0;
}

Type addKey() {
return values()[ordinal() | KEY_ONLY.ordinal()];
}
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@

import java.io.Closeable;
import java.util.Iterator;
import java.util.function.Function;
import org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator;

/**
* To iterate a {@link Table}.
Expand Down Expand Up @@ -54,4 +56,50 @@ public interface TableIterator<KEY, T> extends Iterator<T>, Closeable {
*/
void removeFromDB() throws RocksDatabaseException, CodecException;

/**
* Convert the given {@link KeyValueIterator} to a {@link TableIterator} using the given converter.
*
* @param <K> The key type of both the input and the output iterators
* @param <INPUT> The value type of the input iterator
* @param <OUTPUT> The value type of the output iterator
*/
static <K, INPUT, OUTPUT> TableIterator<K, OUTPUT> convert(KeyValueIterator<K, INPUT> i,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It would be nice to have just Iterator<OUTPUT> returned by keyIterator and valueIterator methods.

Suggested change
static <K, INPUT, OUTPUT> TableIterator<K, OUTPUT> convert(KeyValueIterator<K, INPUT> i,
static <K, INPUT, OUTPUT> Iterator<OUTPUT> convert(KeyValueIterator<K, INPUT> i,

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No, it can't since the returned iterator needs a close() method for releasing resources.

Function<Table.KeyValue<K, INPUT>, OUTPUT> converter) throws RocksDatabaseException, CodecException {
return new TableIterator<K, OUTPUT>() {
@Override
public boolean hasNext() {
return i.hasNext();
}

@Override
public OUTPUT next() {
return converter.apply(i.next());
}

@Override
public void close() throws RocksDatabaseException {
i.close();
}

@Override
public void seekToFirst() {
i.seekToFirst();
}

@Override
public void seekToLast() {
i.seekToLast();
}

@Override
public OUTPUT seek(K key) throws RocksDatabaseException, CodecException {
return converter.apply(i.seek(key));
}

@Override
public void removeFromDB() throws RocksDatabaseException, CodecException {
i.removeFromDB();
}
};
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,6 @@
import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.NEITHER;
import static org.apache.hadoop.hdds.utils.db.Table.KeyValueIterator.Type.VALUE_ONLY;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
Expand Down Expand Up @@ -316,10 +315,5 @@ public void testIteratorType() {

assertTrue(KEY_AND_VALUE.readKey());
assertTrue(KEY_AND_VALUE.readValue());

assertEquals(KEY_ONLY, NEITHER.addKey());
assertEquals(KEY_ONLY, KEY_ONLY.addKey());
assertEquals(KEY_AND_VALUE, VALUE_ONLY.addKey());
assertEquals(KEY_AND_VALUE, KEY_AND_VALUE.addKey());
}
}
Loading