@@ -43,7 +43,6 @@
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -192,8 +191,7 @@ public void testGetDeletedDirectoryInfo()

// Retrieve the object ID of dir1 from directory table.
Long directoryObjectId = null;
-try (
-TableIterator<?, ? extends Table.KeyValue<?, OmDirectoryInfo>> iterator
+try (Table.KeyValueIterator<?, OmDirectoryInfo> iterator
= reconDirTable.iterator()) {
if (iterator.hasNext()) {
directoryObjectId = iterator.next().getValue().getObjectID();
@@ -421,22 +419,22 @@ private void cleanupTables() throws IOException {

Table<String, OmKeyInfo> deletedDirTable =
metadataManager.getDeletedDirTable();
-try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it = deletedDirTable.iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> it = deletedDirTable.iterator()) {
removeAllFromDB(it, deletedDirTable);
}
Table<String, OmKeyInfo> fileTable = metadataManager.getFileTable();
-try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it = fileTable.iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> it = fileTable.iterator()) {
removeAllFromDB(it, fileTable);
}
Table<String, OmDirectoryInfo> directoryTable =
metadataManager.getDirectoryTable();
-try (TableIterator<String, ? extends Table.KeyValue<String, ?>> it = directoryTable.iterator()) {
+try (Table.KeyValueIterator<String, OmDirectoryInfo> it = directoryTable.iterator()) {
removeAllFromDB(it, directoryTable);
}
}

private static void removeAllFromDB(
-TableIterator<String, ? extends Table.KeyValue<String, ?>> iterator,
+Table.KeyValueIterator<String, ?> iterator,
Table<String, ?> table) throws IOException {
List<String> keysToDelete = new ArrayList<>();
while (iterator.hasNext()) {
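For reference, a minimal sketch of the iteration pattern the hunks above and below converge on: Table.iterator() hands back a Table.KeyValueIterator, which is opened in try-with-resources and walked with hasNext()/next(). The collectKeys helper is hypothetical; the sketch assumes the iterator is AutoCloseable and that Table.KeyValue exposes getKey()/getValue(), as the converted call sites suggest.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class KeyValueIteratorSketch {

  /** Collect every key currently stored in the given table (hypothetical helper). */
  static List<String> collectKeys(Table<String, OmKeyInfo> table) throws IOException {
    List<String> keys = new ArrayList<>();
    // The iterator is assumed to be AutoCloseable, so try-with-resources releases it.
    try (Table.KeyValueIterator<String, OmKeyInfo> it = table.iterator()) {
      while (it.hasNext()) {
        Table.KeyValue<String, OmKeyInfo> kv = it.next();
        keys.add(kv.getKey());
      }
    }
    return keys;
  }

  private KeyValueIteratorSketch() { }
}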
@@ -31,7 +31,6 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -42,8 +41,8 @@
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
-import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager;
import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.spi.impl.ReconContainerMetadataManagerImpl;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -132,15 +131,13 @@ public void testReconGetsSnapshotFromLeader() throws Exception {
// Sync data to Recon
impl.syncDataFromOM();

-ReconContainerMetadataManager reconContainerMetadataManager =
-recon.getReconServer().getReconContainerMetadataManager();
-try (TableIterator iterator =
-reconContainerMetadataManager.getContainerTableIterator()) {
+final ReconContainerMetadataManagerImpl reconContainerMetadataManager =
+(ReconContainerMetadataManagerImpl) recon.getReconServer().getReconContainerMetadataManager();
+try (Table.KeyValueIterator<ContainerKeyPrefix, Integer> iterator
+= reconContainerMetadataManager.getContainerKeyTableForTesting().iterator()) {
String reconKeyPrefix = null;
while (iterator.hasNext()) {
-Table.KeyValue<ContainerKeyPrefix, Integer> keyValue =
-(Table.KeyValue<ContainerKeyPrefix, Integer>) iterator.next();
-reconKeyPrefix = keyValue.getKey().getKeyPrefix();
+reconKeyPrefix = iterator.next().getKey().getKeyPrefix();
}
assertEquals(
String.format("/%s/%s/%s", VOL_NAME, VOL_NAME, keyPrefix),
@@ -91,7 +91,6 @@
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.ClientConfigForTesting;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -366,8 +365,7 @@ public void testKeyHSyncThenClose() throws Exception {

OMMetadataManager metadataManager = ozoneManager.getMetadataManager();
// deletedTable should not have an entry for file at all in this case
-try (TableIterator<String,
-? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo>
tableIter = metadataManager.getDeletedTable().iterator()) {
while (tableIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = tableIter.next();
@@ -609,7 +607,7 @@ private List<OmKeyInfo> getOpenKeyInfo(BucketLayout bucketLayout) {

Table<String, OmKeyInfo> openFileTable =
cluster.getOzoneManager().getMetadataManager().getOpenKeyTable(bucketLayout);
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+try (Table.KeyValueIterator<String, OmKeyInfo>
iterator = openFileTable.iterator()) {
while (iterator.hasNext()) {
omKeyInfo.add(iterator.next().getValue());
@@ -624,7 +622,7 @@ private List<OmKeyInfo> getKeyInfo(BucketLayout bucketLayout) {

Table<String, OmKeyInfo> openFileTable =
cluster.getOzoneManager().getMetadataManager().getKeyTable(bucketLayout);
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+try (Table.KeyValueIterator<String, OmKeyInfo>
iterator = openFileTable.iterator()) {
while (iterator.hasNext()) {
omKeyInfo.add(iterator.next().getValue());
@@ -1134,7 +1132,7 @@ public void testDisableHsync() throws Exception {
* @return OmKeyInfo
*/
private OmKeyInfo getFirstKeyInTable(String keyName, Table<String, OmKeyInfo> openKeyTable) throws IOException {
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> it = openKeyTable.iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> it = openKeyTable.iterator()) {
assertTrue(it.hasNext());
Table.KeyValue<String, OmKeyInfo> kv = it.next();
String dbOpenKey = kv.getKey();
@@ -1543,7 +1541,7 @@ public void testHSyncKeyOverwriteHSyncKey() throws Exception {

private Map<String, OmKeyInfo> getAllOpenKeys(Table<String, OmKeyInfo> table) throws IOException {
Map<String, OmKeyInfo> keys = new HashMap<String, OmKeyInfo>();
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> tableIter = table.iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> tableIter = table.iterator()) {
while (tableIter.hasNext()) {
Table.KeyValue<String, OmKeyInfo> kv = tableIter.next();
String key = kv.getKey();
@@ -1555,7 +1553,7 @@ private Map<String, OmKeyInfo> getAllOpenKeys(Table<String, OmKeyInfo> table) th

private Map<String, RepeatedOmKeyInfo> getAllDeletedKeys(Table<String, RepeatedOmKeyInfo> table) throws IOException {
Map<String, RepeatedOmKeyInfo> keys = new HashMap<String, RepeatedOmKeyInfo>();
-try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>> tableIter = table.iterator()) {
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo> tableIter = table.iterator()) {
while (tableIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = tableIter.next();
String key = kv.getKey();
@@ -38,7 +38,6 @@
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -247,7 +246,7 @@ public static Map<String, OmKeyInfo> createKeys(MiniOzoneCluster cluster, int nu
public static void cleanupDeletedTable(OzoneManager ozoneManager) throws IOException {
Table<String, RepeatedOmKeyInfo> deletedTable = ozoneManager.getMetadataManager().getDeletedTable();
List<String> nameList = new ArrayList<>();
-try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>> keyIter = deletedTable.iterator()) {
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo> keyIter = deletedTable.iterator()) {
while (keyIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
nameList.add(kv.getKey());
@@ -265,7 +264,7 @@ public static void cleanupDeletedTable(OzoneManager ozoneManager) throws IOExcep
public static void cleanupOpenKeyTable(OzoneManager ozoneManager, BucketLayout bucketLayout) throws IOException {
Table<String, OmKeyInfo> openKeyTable = ozoneManager.getMetadataManager().getOpenKeyTable(bucketLayout);
List<String> nameList = new ArrayList<>();
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> keyIter = openKeyTable.iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> keyIter = openKeyTable.iterator()) {
while (keyIter.hasNext()) {
Table.KeyValue<String, OmKeyInfo> kv = keyIter.next();
nameList.add(kv.getKey());
@@ -54,7 +54,6 @@
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.ClientVersion;
@@ -179,11 +178,10 @@ private void testPutKeySuccessWithBlockTokenWithBucketLayout(
omMetadataManager.getBucketTable().get(bucketKey).getObjectID());
String keyPrefix =
bucketLayout.isFileSystemOptimized() ? bucketId : bucketKey;
-Table table = omMetadataManager.getKeyTable(bucketLayout);
+Table<String, OmKeyInfo> table = omMetadataManager.getKeyTable(bucketLayout);

// Check table entry.
-try (
-TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+try (Table.KeyValueIterator<String, OmKeyInfo>
keyIterator = table.iterator()) {
Table.KeyValue<String, OmKeyInfo> kv =
keyIterator.seek(keyPrefix + "/" + keyName);
@@ -315,7 +313,7 @@ public void testPreallocateFileRecovery(long dataSize) throws Exception {
// check unused pre-allocated blocks are reclaimed
Table<String, RepeatedOmKeyInfo> deletedTable =
getCluster().getOzoneManager().getMetadataManager().getDeletedTable();
-try (TableIterator<String, ? extends Table.KeyValue<String, RepeatedOmKeyInfo>>
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo>
keyIter = deletedTable.iterator()) {
while (keyIter.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> kv = keyIter.next();
@@ -37,7 +37,6 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.BucketArgs;
@@ -142,7 +141,7 @@ private boolean assertKeyCount(
String dbKey, int expectedCnt, String keyName) {
int countKeys = 0;
int matchingKeys = 0;
-try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+try (Table.KeyValueIterator<String, OmKeyInfo>
itr = keyTable.iterator()) {
itr.seek(dbKey);
while (itr.hasNext()) {
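Several of the converted lookups above, such as the testPutKey and assertKeyCount hunks, also rely on positional seeks. A minimal sketch of that pattern, assuming seek() positions the iterator at the requested key (or the next greater one) and returns the entry it lands on, or null when nothing matches; findAtOrAfter is a hypothetical helper:

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;

final class SeekSketch {

  /** Return the value stored at dbKey, or at the next greater key, if any (hypothetical helper). */
  static OmKeyInfo findAtOrAfter(Table<String, OmKeyInfo> table, String dbKey) throws IOException {
    try (Table.KeyValueIterator<String, OmKeyInfo> it = table.iterator()) {
      // seek() is assumed to return the entry it positions on, mirroring the testPutKey check.
      Table.KeyValue<String, OmKeyInfo> kv = it.seek(dbKey);
      return kv == null ? null : kv.getValue();
    }
  }

  private SeekSketch() { }
}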
@@ -46,7 +46,6 @@
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.ratis.RatisHelper;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdfs.LogVerificationAppender;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
@@ -455,7 +454,7 @@ public void testKeyDeletion() throws Exception {
GenericTestUtils.waitFor(() -> {
Table<String, RepeatedOmKeyInfo> deletedTable =
om.getMetadataManager().getDeletedTable();
-try (TableIterator<?, ?> iterator = deletedTable.iterator()) {
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo> iterator = deletedTable.iterator()) {
return !iterator.hasNext();
} catch (Exception ex) {
return false;
@@ -52,7 +52,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -197,7 +196,7 @@ public void testDeleteEmptyDirectory() throws Exception {
assertEquals(1, metrics.getNumDirsPurged());
assertEquals(1, metrics.getNumDirsSentForPurge());

-try (TableIterator<?, ? extends Table.KeyValue<?, OmDirectoryInfo>>
+try (Table.KeyValueIterator<?, OmDirectoryInfo>
iterator = dirTable.iterator()) {
assertTrue(iterator.hasNext());
assertEquals(root.getName(), iterator.next().getValue().getName());
@@ -779,20 +778,20 @@ private void cleanupTables() throws IOException {
OMMetadataManager metadataManager =
cluster.getOzoneManager().getMetadataManager();

-try (TableIterator<?, ?> it = metadataManager.getDeletedDirTable()
+try (Table.KeyValueIterator<String, OmKeyInfo> it = metadataManager.getDeletedDirTable()
.iterator()) {
removeAllFromDB(it);
}
-try (TableIterator<?, ?> it = metadataManager.getFileTable().iterator()) {
+try (Table.KeyValueIterator<String, OmKeyInfo> it = metadataManager.getFileTable().iterator()) {
removeAllFromDB(it);
}
-try (TableIterator<?, ?> it = metadataManager.getDirectoryTable()
+try (Table.KeyValueIterator<String, OmDirectoryInfo> it = metadataManager.getDirectoryTable()
.iterator()) {
removeAllFromDB(it);
}
}

-private static void removeAllFromDB(TableIterator<?, ?> iterator)
+private static void removeAllFromDB(Table.KeyValueIterator<?, ?> iterator)
throws IOException {
while (iterator.hasNext()) {
iterator.next();
@@ -55,7 +55,6 @@
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -504,8 +503,7 @@ public void testSnapshotWithFSO() throws Exception {
OmSnapshot snap1 = rcSnap1.get();
Table<String, OmKeyInfo> snap1KeyTable =
snap1.getMetadataManager().getFileTable();
-try (TableIterator<String, ? extends Table.KeyValue<String,
-RepeatedOmKeyInfo>> iterator = deletedTable.iterator()) {
+try (Table.KeyValueIterator<String, RepeatedOmKeyInfo> iterator = deletedTable.iterator()) {
while (iterator.hasNext()) {
Table.KeyValue<String, RepeatedOmKeyInfo> next = iterator.next();
String activeDBDeletedKey = next.getKey();
@@ -43,7 +43,6 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -207,8 +206,7 @@ public void testSnapshotNameConsistency() throws Exception {
String snapshotPrefix = OM_KEY_PREFIX + volumeName +
OM_KEY_PREFIX + bucketName;
SnapshotInfo snapshotInfo = null;
-try (TableIterator<String, ?
-extends Table.KeyValue<String, SnapshotInfo>>
+try (Table.KeyValueIterator<String, SnapshotInfo>
iterator = ozoneManager.getMetadataManager()
.getSnapshotInfoTable().iterator(snapshotPrefix)) {
while (iterator.hasNext()) {
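The snapshot-name hunk above scans with a prefixed iterator. A short generic sketch of that usage, assuming iterator(prefix) restricts the scan to keys starting with the given prefix; keysUnderPrefix is a hypothetical helper modeled on the isKeyInTable method later in this diff:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.utils.db.Table;

final class PrefixScanSketch {

  /** List every key stored under the given prefix (hypothetical helper). */
  static <V> List<String> keysUnderPrefix(Table<String, V> table, String prefix) throws IOException {
    List<String> keys = new ArrayList<>();
    // iterator(prefix) is assumed to yield only entries whose keys start with the prefix.
    try (Table.KeyValueIterator<String, V> it = table.iterator(prefix)) {
      while (it.hasNext()) {
        keys.add(it.next().getKey());
      }
    }
    return keys;
  }

  private PrefixScanSketch() { }
}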
@@ -49,7 +49,6 @@
import org.apache.hadoop.hdds.utils.TransactionInfo;
import org.apache.hadoop.hdds.utils.db.RDBCheckpointUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
@@ -308,7 +307,7 @@ public void testSnapshotAndKeyDeletionBackgroundServices()
}

private static <V> boolean isKeyInTable(String key, Table<String, V> table) {
-try (TableIterator<String, ? extends Table.KeyValue<String, V>> iterator
+try (Table.KeyValueIterator<String, V> iterator
= table.iterator()) {
while (iterator.hasNext()) {
Table.KeyValue<String, V> next = iterator.next();
@@ -429,8 +428,7 @@ private List<CompactionLogEntry> getCompactionLogEntries(OzoneManager om)
private List<CompactionLogEntry> getCompactionLogEntries(OzoneManager om)
throws IOException {
List<CompactionLogEntry> compactionLogEntries = new ArrayList<>();
-try (TableIterator<String,
-? extends Table.KeyValue<String, CompactionLogEntry>>
+try (Table.KeyValueIterator<String, CompactionLogEntry>
iterator = om.getMetadataManager().getCompactionLogTable()
.iterator()) {
iterator.seekToFirst();
@@ -38,7 +38,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.hdds.utils.db.Table;
-import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -238,7 +237,7 @@ public void testExclusiveSizeWithDirectoryDeepClean() throws Exception {
put("snap3", 0L);
}};

-try (TableIterator<String, ? extends Table.KeyValue<String, SnapshotInfo>>
+try (Table.KeyValueIterator<String, SnapshotInfo>
iterator = snapshotInfoTable.iterator()) {
while (iterator.hasNext()) {
Table.KeyValue<String, SnapshotInfo> snapshotEntry = iterator.next();