diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java index c156b8e4d67a..bc6ac38c72e4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java @@ -139,7 +139,7 @@ public static List<byte[]> listColumnFamiliesEmptyOptions(final String path) } } - static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions, + public static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions, ManagedWriteOptions writeOptions, Set<TableConfig> families, boolean readOnly) throws IOException { List<ColumnFamilyDescriptor> descriptors = null; @@ -460,8 +460,13 @@ public void ingestExternalFile(ColumnFamily family, List<String> files, public void put(ColumnFamily family, byte[] key, byte[] value) throws IOException { + put(family.getHandle(), key, value); + } + + public void put(ColumnFamilyHandle handle, byte[] key, byte[] value) + throws IOException { try (UncheckedAutoCloseable ignored = acquire()) { - db.get().put(family.getHandle(), writeOptions, key, value); + db.get().put(handle, writeOptions, key, value); } catch (RocksDBException e) { closeOnError(e); throw toIOException(this, "put " + bytes2String(key), e); @@ -621,9 +626,14 @@ RocksCheckpoint createCheckpoint() { */ Supplier<byte[]> keyMayExist(ColumnFamily family, byte[] key) throws IOException { + return keyMayExist(family.getHandle(), key); + } + + public Supplier<byte[]> keyMayExist(ColumnFamilyHandle handle, byte[] key) + throws IOException { try (UncheckedAutoCloseable ignored = acquire()) { final Holder<byte[]> out = new Holder<>(); - return db.get().keyMayExist(family.getHandle(), key, out) ? + return db.get().keyMayExist(handle, key, out) ? out::getValue : null; } } @@ -652,16 +662,39 @@ public Collection<ColumnFamily> getExtraColumnFamilies() { return Collections.unmodifiableCollection(columnFamilies.values()); } - byte[] get(ColumnFamily family, byte[] key) throws IOException { + public void dropColumnFamily(ColumnFamilyHandle handle) throws IOException { try (UncheckedAutoCloseable ignored = acquire()) { - return db.get().get(family.getHandle(), key); + db.get().dropColumnFamily(handle); } catch (RocksDBException e) { closeOnError(e); - final String message = "get " + bytes2String(key) + " from " + family; + throw toIOException(this, "dropColumnFamily", e); + } + } + + public ColumnFamilyHandle createColumnFamily(ColumnFamilyDescriptor descriptor) throws IOException { + try (UncheckedAutoCloseable ignored = acquire()) { + return db.get().createColumnFamily(descriptor); + } catch (RocksDBException e) { + closeOnError(e); + throw toIOException(this, "createColumnFamily", e); + } + } + + public byte[] get(ColumnFamily family, byte[] key) throws IOException { + return get(family.getHandle(), key, family.getName()); + } + + public byte[] get(ColumnFamilyHandle handle, byte[] key, String familyName) throws IOException { + try (UncheckedAutoCloseable ignored = acquire()) { + return db.get().get(handle, key); + } catch (RocksDBException e) { + closeOnError(e); + final String message = "get " + bytes2String(key) + " from " + familyName; throw toIOException(this, message, e); } } + /** * Get the value mapped to the given key.
* diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java index 5a5a577351b1..6248dfba321c 100644 --- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java +++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java @@ -102,4 +102,5 @@ public void deleteFile(LiveFileMetaData fileToBeDeleted) File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName()); ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60)); } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestFSORepairTool.java new file mode 100644 index 000000000000..430a931d0547 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestFSORepairTool.java @@ -0,0 +1,552 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.fs.ozone; + +import org.apache.commons.io.IOUtils; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.contract.ContractTestUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.repair.om.FSORepairTool; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; + +/** + * FSORepairTool test cases. + */ +public class TestFSORepairTool { + public static final Logger LOG = + LoggerFactory.getLogger(TestFSORepairTool.class); + + private static MiniOzoneHAClusterImpl cluster; + private static FileSystem fs; + private static OzoneClient client; + + + @BeforeAll + public static void init() throws Exception { + // Set configs. + OzoneConfiguration conf = new OzoneConfiguration(); + // deletion services will be triggered manually. + conf.setTimeDuration(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, + 1_000_000, TimeUnit.SECONDS); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1_000_000, + TimeUnit.SECONDS); + conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 10); + conf.setInt(OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK, 10); + conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); + // Since delete services use RocksDB iterators, make sure the double + // buffer is flushed between runs. + conf.setInt(OMConfigKeys.OZONE_OM_UNFLUSHED_TRANSACTION_MAX_COUNT, 1); + + // Build cluster. + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) + .setNumOfOzoneManagers(1) + .setOMServiceId("omservice") + .setNumDatanodes(3) + .build(); + cluster.waitForClusterToBeReady(); + + // Init ofs. 
+ final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, cluster.getOzoneManager().getOMNodeId()); + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + fs = FileSystem.get(conf); + client = OzoneClientFactory.getRpcClient("omservice", conf); + } + + @AfterEach + public void cleanNamespace() throws Exception { + if (fs.exists(new Path("/vol1"))) { + fs.delete(new Path("/vol1"), true); + } + if (fs.exists(new Path("/vol2"))) { + fs.delete(new Path("/vol2"), true); + } + runDeletes(); + assertFileAndDirTablesEmpty(); + } + + @AfterAll + public static void teardown() { + if (cluster != null) { + cluster.shutdown(); + } + IOUtils.closeQuietly(fs); + } + + @Test + public void testConnectedTreeOneBucket() throws Exception { + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report expectedReport = buildConnectedTree("vol1", "bucket1"); + + // Test the connected tree in debug mode. + FSORepairTool fsoTool = new FSORepairTool(getOmDB(), + getOmDBLocation(), true); + FSORepairTool.Report debugReport = fsoTool.run(); + + Assertions.assertEquals(expectedReport, debugReport); + assertConnectedTreeReadable("vol1", "bucket1"); + assertDeleteTablesEmpty(); + + // Running again in repair mode should give same results since the tree + // is connected. + fsoTool = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report repairReport = fsoTool.run(); + + Assertions.assertEquals(expectedReport, repairReport); + assertConnectedTreeReadable("vol1", "bucket1"); + assertDeleteTablesEmpty(); + } + + @Test + public void testReportedDataSize() throws Exception { + FSORepairTool.Report report1 = buildDisconnectedTree("vol1", "bucket1", 10); + FSORepairTool.Report report2 = buildConnectedTree("vol1", "bucket2", 10); + FSORepairTool.Report expectedReport = new FSORepairTool.Report(report1, report2); + + FSORepairTool + repair = new FSORepairTool(getOmDB(), + getOmDBLocation(), false); + FSORepairTool.Report debugReport = repair.run(); + Assertions.assertEquals(expectedReport, debugReport); + } + + @Test + public void testMultipleBucketsAndVolumes() throws Exception { + FSORepairTool.Report report1 = buildConnectedTree("vol1", "bucket1"); + FSORepairTool.Report report2 = buildDisconnectedTree("vol2", "bucket2"); + FSORepairTool.Report expectedAggregateReport = new org.apache.hadoop.ozone.repair.om.FSORepairTool.Report( + report1, report2); + + org.apache.hadoop.ozone.repair.om.FSORepairTool + repair = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report generatedReport = repair.run(); + + Assertions.assertEquals(generatedReport, expectedAggregateReport); + assertConnectedTreeReadable("vol1", "bucket1"); + assertDisconnectedTreePartiallyReadable("vol2", "bucket2"); + assertDisconnectedObjectsMarkedForDelete(1); + } + + /** + * Tests having multiple entries in the deleted file and directory tables + * for the same objects. + */ + @Test + public void testDeleteOverwrite() throws Exception { + // Create files and dirs under dir1. To make sure they are added to the + // delete table, the keys must have data. + buildConnectedTree("vol1", "bucket1", 10); + // Move soon to be disconnected objects to the deleted table. 
+ fs.delete(new Path("/vol1/bucket1/dir1/dir2/file3"), true); + fs.delete(new Path("/vol1/bucket1/dir1/dir2"), true); + fs.delete(new Path("/vol1/bucket1/dir1/file1"), true); + fs.delete(new Path("/vol1/bucket1/dir1/file2"), true); + + // Recreate deleted objects, then disconnect dir1. + // This means after the repair runs, these objects will be + // the deleted tables multiple times. Some will have the same dir1 parent ID + // in their key name too. + ContractTestUtils.touch(fs, new Path("/vol1/bucket1/dir1/dir2/file3")); + ContractTestUtils.touch(fs, new Path("/vol1/bucket1/dir1/file1")); + ContractTestUtils.touch(fs, new Path("/vol1/bucket1/dir1/file2")); + disconnectDirectory("dir1"); + + org.apache.hadoop.ozone.repair.om.FSORepairTool + repair = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report generatedReport = repair.run(); + + Assertions.assertEquals(1, generatedReport.getUnreachableDirs()); + Assertions.assertEquals(3, generatedReport.getUnreachableFiles()); + + assertDisconnectedObjectsMarkedForDelete(2); + } + + @Test + public void testEmptyFileTrees() throws Exception { + // Run when there are no file trees. + org.apache.hadoop.ozone.repair.om.FSORepairTool + repair = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report generatedReport = repair.run(); + Assertions.assertEquals(generatedReport, new org.apache.hadoop.ozone.repair.om.FSORepairTool.Report()); + assertDeleteTablesEmpty(); + + // Create an empty volume and bucket. + fs.mkdirs(new Path("/vol1")); + fs.mkdirs(new Path("/vol2/bucket1")); + + // Run on an empty volume and bucket. + repair = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + generatedReport = repair.run(); + Assertions.assertEquals(generatedReport, new org.apache.hadoop.ozone.repair.om.FSORepairTool.Report()); + assertDeleteTablesEmpty(); + } + + @Test + public void testNonFSOBucketsSkipped() throws Exception { + ObjectStore store = client.getObjectStore(); + try { + // Create legacy and OBS buckets. + store.createVolume("vol1"); + store.getVolume("vol1").createBucket("obs-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.OBJECT_STORE) + .build()); + store.getVolume("vol1").createBucket("legacy-bucket", + BucketArgs.newBuilder().setBucketLayout(BucketLayout.LEGACY) + .build()); + + // Put a key in the legacy and OBS buckets. + OzoneOutputStream obsStream = store.getVolume("vol1") + .getBucket("obs-bucket") + .createKey("prefix/test-key", 3); + obsStream.write(new byte[]{1, 1, 1}); + obsStream.close(); + + OzoneOutputStream legacyStream = store.getVolume("vol1") + .getBucket("legacy-bucket") + .createKey("prefix/test-key", 3); + legacyStream.write(new byte[]{1, 1, 1}); + legacyStream.close(); + + // Add an FSO bucket with data. + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report connectReport = buildConnectedTree("vol1", "fso" + + "-bucket"); + + // Even in repair mode there should be no action. legacy and obs buckets + // will be skipped and FSO tree is connected. 
+ org.apache.hadoop.ozone.repair.om.FSORepairTool + repair = new org.apache.hadoop.ozone.repair.om.FSORepairTool(getOmDB(), + getOmDBLocation(), false); + org.apache.hadoop.ozone.repair.om.FSORepairTool.Report generatedReport = repair.run(); + + Assertions.assertEquals(connectReport, generatedReport); + assertConnectedTreeReadable("vol1", "fso-bucket"); + assertDeleteTablesEmpty(); + } finally { + // Need to manually delete obs bucket. It cannot be deleted with ofs as + // part of the normal test cleanup. + store.getVolume("vol1").getBucket("obs-bucket") + .deleteKey("prefix/test-key"); + store.getVolume("vol1").deleteBucket("obs-bucket"); + } + } + + + private org.apache.hadoop.ozone.repair.om.FSORepairTool.Report buildConnectedTree(String volume, String bucket) + throws Exception { + return buildConnectedTree(volume, bucket, 0); + } + + /** + * Creates a tree with 3 reachable directories and 4 reachable files. + */ + private org.apache.hadoop.ozone.repair.om.FSORepairTool.Report buildConnectedTree(String volume, String bucket, + int fileSize) + throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + fs.mkdirs(dir1); + fs.mkdirs(dir2); + fs.mkdirs(dir3); + + // Content to put in every file. + String data = new String(new char[fileSize]); + + FSDataOutputStream stream = fs.create(file1); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file2); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file3); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + stream = fs.create(file4); + stream.write(data.getBytes(StandardCharsets.UTF_8)); + stream.close(); + + assertConnectedTreeReadable(volume, bucket); + + return new org.apache.hadoop.ozone.repair.om.FSORepairTool.Report.Builder() + .setReachableDirs(3) + .setReachableFiles(4) + .setReachableBytes(fileSize * 4L) + .build(); + } + + private void assertConnectedTreeReadable(String volume, String bucket) + throws IOException { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertTrue(fs.exists(dir1)); + Assertions.assertTrue(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertTrue(fs.exists(file1)); + Assertions.assertTrue(fs.exists(file2)); + Assertions.assertTrue(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } + + private org.apache.hadoop.ozone.repair.om.FSORepairTool.Report buildDisconnectedTree(String volume, String bucket) + throws Exception { + return buildDisconnectedTree(volume, bucket, 0); + } + + /** + * Creates a tree with 2 reachable directories, 1 reachable file, 1 + * unreachable directory, and 3 unreachable files. 
+ */ + private org.apache.hadoop.ozone.repair.om.FSORepairTool.Report buildDisconnectedTree(String volume, String bucket, + int fileSize) throws Exception { + buildConnectedTree(volume, bucket, fileSize); + + // Manually remove dir1. This should disconnect 3 of the files and 1 of + // the directories. + disconnectDirectory("dir1"); + + assertDisconnectedTreePartiallyReadable(volume, bucket); + + return new org.apache.hadoop.ozone.repair.om.FSORepairTool.Report.Builder() + .setReachableDirs(1) + .setReachableFiles(1) + .setReachableBytes(fileSize) + // dir1 does not count towards the unreachable directories the tool + // will see. It was deleted completely so the tool will never see it. + .setUnreachableDirs(1) + .setUnreachableFiles(3) + .setUnreachableBytes(fileSize * 3L) + .build(); + } + + private void disconnectDirectory(String dirName) throws Exception { + OzoneManager leader = cluster.getOMLeader(); + Table dirTable = + leader.getMetadataManager().getDirectoryTable(); + try (TableIterator> iterator = + dirTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + String key = entry.getKey(); + if (key.contains(dirName)) { + dirTable.delete(key); + break; + } + } + } + } + + private void assertDisconnectedTreePartiallyReadable( + String volume, String bucket) throws Exception { + Path bucketPath = new Path("/" + volume + "/" + bucket); + Path dir1 = new Path(bucketPath, "dir1"); + Path file1 = new Path(dir1, "file1"); + Path file2 = new Path(dir1, "file2"); + + Path dir2 = new Path(bucketPath, "dir1/dir2"); + Path file3 = new Path(dir2, "file3"); + + Path dir3 = new Path(bucketPath, "dir3"); + Path file4 = new Path(bucketPath, "file4"); + + Assertions.assertFalse(fs.exists(dir1)); + Assertions.assertFalse(fs.exists(dir2)); + Assertions.assertTrue(fs.exists(dir3)); + Assertions.assertFalse(fs.exists(file1)); + Assertions.assertFalse(fs.exists(file2)); + Assertions.assertFalse(fs.exists(file3)); + Assertions.assertTrue(fs.exists(file4)); + } + + /** + * Checks that the disconnected tree's unreachable objects are correctly + * moved to the delete table. If the tree was written and deleted multiple + * times, it makes sure the delete entries with the same name are preserved. + */ + private void assertDisconnectedObjectsMarkedForDelete(int numWrites) + throws Exception { + + Map pendingDeleteDirCounts = new HashMap<>(); + + // Check deleted directory table. + OzoneManager leader = cluster.getOMLeader(); + Table deletedDirTable = + leader.getMetadataManager().getDeletedDirTable(); + try (TableIterator> iterator = + deletedDirTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + String key = entry.getKey(); + OmKeyInfo value = entry.getValue(); + + String dirName = key.split("/")[4]; + LOG.info("In deletedDirTable, extracting directory name {} from DB " + + "key {}", dirName, key); + + // Check that the correct dir info was added. + // FSO delete path will fill in the whole path to the key in the + // proto when it is deleted. Once the tree is disconnected that can't + // be done, so just make sure the dirName contained in the key name + // somewhere. + Assertions.assertTrue(value.getKeyName().contains(dirName)); + + int count = pendingDeleteDirCounts.getOrDefault(dirName, 0); + pendingDeleteDirCounts.put(dirName, count + 1); + } + } + + // 1 directory is disconnected in the tree. dir1 was totally deleted so + // the repair tool will not see it. 
+ Assertions.assertEquals(1, pendingDeleteDirCounts.size()); + Assertions.assertEquals(numWrites, pendingDeleteDirCounts.get("dir2")); + + // Check that disconnected files were put in deleting tables. + Map pendingDeleteFileCounts = new HashMap<>(); + + Table deletedFileTable = + leader.getMetadataManager().getDeletedTable(); + try (TableIterator> iterator = + deletedFileTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + String key = entry.getKey(); + RepeatedOmKeyInfo value = entry.getValue(); + + String[] keyParts = key.split("/"); + String fileName = keyParts[keyParts.length - 1]; + + LOG.info("In deletedTable, extracting file name {} from DB " + + "key {}", fileName, key); + + for (OmKeyInfo fileInfo: value.getOmKeyInfoList()) { + // Check that the correct file info was added. + Assertions.assertTrue(fileInfo.getKeyName().contains(fileName)); + + int count = pendingDeleteFileCounts.getOrDefault(fileName, 0); + pendingDeleteFileCounts.put(fileName, count + 1); + } + } + } + + // 3 files are disconnected in the tree. + // TODO: dir2 ended up in here with count = 1. file3 also had count=1 + // Likely that the dir2/file3 entry got split in two. + Assertions.assertEquals(3, pendingDeleteFileCounts.size()); + Assertions.assertEquals(numWrites, pendingDeleteFileCounts.get("file1")); + Assertions.assertEquals(numWrites, pendingDeleteFileCounts.get("file2")); + Assertions.assertEquals(numWrites, pendingDeleteFileCounts.get("file3")); + } + + private void assertDeleteTablesEmpty() throws IOException { + OzoneManager leader = cluster.getOMLeader(); + Assertions.assertTrue(leader.getMetadataManager().getDeletedDirTable().isEmpty()); + Assertions.assertTrue(leader.getMetadataManager().getDeletedTable().isEmpty()); + } + + private void runDeletes() throws Exception { + OzoneManager leader = cluster.getOMLeader(); + + int i = 0; + while (!leader.getMetadataManager().getDeletedDirTable().isEmpty()) { + LOG.info("Running iteration {} of DirectoryDeletingService.", i++); + leader.getKeyManager().getDirDeletingService().runPeriodicalTaskNow(); + // Wait for work from this run to flush through the double buffer. + Thread.sleep(500); + } + + i = 0; + while (!leader.getMetadataManager().getDeletedTable().isEmpty()) { + LOG.info("Running iteration {} of KeyDeletingService.", i++); + leader.getKeyManager().getDeletingService().runPeriodicalTaskNow(); + // Wait for work from this run to flush through the double buffer. + Thread.sleep(500); + } + } + + private void assertFileAndDirTablesEmpty() throws Exception { + OzoneManager leader = cluster.getOMLeader(); + Assertions.assertTrue(leader.getMetadataManager().getDirectoryTable().isEmpty()); + Assertions.assertTrue(leader.getMetadataManager().getFileTable().isEmpty()); + } + + private DBStore getOmDB() { + return cluster.getOMLeader().getMetadataManager().getStore(); + } + + private String getOmDBLocation() { + return cluster.getOMLeader().getMetadataManager().getStore().getDbLocation().toString(); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java new file mode 100644 index 000000000000..35e3bd5936a2 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * CLI wrapper for the FSO tree repair tool. + */ +@CommandLine.Command( + name = "fso-tree-repair", + description = "Identify and repair a disconnected FSO tree, and mark " + + "unreachable entries for deletion. OM should be " + + "stopped while this tool is run. Information will be logged at " + + "INFO and DEBUG levels." +) +@MetaInfServices(SubcommandWithParent.class) +public class FSORepairCLI implements Callable<Void>, SubcommandWithParent { + + @CommandLine.ParentCommand + private OzoneRepair parent; + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Path to OM RocksDB") + private String dbPath; + + @CommandLine.Option(names = {"--dry-run"}, + description = "Report unreachable entries without modifying the DB") + private boolean dryRun; + + @CommandLine.Option(names = {"--verbose"}, + description = "More verbose output.") + private boolean verbose; + + + @Override + public Void call() throws Exception { + try { + FSORepairTool + repairTool = new FSORepairTool(dbPath, dryRun); + repairTool.run(); + } catch (Exception ex) { + throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); + } + + if (verbose) { + System.out.println("FSO repair finished. See client logs for results."); + } + + return null; + } + + @Override + public Class<?> getParentType() { + return OzoneRepair.class; + } +} + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java new file mode 100644 index 000000000000..6ee551a6580d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -0,0 +1,688 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package org.apache.hadoop.ozone.repair.om; + + +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.db.RocksDatabase; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.hdds.utils.db.TableConfig; +import org.apache.hadoop.hdds.utils.db.DBProfile; +import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions; +import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.WithObjectID; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.ratis.util.Preconditions; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.Stack; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE; +import static org.apache.hadoop.hdds.utils.db.DBStoreBuilder.HDDS_DEFAULT_DB_PROFILE; +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; + +/** + * Base tool to identify disconnected FSO trees in all buckets. + * The tool will log information about unreachable files or directories. + * If deletes are still in progress (the deleted directory table is not empty), the tool may + * report that the tree is disconnected, even though pending deletes would + * fix the issue. + * + * Before using the tool, make sure all OMs are stopped, + * and that all Ratis logs have been flushed to the OM DB. This can be + * done using `ozone admin prepare` before running the tool, and `ozone admin + * cancelprepare` when done. + * + * The tool will run a DFS from each bucket, and save all reachable + * directories as keys in a new temporary RocksDB instance called "reachable.db" + * in the same directory as om.db. The tool + * will then scan the entire file and directory tables for each bucket to see + * if each object's parent is in the reachable table of reachable.db. The + * reachable table will be dropped and recreated for each bucket. + * The tool is idempotent. reachable.db will not be deleted automatically + * when the tool finishes, in case users want to manually inspect it. It can + * be safely deleted once the tool finishes.
+ */ +public class FSORepairTool { + public static final Logger LOG = + LoggerFactory.getLogger(org.apache.hadoop.ozone.repair.om.FSORepairTool.class); + + private final String omDBPath; + + private final DBStore store; + private final Table<String, OmVolumeArgs> volumeTable; + private final Table<String, OmBucketInfo> bucketTable; + private final Table<String, OmDirectoryInfo> directoryTable; + private final Table<String, OmKeyInfo> fileTable; + private final Table<String, OmKeyInfo> deletedDirectoryTable; + private final Table<String, RepeatedOmKeyInfo> deletedTable; + // The temporary DB is used to track which items have been seen. + // Since usage of this DB is simple, use it directly from + // RocksDB. + private String reachableDBPath; + private static final String REACHABLE_TABLE = "reachable"; + private static final byte[] REACHABLE_TABLE_BYTES = + REACHABLE_TABLE.getBytes(StandardCharsets.UTF_8); + private ColumnFamilyHandle reachableCFHandle; + private RocksDatabase reachableDB; + + private long reachableBytes; + private long reachableFiles; + private long reachableDirs; + private long unreachableBytes; + private long unreachableFiles; + private long unreachableDirs; + private boolean dryRun; + + public FSORepairTool(String dbPath, boolean dryRun) throws IOException { + this(getStoreFromPath(dbPath), dbPath, dryRun); + } + + /** + * Allows passing RocksDB instance from a MiniOzoneCluster directly to this + * class for testing. + */ + @VisibleForTesting + public FSORepairTool(DBStore dbStore, String dbPath, boolean isDryRun) throws IOException { + dryRun = isDryRun; + // Counters to track as we walk the tree. + reachableBytes = 0; + reachableFiles = 0; + reachableDirs = 0; + unreachableBytes = 0; + unreachableFiles = 0; + unreachableDirs = 0; + + this.store = dbStore; + this.omDBPath = dbPath; + volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, + String.class, + OmVolumeArgs.class); + bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, + String.class, + OmBucketInfo.class); + directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, + String.class, + OmDirectoryInfo.class); + fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, + String.class, + OmKeyInfo.class); + deletedDirectoryTable = store.getTable( + OmMetadataManagerImpl.DELETED_DIR_TABLE, + String.class, + OmKeyInfo.class); + deletedTable = store.getTable( + OmMetadataManagerImpl.DELETED_TABLE, + String.class, + RepeatedOmKeyInfo.class); + } + + protected static DBStore getStoreFromPath(String dbPath) throws IOException { + File omDBFile = new File(dbPath); + if (!omDBFile.exists() || !omDBFile.isDirectory()) { + throw new IOException(String.format("Specified OM DB instance %s does " + + "not exist or is not a RocksDB directory.", dbPath)); + } + // Load RocksDB and tables needed. + return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), + new File(dbPath).getParentFile()); + } + + public org.apache.hadoop.ozone.repair.om.FSORepairTool.Report run() throws IOException { + // Iterate all volumes. + try (TableIterator<String, ? extends Table.KeyValue<String, OmVolumeArgs>> + volumeIterator = volumeTable.iterator()) { + openReachableDB(); + + while (volumeIterator.hasNext()) { + Table.KeyValue<String, OmVolumeArgs> volumeEntry = + volumeIterator.next(); + String volumeKey = volumeEntry.getKey(); + + // Iterate all buckets in the volume.
+ try (TableIterator> + bucketIterator = bucketTable.iterator()) { + bucketIterator.seek(volumeKey); + while (bucketIterator.hasNext()) { + Table.KeyValue bucketEntry = + bucketIterator.next(); + String bucketKey = bucketEntry.getKey(); + OmBucketInfo bucketInfo = bucketEntry.getValue(); + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + LOG.debug("Skipping non-FSO bucket {}", bucketKey); + continue; + } + + // Stop this loop once we have seen all buckets in the current + // volume. + if (!bucketKey.startsWith(volumeKey)) { + break; + } + + // Start with a fresh list of reachable files for this bucket. + // Also clears partial state if the tool failed on a previous run. + dropReachableTableIfExists(); + createReachableTable(); + // Process one bucket's FSO tree at a time. + markReachableObjectsInBucket(volumeEntry.getValue(), bucketInfo); + handleUnreachableObjects(volumeEntry.getValue(), bucketInfo); + dropReachableTableIfExists(); + } + } + } + } finally { + closeReachableDB(); + } + + return buildReportAndLog(); + } + + private Report buildReportAndLog() { + Report report = new Report.Builder() + .setReachableDirs(reachableDirs) + .setReachableFiles(reachableFiles) + .setReachableBytes(reachableBytes) + .setUnreachableDirs(unreachableDirs) + .setUnreachableFiles(unreachableFiles) + .setUnreachableBytes(unreachableBytes) + .build(); + + LOG.info("\n{}", report); + return report; + } + + private void markReachableObjectsInBucket(OmVolumeArgs volume, + OmBucketInfo bucket) throws IOException { + LOG.info("Processing bucket {}", bucket.getBucketName()); + // Only put directories in the stack. + // Directory keys should have the form /volumeID/bucketID/parentID/name. + Stack dirKeyStack = new Stack<>(); + + // Since the tool uses parent directories to check for reachability, add + // a reachable entry for the bucket as well. + addReachableEntry(volume, bucket, bucket); + // Initialize the stack with all immediate child directories of the + // bucket, and mark them all as reachable. + Collection childDirs = + getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); + dirKeyStack.addAll(childDirs); + + while (!dirKeyStack.isEmpty()) { + // Get one directory and process its immediate children. + String currentDirKey = dirKeyStack.pop(); + OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); + if (currentDir == null) { + LOG.error("Directory key {} to be processed was not found in the " + + "directory table", currentDirKey); + continue; + } + + // TODO revisit this for a more memory efficient implementation, + // possibly making better use of RocksDB iterators. + childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, + currentDir); + dirKeyStack.addAll(childDirs); + } + } + + private void handleUnreachableObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Check for unreachable directories in the bucket. + String bucketPrefix = OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID(); + + try (TableIterator> dirIterator = + directoryTable.iterator()) { + dirIterator.seek(bucketPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue dirEntry = dirIterator.next(); + String dirKey = dirEntry.getKey(); + + // Only search directories in this bucket. 
+ if (!dirKey.startsWith(bucketPrefix)) { + break; + } + + if (!isReachable(dirKey)) { + LOG.debug("Found unreachable directory: {}", dirKey); + unreachableDirs++; + + if (!dryRun) { + LOG.debug("Marking unreachable directory {} for deletion.", dirKey); + OmDirectoryInfo dirInfo = dirEntry.getValue(); + markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), + dirKey, dirInfo); + } + } + } + } + + // Check for unreachable files + try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>> + fileIterator = fileTable.iterator()) { + fileIterator.seek(bucketPrefix); + while (fileIterator.hasNext()) { + Table.KeyValue<String, OmKeyInfo> fileEntry = fileIterator.next(); + String fileKey = fileEntry.getKey(); + // Only search files in this bucket. + if (!fileKey.startsWith(bucketPrefix)) { + break; + } + + OmKeyInfo fileInfo = fileEntry.getValue(); + if (!isReachable(fileKey)) { + LOG.debug("Found unreachable file: {}", fileKey); + unreachableBytes += fileInfo.getDataSize(); + unreachableFiles++; + + if (!dryRun) { + LOG.debug("Marking unreachable file {} for deletion.", + fileKey); + markFileForDeletion(fileKey, fileInfo); + } + } else { + // NOTE: We are deserializing the proto of every reachable file + // just to log its size. If we don't need this information we could + // save time by skipping this step. + reachableBytes += fileInfo.getDataSize(); + reachableFiles++; + } + } + } + } + + protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + fileTable.deleteWithBatch(batch, fileKey); + + RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); + RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + fileInfo, fileInfo.getUpdateID(), true); + // NOTE: The FSO code seems to write the open key entry with the whole + // path, using the object's names instead of their ID. This would only + // be possible when the file is deleted explicitly, and not part of a + // directory delete. It is also not possible here if the file's parent + // is gone. The name of the key does not matter so just use IDs. + deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); + + LOG.debug("Added entry {} to the deleted table: {}", + fileKey, updatedRepeatedOmKeyInfo); + + store.commitBatchOperation(batch); + } + } + + protected void markDirectoryForDeletion(String volumeName, String bucketName, + String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + directoryTable.deleteWithBatch(batch, dirKeyName); + // HDDS-7592: Make directory entries in deleted dir table unique. + String deleteDirKeyName = + dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); + + // Convert the directory to OmKeyInfo for deletion. + OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo( + volumeName, bucketName, dirInfo, dirInfo.getName()); + deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); + + store.commitBatchOperation(batch); + } + } + + private Collection<String> getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, + OmBucketInfo bucket, + WithObjectID currentDir) throws IOException { + + Collection<String> childDirs = new ArrayList<>(); + + try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>> + dirIterator = directoryTable.iterator()) { + String dirPrefix = buildReachableKey(volume, bucket, currentDir); + // Start searching the directory table at the current directory's + // prefix to get its immediate children.
+ dirIterator.seek(dirPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue childDirEntry = + dirIterator.next(); + String childDirKey = childDirEntry.getKey(); + // Stop processing once we have seen all immediate children of this + // directory. + if (!childDirKey.startsWith(dirPrefix)) { + break; + } + // This directory was reached by search. + addReachableEntry(volume, bucket, childDirEntry.getValue()); + childDirs.add(childDirKey); + reachableDirs++; + } + } + + return childDirs; + } + + /** + * Add the specified object to the reachable table, indicating it is part + * of the connected FSO tree. + */ + private void addReachableEntry(OmVolumeArgs volume, + OmBucketInfo bucket, WithObjectID object) throws IOException { + byte[] reachableKey = buildReachableKey(volume, bucket, object) + .getBytes(StandardCharsets.UTF_8); + // No value is needed for this table. + reachableDB.put(reachableCFHandle, reachableKey, new byte[]{}); + } + + /** + * Build an entry in the reachable table for the current object, which + * could be a bucket, file or directory. + */ + private static String buildReachableKey(OmVolumeArgs volume, + OmBucketInfo bucket, WithObjectID object) { + return OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID() + + OM_KEY_PREFIX + + object.getObjectID(); + } + + /** + * + * @param fileOrDirKey The key of a file or directory in RocksDB. + * @return true if the entry's parent is in the reachable table. + */ + protected boolean isReachable(String fileOrDirKey) throws IOException { + byte[] reachableParentKey = + buildReachableParentKey(fileOrDirKey).getBytes(StandardCharsets.UTF_8); + + return reachableDB.get(reachableCFHandle, reachableParentKey, REACHABLE_TABLE) != null; + } + + /** + * Build an entry in the reachable table for the current object's parent + * object. The object could be a file or directory. + */ + private static String buildReachableParentKey(String fileOrDirKey) { + String[] keyParts = fileOrDirKey.split(OM_KEY_PREFIX); + // Should be /volID/bucketID/parentID/name + // The first part will be blank since key begins with a slash. + Preconditions.assertTrue(keyParts.length >= 4); + String volumeID = keyParts[1]; + String bucketID = keyParts[2]; + String parentID = keyParts[3]; + + return OM_KEY_PREFIX + + volumeID + + OM_KEY_PREFIX + + bucketID + + OM_KEY_PREFIX + + parentID; + } + + private void openReachableDB() throws IOException { + File reachableDBFile = new File(new File(omDBPath).getParentFile(), + "reachable.db"); + LOG.info("Creating database of reachable directories at {}", + reachableDBFile); + // Delete the DB from the last run if it exists. 
+ if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + reachableDBPath = reachableDBFile.toString(); + reachableDB = buildReachableRocksDB(reachableDBFile); + } + + private RocksDatabase buildReachableRocksDB(File reachableDBFile) throws IOException { + DBProfile profile = new OzoneConfiguration().getEnum(HDDS_DB_PROFILE, HDDS_DEFAULT_DB_PROFILE); + Set tableConfigs = new HashSet<>(); + tableConfigs.add(new TableConfig("default", profile.getColumnFamilyOptions())); + + return RocksDatabase.open(reachableDBFile, + profile.getDBOptions(), + new ManagedWriteOptions(), + tableConfigs, false); + } + + private void closeReachableDB() { + if (reachableDB != null) { + reachableDB.close(); + } + } + + private void dropReachableTableIfExists() throws IOException { + try { + List + availableCFs = reachableDB.listColumnFamiliesEmptyOptions(reachableDBPath); + boolean cfFound = false; + for (byte[] cfNameBytes: availableCFs) { + if (new String(cfNameBytes, UTF_8).equals(new String(REACHABLE_TABLE_BYTES, UTF_8))) { + cfFound = true; + break; + } + } + + if (cfFound) { + reachableDB.dropColumnFamily(reachableCFHandle); + } + } catch (RocksDBException ex) { + throw new IOException(ex.getMessage(), ex); + } finally { + if (reachableCFHandle != null) { + reachableCFHandle.close(); + } + } + } + + private void createReachableTable() throws IOException { + reachableCFHandle = reachableDB.createColumnFamily( + new ColumnFamilyDescriptor(REACHABLE_TABLE_BYTES)); + } + + /** + * Define a Report to be created. + */ + public static class Report { + private long reachableBytes; + private long reachableFiles; + private long reachableDirs; + private long unreachableBytes; + private long unreachableFiles; + private long unreachableDirs; + + /** + * Builds one report that is the aggregate of multiple others. + */ + public Report(org.apache.hadoop.ozone.repair.om.FSORepairTool.Report... 
reports) { + reachableBytes = 0; + reachableFiles = 0; + reachableDirs = 0; + unreachableBytes = 0; + unreachableFiles = 0; + unreachableDirs = 0; + + for (org.apache.hadoop.ozone.repair.om.FSORepairTool.Report report: reports) { + reachableBytes += report.reachableBytes; + reachableFiles += report.reachableFiles; + reachableDirs += report.reachableDirs; + unreachableBytes += report.unreachableBytes; + unreachableFiles += report.unreachableFiles; + unreachableDirs += report.unreachableDirs; + } + } + + private Report(org.apache.hadoop.ozone.repair.om.FSORepairTool.Report.Builder builder) { + reachableBytes = builder.reachableBytes; + reachableFiles = builder.reachableFiles; + reachableDirs = builder.reachableDirs; + unreachableBytes = builder.unreachableBytes; + unreachableFiles = builder.unreachableFiles; + unreachableDirs = builder.unreachableDirs; + } + + public long getReachableBytes() { + return reachableBytes; + } + + public long getReachableFiles() { + return reachableFiles; + } + + public long getReachableDirs() { + return reachableDirs; + } + + public long getUnreachableBytes() { + return unreachableBytes; + } + + public long getUnreachableFiles() { + return unreachableFiles; + } + + public long getUnreachableDirs() { + return unreachableDirs; + } + + @Override + public String toString() { + return "Reachable:" + + "\n\tDirectories: " + reachableDirs + + "\n\tFiles: " + reachableFiles + + "\n\tBytes: " + reachableBytes + + "\nUnreachable:" + + "\n\tDirectories: " + unreachableDirs + + "\n\tFiles: " + unreachableFiles + + "\n\tBytes: " + unreachableBytes; + } + + @Override + public boolean equals(Object other) { + if (other == this) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + FSORepairTool.Report report = (FSORepairTool.Report) other; + + // Useful for testing. + LOG.debug("Comparing reports\nExpect:\n{}\nActual:\n{}", this, report); + + return reachableBytes == report.reachableBytes && + reachableFiles == report.reachableFiles && + reachableDirs == report.reachableDirs && + unreachableBytes == report.unreachableBytes && + unreachableFiles == report.unreachableFiles && + unreachableDirs == report.unreachableDirs; + } + + @Override + public int hashCode() { + return Objects.hash(reachableBytes, + reachableFiles, + reachableDirs, + unreachableBytes, + unreachableFiles, + unreachableDirs); + } + + /** + * Builder class for a Report. 
+ */ + public static final class Builder { + private long reachableBytes; + private long reachableFiles; + private long reachableDirs; + private long unreachableBytes; + private long unreachableFiles; + private long unreachableDirs; + + public Builder() { + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setReachableBytes(long reachableBytes) { + this.reachableBytes = reachableBytes; + return this; + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setReachableFiles(long reachableFiles) { + this.reachableFiles = reachableFiles; + return this; + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setReachableDirs(long reachableDirs) { + this.reachableDirs = reachableDirs; + return this; + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setUnreachableBytes(long unreachableBytes) { + this.unreachableBytes = unreachableBytes; + return this; + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setUnreachableFiles(long unreachableFiles) { + this.unreachableFiles = unreachableFiles; + return this; + } + + @SuppressWarnings("checkstyle:hiddenfield") + public Builder setUnreachableDirs(long unreachableDirs) { + this.unreachableDirs = unreachableDirs; + return this; + } + + public Report build() { + return new Report(this); + } + } + } +}
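Reviewer note: below is a minimal, hypothetical usage sketch of the new tool, mirroring how TestFSORepairTool drives it programmatically. The FSORepairTool constructor, run(), and the Report getters are the ones added in this patch; the class name FSORepairExample and the DB path are placeholders, and the OM must be stopped before pointing the tool at its DB.

import org.apache.hadoop.ozone.repair.om.FSORepairTool;

/** Hypothetical driver: dry run first, then repair only if something is unreachable. */
public final class FSORepairExample {
  public static void main(String[] args) throws Exception {
    String omDbPath = "/path/to/om.db"; // placeholder path to the stopped OM's RocksDB

    // Pass 1: dry run, only report unreachable files and directories.
    FSORepairTool checker = new FSORepairTool(omDbPath, true);
    FSORepairTool.Report report = checker.run();
    System.out.println(report);

    // Pass 2: repair mode, move unreachable entries to the delete tables.
    if (report.getUnreachableFiles() > 0 || report.getUnreachableDirs() > 0) {
      new FSORepairTool(omDbPath, false).run();
    }
  }
}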