diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
index 349a15da3a56..5b9d2f7aabdf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/TarContainerPacker.java
@@ -84,11 +84,13 @@ public byte[] unpackContainerData(Container container,
       if (name.startsWith(DB_DIR_NAME + "/")) {
         Path destinationPath = dbRoot
             .resolve(name.substring(DB_DIR_NAME.length() + 1));
-        extractEntry(archiveInput, size, dbRoot, destinationPath);
+        extractEntry(entry, archiveInput, size, dbRoot,
+            destinationPath);
       } else if (name.startsWith(CHUNKS_DIR_NAME + "/")) {
         Path destinationPath = chunksRoot
             .resolve(name.substring(CHUNKS_DIR_NAME.length() + 1));
-        extractEntry(archiveInput, size, chunksRoot, destinationPath);
+        extractEntry(entry, archiveInput, size, chunksRoot,
+            destinationPath);
       } else if (CONTAINER_FILE_NAME.equals(name)) {
         //Don't do anything. Container file should be unpacked in a
         //separated step by unpackContainerDescriptor call.
@@ -109,27 +111,32 @@ public byte[] unpackContainerData(Container container,
     }
   }
 
-  private void extractEntry(InputStream input, long size,
-      Path ancestor, Path path) throws IOException {
+  private void extractEntry(ArchiveEntry entry, InputStream input, long size,
+      Path ancestor, Path path) throws IOException {
     HddsUtils.validatePath(path, ancestor);
-    Path parent = path.getParent();
-    if (parent != null) {
-      Files.createDirectories(parent);
-    }
-    try (OutputStream fileOutput = new FileOutputStream(path.toFile());
-        OutputStream output = new BufferedOutputStream(fileOutput)) {
-      int bufferSize = 1024;
-      byte[] buffer = new byte[bufferSize + 1];
-      long remaining = size;
-      while (remaining > 0) {
-        int len = (int) Math.min(remaining, bufferSize);
-        int read = input.read(buffer, 0, len);
-        if (read >= 0) {
-          remaining -= read;
-          output.write(buffer, 0, read);
-        } else {
-          remaining = 0;
+    if (entry.isDirectory()) {
+      Files.createDirectories(path);
+    } else {
+      Path parent = path.getParent();
+      if (parent != null) {
+        Files.createDirectories(parent);
+      }
+
+      try (OutputStream fileOutput = new FileOutputStream(path.toFile());
+          OutputStream output = new BufferedOutputStream(fileOutput)) {
+        int bufferSize = 1024;
+        byte[] buffer = new byte[bufferSize + 1];
+        long remaining = size;
+        while (remaining > 0) {
+          int len = (int) Math.min(remaining, bufferSize);
+          int read = input.read(buffer, 0, len);
+          if (read >= 0) {
+            remaining -= read;
+            output.write(buffer, 0, read);
+          } else {
+            remaining = 0;
+          }
         }
       }
     }
@@ -209,6 +216,12 @@ private byte[] readEntry(InputStream input, final long size)
 
   private void includePath(Path dir, String subdir,
       ArchiveOutputStream archiveOutput) throws IOException {
+    // Add a directory entry before adding files, in case the directory is
+    // empty.
+    ArchiveEntry entry = archiveOutput.createArchiveEntry(dir.toFile(), subdir);
+    archiveOutput.putArchiveEntry(entry);
+
+    // Add files in the directory.
     try (Stream<Path> dirEntries = Files.list(dir)) {
       for (Path path : dirEntries.collect(toList())) {
         String entryName = subdir + "/" + path.getFileName();
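The packer change above relies on two commons-compress behaviors: an archive can carry an explicit entry for a directory (which includePath() now writes even when the directory holds no files), and ArchiveEntry.isDirectory() identifies such entries on the way out, so extractEntry() can recreate the directory instead of treating it as a zero-byte file. The following standalone sketch, which is not part of the patch, demonstrates that round trip; the class name and the "chunks/" entry name are illustrative only.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveInputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;

public final class EmptyDirTarDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();

    // Pack: write an explicit entry for an empty directory. A name ending
    // in "/" marks a tar entry as a directory.
    try (TarArchiveOutputStream out = new TarArchiveOutputStream(bytes)) {
      out.putArchiveEntry(new TarArchiveEntry("chunks/"));
      out.closeArchiveEntry();
    }

    // Unpack: isDirectory() is the check extractEntry() now performs
    // before deciding whether to create a directory or copy file data.
    try (TarArchiveInputStream in = new TarArchiveInputStream(
        new ByteArrayInputStream(bytes.toByteArray()))) {
      TarArchiveEntry entry;
      while ((entry = in.getNextTarEntry()) != null) {
        System.out.println(entry.getName() + " -> directory: "
            + entry.isDirectory());   // prints "chunks/ -> directory: true"
      }
    }
  }
}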
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index 72cc8c313bb3..b677713ca228 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -19,6 +19,7 @@
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
@@ -258,6 +259,12 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData,
       initializeUsedBytesAndBlockCount(store, kvContainerData);
     }
 
+    // If the container is missing a chunks directory, possibly due to the
+    // bug fixed by HDDS-6235, create it here.
+    File chunksDir = new File(kvContainerData.getChunksPath());
+    if (!chunksDir.exists()) {
+      Files.createDirectories(chunksDir.toPath());
+    }
     // Run advanced container inspection/repair operations if specified on
     // startup. If this method is called but not as a part of startup,
     // The inspectors will be unloaded and this will be a no-op.
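This load-time repair complements the packer fix: containers that were imported before the fix, and therefore landed on disk without a chunks directory, are healed the next time parseKVContainerData() runs. Below is a tiny sketch of the check-then-create pattern; the path is hypothetical, and Files.createDirectories() itself does not fail when the directory already exists, so the exists() check merely skips a redundant call on the common path.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

public final class ChunksDirRepairDemo {
  public static void main(String[] args) throws IOException {
    // Hypothetical stand-in for kvContainerData.getChunksPath().
    File chunksDir = new File("/tmp/container-123/chunks");

    // Recreate the directory if missing; a no-op on healthy containers.
    if (!chunksDir.exists()) {
      Files.createDirectories(chunksDir.toPath());
    }
    System.out.println("chunks dir present: " + chunksDir.isDirectory());
  }
}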
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
index 161657257734..20fa8835f3a2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.ozone.container.metadata.AbstractDatanodeStore;
 import org.apache.hadoop.ozone.container.metadata.DatanodeStore;
 import org.apache.ozone.test.GenericTestUtils;
@@ -66,6 +67,8 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.file.Files;
+import java.nio.file.Path;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.List;
@@ -74,6 +77,7 @@
 import java.util.function.Supplier;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
+import java.util.stream.Stream;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
 import static org.apache.ratis.util.Preconditions.assertTrue;
@@ -154,6 +158,55 @@ public void testCreateContainer() throws Exception {
         "DB does not exist");
   }
 
+  /**
+   * Tests repair of containers affected by the bug reported in HDDS-6235.
+   */
+  @Test
+  public void testMissingChunksDirCreated() throws Exception {
+    // Create an empty container and delete its chunks directory.
+    createContainer();
+    closeContainer();
+    // Sets the checksum.
+    populate(0);
+    KeyValueContainerData data = keyValueContainer.getContainerData();
+    File chunksDir = new File(data.getChunksPath());
+    Assert.assertTrue(chunksDir.delete());
+
+    // When the container is loaded, the missing chunks directory should
+    // be created.
+    KeyValueContainerUtil.parseKVContainerData(data, CONF);
+    Assert.assertTrue(chunksDir.exists());
+  }
+
+  @Test
+  public void testEmptyContainerImportExport() throws Exception {
+    createContainer();
+    closeContainer();
+
+    KeyValueContainerData data = keyValueContainer.getContainerData();
+
+    // Check state of original container.
+    checkContainerFilesPresent(data, 0);
+
+    //destination path
+    File exportTar = folder.newFile("exported.tar.gz");
+    TarContainerPacker packer = new TarContainerPacker();
+    //export the container
+    try (FileOutputStream fos = new FileOutputStream(exportTar)) {
+      keyValueContainer.exportContainerData(fos, packer);
+    }
+
+    keyValueContainer.delete();
+
+    // import container.
+    try (FileInputStream fis = new FileInputStream(exportTar)) {
+      keyValueContainer.importContainerData(fis, packer);
+    }
+
+    // Make sure empty chunks dir was unpacked.
+    checkContainerFilesPresent(data, 0);
+  }
+
   @Test
   public void testContainerImportExport() throws Exception {
     long containerId = keyValueContainer.getContainerData().getContainerID();
@@ -244,6 +297,18 @@ public void testContainerImportExport() throws Exception {
     }
   }
 
+  private void checkContainerFilesPresent(KeyValueContainerData data,
+      long expectedNumFilesInChunksDir) throws IOException {
+    File chunksDir = new File(data.getChunksPath());
+    Assert.assertTrue(Files.isDirectory(chunksDir.toPath()));
+    try (Stream<Path> stream = Files.list(chunksDir.toPath())) {
+      Assert.assertEquals(expectedNumFilesInChunksDir, stream.count());
+    }
+    Assert.assertTrue(data.getDbFile().exists());
+    Assert.assertTrue(KeyValueContainer.getContainerFile(data.getMetadataPath(),
+        data.getContainerID()).exists());
+  }
+
   /**
    * Create the container on disk.
    */
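The checkContainerFilesPresent() helper added above counts chunk files with Files.list(), whose returned stream holds an open directory handle and must therefore be closed, hence the try-with-resources. A minimal standalone sketch of that pattern follows; the names are illustrative, not taken from the test.

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.stream.Stream;

public final class DirFileCountDemo {
  // Count the entries directly under dir, closing the underlying
  // directory handle when done.
  static long countEntries(Path dir) throws IOException {
    try (Stream<Path> entries = Files.list(dir)) {
      return entries.count();
    }
  }

  public static void main(String[] args) throws IOException {
    Path dir = Files.createTempDirectory("chunks-demo");
    System.out.println(countEntries(dir)); // 0 for a freshly created dir
  }
}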
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
index 16e3f2d27f46..9656c280b4ca 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMetadataInspector.java
@@ -22,7 +22,6 @@
 import com.google.gson.JsonElement;
 import com.google.gson.JsonObject;
 import com.google.gson.JsonPrimitive;
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerInspector;
@@ -37,8 +36,6 @@
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 
-import java.io.File;
-
 /**
  * Tests for {@link KeyValueContainerMetadataInspector}.
  */
@@ -107,39 +104,6 @@ public void testSystemPropertyAndReadOnly() {
     System.clearProperty(KeyValueContainerMetadataInspector.SYSTEM_PROPERTY);
   }
 
-  @Test
-  public void testMissingChunksDir() throws Exception {
-    // Create container with missing chunks dir.
-    // The metadata in the DB will not be set in this fake container.
-    KeyValueContainer container = createClosedContainer(0);
-    KeyValueContainerData containerData = container.getContainerData();
-    String chunksDirStr = containerData.getChunksPath();
-    File chunksDirFile = new File(chunksDirStr);
-    FileUtils.deleteDirectory(chunksDirFile);
-    Assert.assertFalse(chunksDirFile.exists());
-
-    // In inspect mode, missing chunks dir should be detected but not fixed.
-    JsonObject inspectJson = runInspectorAndGetReport(containerData,
-        KeyValueContainerMetadataInspector.Mode.INSPECT);
-    // The block count and used bytes should be null in this container, but
-    // because it has no block keys that should not be an error.
-    Assert.assertEquals(1,
-        inspectJson.getAsJsonArray("errors").size());
-    checkJsonErrorsReport(inspectJson, "chunksDirectory.present",
-        new JsonPrimitive(true), new JsonPrimitive(false), false);
-    Assert.assertFalse(chunksDirFile.exists());
-
-    // In repair mode, missing chunks dir should be detected and fixed.
-    JsonObject repairJson = runInspectorAndGetReport(containerData,
-        KeyValueContainerMetadataInspector.Mode.REPAIR);
-    Assert.assertEquals(1,
-        inspectJson.getAsJsonArray("errors").size());
-    checkJsonErrorsReport(repairJson, "chunksDirectory.present",
-        new JsonPrimitive(true), new JsonPrimitive(false), true);
-    Assert.assertTrue(chunksDirFile.exists());
-    Assert.assertTrue(chunksDirFile.isDirectory());
-  }
-
   @Test
   public void testIncorrectTotalsNoData() throws Exception {
     int createBlocks = 0;