diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index a48a7a33a7da..0207fec264d0 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -104,7 +104,6 @@
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.ShutdownHookManager;
-import org.rocksdb.RocksDBException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -132,7 +131,7 @@ private HddsServerUtil() {
    * @param server RPC server to which the protocol and implementation is added to
    */
   public static void addPBProtocol(Configuration conf, Class<?> protocol,
-      BlockingService service, RPC.Server server) throws IOException {
+      BlockingService service, RPC.Server server) {
     RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
     server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
   }
@@ -670,22 +669,6 @@ public static UserGroupInformation getRemoteUser() throws IOException {
     return (ugi != null) ? ugi : UserGroupInformation.getCurrentUser();
   }
 
-  /**
-   * Converts RocksDB exception to IOE.
-   * @param msg - Message to add to exception.
-   * @param e - Original Exception.
-   * @return IOE.
-   */
-  public static IOException toIOException(String msg, RocksDBException e) {
-    String statusCode = e.getStatus() == null ? "N/A" :
-        e.getStatus().getCodeString();
-    String errMessage = e.getMessage() == null ? "Unknown error" :
-        e.getMessage();
-    String output = msg + "; status : " + statusCode
-        + "; message : " + errMessage;
-    return new IOException(output, e);
-  }
-
   /**
    * Add exception classes which server won't log at all.
    * @param server
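With toIOException gone, call sites wrap RocksDBException in the RocksDatabaseException type introduced later in this patch. A minimal, hedged sketch of the replacement pattern at a hypothetical call site (WrapSketch and the message string are illustrative, not part of the patch):

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.rocksdb.RocksDBException;

    // Hypothetical call site, assuming the rest of this patch is applied.
    final class WrapSketch {
      private WrapSketch() { }

      static IOException wrap(RocksDBException e) {
        // Before: HddsServerUtil.toIOException("Failed to open db", e)
        // appended "; status : ...; message : ..." by hand.
        // After: the status code is prefixed by the constructor itself.
        return new RocksDatabaseException("Failed to open db", e);
      }
    }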
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
index 21b3ad606176..7c494b12b704 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java
@@ -17,11 +17,8 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import static org.apache.hadoop.hdds.utils.HddsServerUtil.toIOException;
-
 import com.google.common.base.Preconditions;
 import java.io.File;
-import java.io.IOException;
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.util.List;
@@ -53,7 +50,7 @@ public final class DBConfigFromFile {
   private DBConfigFromFile() {
   }
 
-  public static File getConfigLocation() throws IOException {
+  public static File getConfigLocation() {
     String path = System.getenv(CONFIG_DIR);
 
     // Make testing easy.
@@ -109,10 +106,9 @@ public static String getOptionsFileNameFromDB(String dbFileName) {
    * @param dbFileName - The DB File Name, for example, OzoneManager.db.
    * @param cfDescs - ColumnFamily Handles.
    * @return DBOptions, Options to be used for opening/creating the DB.
-   * @throws IOException
    */
   public static ManagedDBOptions readFromFile(String dbFileName,
-      List<ColumnFamilyDescriptor> cfDescs) throws IOException {
+      List<ColumnFamilyDescriptor> cfDescs) throws RocksDatabaseException {
     Preconditions.checkNotNull(dbFileName);
     Preconditions.checkNotNull(cfDescs);
     Preconditions.checkArgument(!cfDescs.isEmpty());
@@ -133,7 +129,7 @@ public static ManagedDBOptions readFromFile(String dbFileName,
           env, options, cfDescs, true);
     } catch (RocksDBException rdEx) {
-      throw toIOException("Unable to find/open Options file.", rdEx);
+      throw new RocksDatabaseException("Failed to loadOptionsFromFile " + optionsFile, rdEx);
     }
   }
 }
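A hedged sketch of calling the narrowed readFromFile signature; the class name OptionsLoadSketch, the db file name, and the single default column family are assumptions for illustration:

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hdds.utils.db.DBConfigFromFile;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
    import org.rocksdb.ColumnFamilyDescriptor;
    import org.rocksdb.RocksDB;

    // Hypothetical caller of the narrowed signature.
    final class OptionsLoadSketch {
      private OptionsLoadSketch() { }

      static ManagedDBOptions load() throws RocksDatabaseException {
        List<ColumnFamilyDescriptor> cfDescs = Collections.singletonList(
            new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
        // Throws RocksDatabaseException when an options file exists for
        // OzoneManager.db but cannot be loaded.
        return DBConfigFromFile.readFromFile("OzoneManager.db", cfDescs);
      }
    }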
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
index 8adf8a445260..49dd9e02d233 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileLoader.java
@@ -17,28 +17,18 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import java.io.Closeable;
 import java.io.File;
-import java.io.IOException;
 import java.util.Collections;
 import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedIngestExternalFileOptions;
 
 /**
- * DumpFileLoader using rocksdb sst files.
+ * Load rocksdb sst files.
  */
-public class RDBSstFileLoader implements DumpFileLoader, Closeable {
+final class RDBSstFileLoader {
+  private RDBSstFileLoader() { }
 
-  private final RocksDatabase db;
-  private final ColumnFamily family;
-
-  public RDBSstFileLoader(RocksDatabase db, ColumnFamily cf) {
-    this.db = db;
-    this.family = cf;
-  }
-
-  @Override
-  public void load(File externalFile) throws IOException {
+  static void load(RocksDatabase db, ColumnFamily family, File externalFile) throws RocksDatabaseException {
     // Ingest an empty sst file results in exception.
     if (externalFile.length() == 0) {
       return;
     }
@@ -51,8 +41,4 @@ public void load(File externalFile) throws IOException {
         ingestOptions);
     }
   }
-
-  @Override
-  public void close() {
-  }
 }
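The loader is now a stateless, package-private utility, so the old construct-load-close ceremony reduces to one static call. A hedged sketch from inside the org.apache.hadoop.hdds.utils.db package (IngestSketch is hypothetical):

    package org.apache.hadoop.hdds.utils.db;

    import java.io.File;

    // Hypothetical, package-local caller; RDBSstFileLoader is package-private.
    final class IngestSketch {
      private IngestSketch() { }

      static void ingest(RocksDatabase db, RocksDatabase.ColumnFamily family, File sst)
          throws RocksDatabaseException {
        // Before: try (DumpFileLoader loader = new RDBSstFileLoader(db, family)) { loader.load(sst); }
        // After: one static call; empty files are skipped inside load().
        RDBSstFileLoader.load(db, family, sst);
      }
    }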
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
index b5e00fcf8359..e84854cae443 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBSstFileWriter.java
@@ -17,11 +17,8 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import static org.apache.hadoop.hdds.utils.HddsServerUtil.toIOException;
-
 import java.io.Closeable;
 import java.io.File;
-import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedEnvOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedOptions;
@@ -31,7 +28,7 @@
 /**
  * DumpFileWriter using rocksdb sst files.
  */
-public class RDBSstFileWriter implements DumpFileWriter, Closeable {
+class RDBSstFileWriter implements Closeable {
 
   private ManagedSstFileWriter sstFileWriter;
   private File sstFile;
@@ -39,38 +36,32 @@ public class RDBSstFileWriter implements DumpFileWriter, Closeable {
   private ManagedOptions emptyOption = new ManagedOptions();
   private final ManagedEnvOptions emptyEnvOptions = new ManagedEnvOptions();
 
-  public RDBSstFileWriter() {
+  RDBSstFileWriter(File externalFile) throws RocksDatabaseException {
     this.sstFileWriter = new ManagedSstFileWriter(emptyEnvOptions, emptyOption);
     this.keyCounter = new AtomicLong(0);
-  }
-
-  @Override
-  public void open(File externalFile) throws IOException {
     this.sstFile = externalFile;
     try {
       // Here will create a new sst file each time, not append to existing
       sstFileWriter.open(sstFile.getAbsolutePath());
     } catch (RocksDBException e) {
       closeOnFailure();
-      throw toIOException("Failed to open external file for dump "
-          + sstFile.getAbsolutePath(), e);
+      throw new RocksDatabaseException("Failed to open " + sstFile, e);
     }
   }
 
-  @Override
-  public void put(byte[] key, byte[] value) throws IOException {
+  public void put(byte[] key, byte[] value) throws RocksDatabaseException {
     try {
       sstFileWriter.put(key, value);
       keyCounter.incrementAndGet();
     } catch (RocksDBException e) {
       closeOnFailure();
-      throw toIOException("Failed to put kv into dump file "
-          + sstFile.getAbsolutePath(), e);
+      throw new RocksDatabaseException("Failed to put key (length=" + key.length
+          + ") and value (length=" + value.length + "), sstFile=" + sstFile.getAbsolutePath(), e);
     }
   }
 
   @Override
-  public void close() throws IOException {
+  public void close() throws RocksDatabaseException {
     if (sstFileWriter != null) {
       try {
         // We should check for empty sst file, or we'll get exception.
@@ -78,8 +69,7 @@ public void close() throws IOException {
         if (keyCounter.get() > 0) {
           sstFileWriter.finish();
         }
       } catch (RocksDBException e) {
-        throw toIOException("Failed to finish dumping into file "
-            + sstFile.getAbsolutePath(), e);
+        throw new RocksDatabaseException("Failed to finish writing to " + sstFile, e);
       } finally {
         closeResources();
       }
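Since the constructor now opens the sst file, there is no separate open() step and try-with-resources covers the whole write cycle. A hedged sketch, also package-local because the class is no longer public (DumpSketch and the key/value bytes are illustrative):

    package org.apache.hadoop.hdds.utils.db;

    import java.io.File;
    import java.nio.charset.StandardCharsets;

    // Hypothetical, package-local caller of the reworked writer.
    final class DumpSketch {
      private DumpSketch() { }

      static void dumpOne(File sst) throws RocksDatabaseException {
        // The constructor opens sst; closeOnFailure() has already run if it throws.
        try (RDBSstFileWriter writer = new RDBSstFileWriter(sst)) {
          writer.put("key".getBytes(StandardCharsets.UTF_8),
              "value".getBytes(StandardCharsets.UTF_8));
        } // close() calls finish() only if at least one key was written
      }
    }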
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 64b63d1814ec..0e7ee39db0a6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -95,6 +95,7 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
     this.dbOptions = dbOptions;
     this.statistics = statistics;
 
+    Exception exception = null;
     try {
       if (enableCompactionDag) {
         rocksDBCheckpointDiffer = RocksDBCheckpointDifferHolder.getInstance(
@@ -171,16 +172,20 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
       checkPointManager = new RDBCheckpointManager(db, dbLocation.getName());
       rdbMetrics = RDBMetrics.create();
 
+    } catch (RuntimeException e) {
+      exception = e;
+      throw new IllegalStateException("Failed to create RDBStore from " + dbFile, e);
     } catch (Exception e) {
-      // Close DB and other things if got initialized.
-      close();
-      String msg = "Failed init RocksDB, db path : " + dbFile.getAbsolutePath()
-          + ", " + "exception :" + (e.getCause() == null ?
-          e.getClass().getCanonicalName() + " " + e.getMessage() :
-          e.getCause().getClass().getCanonicalName() + " " +
-          e.getCause().getMessage());
-
-      throw new IOException(msg, e);
+      exception = e;
+      throw new IOException("Failed to create RDBStore from " + dbFile, e);
+    } finally {
+      if (exception != null) {
+        try {
+          close();
+        } catch (IOException e) {
+          exception.addSuppressed(e);
+        }
+      }
     }
 
     if (LOG.isDebugEnabled()) {
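The constructor now records the primary failure, closes partially-initialized state in finally, and attaches any secondary close() failure via addSuppressed, so neither stack trace is lost. A generic, hedged restatement of that pattern (InitSketch is hypothetical and collapses the separate RuntimeException branch for brevity):

    import java.io.Closeable;
    import java.io.IOException;

    // Hypothetical class restating the cleanup contract; init() and close()
    // stand in for RDBStore's real initialization and teardown.
    final class InitSketch implements Closeable {
      InitSketch() throws IOException {
        Exception exception = null;
        try {
          init();
        } catch (Exception e) {
          exception = e;
          throw new IOException("Failed to initialize", e);
        } finally {
          if (exception != null) {
            try {
              close(); // clean up whatever init() managed to create
            } catch (IOException e) {
              exception.addSuppressed(e); // keep both stack traces
            }
          }
        }
      }

      private void init() throws IOException {
        // resource initialization goes here
      }

      @Override
      public void close() throws IOException {
        // teardown of partially-initialized state goes here
      }
    }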
throw new IOException("Rocks Database is closed"); + throw new RocksDatabaseException("Rocks Database is closed"); } return counter::getAndDecrement; } @@ -433,19 +431,19 @@ private boolean shouldClose(RocksDBException e) { } } - private UncheckedAutoCloseable acquire() throws IOException { + private UncheckedAutoCloseable acquire() throws RocksDatabaseException { if (isClosed()) { - throw new IOException("Rocks Database is closed"); + throw new RocksDatabaseException("Rocks Database is closed"); } if (counter.getAndIncrement() < 0) { counter.getAndDecrement(); - throw new IOException("Rocks Database is closed"); + throw new RocksDatabaseException("Rocks Database is closed"); } return counter::getAndDecrement; } public void ingestExternalFile(ColumnFamily family, List files, - ManagedIngestExternalFileOptions ingestOptions) throws IOException { + ManagedIngestExternalFileOptions ingestOptions) throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire()) { db.get().ingestExternalFile(family.getHandle(), files, ingestOptions); } catch (RocksDBException e) { @@ -453,31 +451,31 @@ public void ingestExternalFile(ColumnFamily family, List files, String msg = "Failed to ingest external files " + files.stream().collect(Collectors.joining(", ")) + " of " + family.getName(); - throw toIOException(this, msg, e); + throw toRocksDatabaseException(this, msg, e); } } public void put(ColumnFamily family, byte[] key, byte[] value) - throws IOException { + throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire()) { db.get().put(family.getHandle(), writeOptions, key, value); } catch (RocksDBException e) { closeOnError(e); - throw toIOException(this, "put " + bytes2String(key), e); + throw toRocksDatabaseException(this, "put " + bytes2String(key), e); } } public void put(ColumnFamily family, ByteBuffer key, ByteBuffer value) - throws IOException { + throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire()) { db.get().put(family.getHandle(), writeOptions, key, value); } catch (RocksDBException e) { closeOnError(e); - throw toIOException(this, "put " + bytes2String(key), e); + throw toRocksDatabaseException(this, "put " + bytes2String(key), e); } } - public void flush() throws IOException { + public void flush() throws RocksDatabaseException { try (UncheckedAutoCloseable ignored = acquire(); ManagedFlushOptions options = new ManagedFlushOptions()) { options.setWaitForFlush(true); @@ -487,14 +485,14 @@ public void flush() throws IOException { } } catch (RocksDBException e) { closeOnError(e); - throw toIOException(this, "flush", e); + throw toRocksDatabaseException(this, "flush", e); } } /** * @param cfName columnFamily on which flush will run. 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
index 9ef3710b8aa3..4ee7dcef6516 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabase.java
@@ -25,7 +25,6 @@
 import com.google.common.annotations.VisibleForTesting;
 import java.io.Closeable;
 import java.io.File;
-import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.nio.file.Path;
 import java.util.ArrayList;
@@ -42,7 +41,6 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;
 import org.apache.hadoop.hdds.StringUtils;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedCompactRangeOptions;
@@ -92,8 +90,8 @@ static String bytes2String(ByteBuffer bytes) {
     return StringCodec.get().decode(bytes);
   }
 
-  static IOException toIOException(Object name, String op, RocksDBException e) {
-    return HddsServerUtil.toIOException(name + ": Failed to " + op, e);
+  static RocksDatabaseException toRocksDatabaseException(Object name, String op, RocksDBException e) {
+    return new RocksDatabaseException(name + ": Failed to " + op, e);
   }
 
   /**
@@ -140,7 +138,7 @@ public static List<byte[]> listColumnFamiliesEmptyOptions(final String path)
   static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions,
       ManagedWriteOptions writeOptions, Set<TableConfig> families,
-      boolean readOnly) throws IOException {
+      boolean readOnly) throws RocksDatabaseException {
     List<ColumnFamilyDescriptor> descriptors = null;
     ManagedRocksDB db = null;
     final Map<String, ColumnFamily> columnFamilies = new HashMap<>();
@@ -162,7 +160,7 @@ static RocksDatabase open(File dbFile, ManagedDBOptions dbOptions,
       return new RocksDatabase(dbFile, db, dbOptions, writeOptions, descriptors, handles);
     } catch (RocksDBException e) {
       close(columnFamilies, db, descriptors, writeOptions, dbOptions);
-      throw toIOException(RocksDatabase.class, "open " + dbFile, e);
+      throw toRocksDatabaseException(RocksDatabase.class, "open " + dbFile, e);
     }
   }
 
@@ -229,21 +227,21 @@ private RocksCheckpoint() {
       this.checkpoint = ManagedCheckpoint.create(db);
     }
 
-    public void createCheckpoint(Path path) throws IOException {
+    public void createCheckpoint(Path path) throws RocksDatabaseException {
       try (UncheckedAutoCloseable ignored = acquire()) {
        checkpoint.get().createCheckpoint(path.toString());
       } catch (RocksDBException e) {
        closeOnError(e);
-        throw toIOException(this, "createCheckpoint " + path, e);
+        throw toRocksDatabaseException(this, "createCheckpoint " + path, e);
       }
     }
 
-    public long getLatestSequenceNumber() throws IOException {
+    public long getLatestSequenceNumber() throws RocksDatabaseException {
       return RocksDatabase.this.getLatestSequenceNumber();
     }
 
     @Override
-    public void close() throws IOException {
+    public void close() throws RocksDatabaseException {
       checkpoint.close();
     }
   }
@@ -279,16 +277,16 @@ public ColumnFamilyHandle getHandle() {
     }
 
     public void batchDelete(ManagedWriteBatch writeBatch, byte[] key)
-        throws IOException {
+        throws RocksDatabaseException {
       try (UncheckedAutoCloseable ignored = acquire()) {
         writeBatch.delete(getHandle(), key);
       } catch (RocksDBException e) {
-        throw toIOException(this, "batchDelete key " + bytes2String(key), e);
+        throw toRocksDatabaseException(this, "batchDelete key " + bytes2String(key), e);
       }
     }
 
     public void batchPut(ManagedWriteBatch writeBatch, byte[] key, byte[] value)
-        throws IOException {
+        throws RocksDatabaseException {
       if (LOG.isDebugEnabled()) {
         LOG.debug("batchPut array key {}", bytes2String(key));
         LOG.debug("batchPut array value {}", bytes2String(value));
@@ -297,12 +295,12 @@ public void batchPut(ManagedWriteBatch writeBatch, byte[] key, byte[] value)
       try (UncheckedAutoCloseable ignored = acquire()) {
         writeBatch.put(getHandle(), key, value);
       } catch (RocksDBException e) {
-        throw toIOException(this, "batchPut key " + bytes2String(key), e);
+        throw toRocksDatabaseException(this, "batchPut key " + bytes2String(key), e);
       }
     }
 
     public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key,
-        ByteBuffer value) throws IOException {
+        ByteBuffer value) throws RocksDatabaseException {
       if (LOG.isDebugEnabled()) {
         LOG.debug("batchPut buffer key {}", bytes2String(key.duplicate()));
         LOG.debug("batchPut buffer value size {}", value.remaining());
@@ -311,18 +309,18 @@ public void batchPut(ManagedWriteBatch writeBatch, ByteBuffer key,
       try (UncheckedAutoCloseable ignored = acquire()) {
         writeBatch.put(getHandle(), key.duplicate(), value);
       } catch (RocksDBException e) {
-        throw toIOException(this, "batchPut ByteBuffer key "
+        throw toRocksDatabaseException(this, "batchPut ByteBuffer key "
             + bytes2String(key), e);
       }
     }
 
-    private UncheckedAutoCloseable acquire() throws IOException {
+    private UncheckedAutoCloseable acquire() throws RocksDatabaseException {
       if (isClosed.get()) {
-        throw new IOException("Rocks Database is closed");
+        throw new RocksDatabaseException("Rocks Database is closed");
       }
       if (counter.getAndIncrement() < 0) {
        counter.getAndDecrement();
-        throw new IOException("Rocks Database is closed");
+        throw new RocksDatabaseException("Rocks Database is closed");
       }
       return counter::getAndDecrement;
     }
@@ -433,19 +431,19 @@ private boolean shouldClose(RocksDBException e) {
     }
   }
 
-  private UncheckedAutoCloseable acquire() throws IOException {
+  private UncheckedAutoCloseable acquire() throws RocksDatabaseException {
    if (isClosed()) {
-      throw new IOException("Rocks Database is closed");
+      throw new RocksDatabaseException("Rocks Database is closed");
    }
    if (counter.getAndIncrement() < 0) {
      counter.getAndDecrement();
-      throw new IOException("Rocks Database is closed");
+      throw new RocksDatabaseException("Rocks Database is closed");
    }
    return counter::getAndDecrement;
  }
 
   public void ingestExternalFile(ColumnFamily family, List<String> files,
-      ManagedIngestExternalFileOptions ingestOptions) throws IOException {
+      ManagedIngestExternalFileOptions ingestOptions) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().ingestExternalFile(family.getHandle(), files, ingestOptions);
     } catch (RocksDBException e) {
@@ -453,31 +451,31 @@ public void ingestExternalFile(ColumnFamily family, List<String> files,
       String msg = "Failed to ingest external files " +
           files.stream().collect(Collectors.joining(", ")) + " of " +
           family.getName();
-      throw toIOException(this, msg, e);
+      throw toRocksDatabaseException(this, msg, e);
     }
   }
 
   public void put(ColumnFamily family, byte[] key, byte[] value)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().put(family.getHandle(), writeOptions, key, value);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "put " + bytes2String(key), e);
+      throw toRocksDatabaseException(this, "put " + bytes2String(key), e);
     }
   }
 
   public void put(ColumnFamily family, ByteBuffer key, ByteBuffer value)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().put(family.getHandle(), writeOptions, key, value);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "put " + bytes2String(key), e);
+      throw toRocksDatabaseException(this, "put " + bytes2String(key), e);
     }
   }
 
-  public void flush() throws IOException {
+  public void flush() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire();
         ManagedFlushOptions options = new ManagedFlushOptions()) {
       options.setWaitForFlush(true);
@@ -487,14 +485,14 @@ public void flush() throws IOException {
       }
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "flush", e);
+      throw toRocksDatabaseException(this, "flush", e);
     }
   }
 
   /**
    * @param cfName columnFamily on which flush will run.
    */
-  public void flush(String cfName) throws IOException {
+  public void flush(String cfName) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       ColumnFamilyHandle handle = getColumnFamilyHandle(cfName);
       try (ManagedFlushOptions options = new ManagedFlushOptions()) {
@@ -508,40 +506,40 @@ public void flush(String cfName) throws IOException {
       }
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "flush", e);
+      throw toRocksDatabaseException(this, "flush", e);
     }
   }
 
-  public void flushWal(boolean sync) throws IOException {
+  public void flushWal(boolean sync) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().flushWal(sync);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "flushWal with sync=" + sync, e);
+      throw toRocksDatabaseException(this, "flushWal with sync=" + sync, e);
     }
   }
 
-  public void compactRange() throws IOException {
+  public void compactRange() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().compactRange();
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "compactRange", e);
+      throw toRocksDatabaseException(this, "compactRange", e);
     }
   }
 
   public void compactRangeDefault(final ManagedCompactRangeOptions options)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().compactRange(null, null, null, options);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "compactRange", e);
+      throw toRocksDatabaseException(this, "compactRange", e);
     }
   }
 
-  public void compactDB(ManagedCompactRangeOptions options) throws IOException {
+  public void compactDB(ManagedCompactRangeOptions options) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       compactRangeDefault(options);
       for (RocksDatabase.ColumnFamily columnFamily
@@ -551,7 +549,7 @@ public void compactDB(ManagedCompactRangeOptions options) throws IOException {
     }
   }
 
-  public int getLiveFilesMetaDataSize() throws IOException {
+  public int getLiveFilesMetaDataSize() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLiveFilesMetaData().size();
     }
@@ -560,7 +558,7 @@ public int getLiveFilesMetaDataSize() throws IOException {
   /**
    * @param cfName columnFamily on which compaction will run.
    */
-  public void compactRange(String cfName) throws IOException {
+  public void compactRange(String cfName) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       ColumnFamilyHandle handle = getColumnFamilyHandle(cfName);
       try {
@@ -573,7 +571,7 @@ public void compactRange(String cfName) throws IOException {
       }
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "compactRange", e);
+      throw toRocksDatabaseException(this, "compactRange", e);
     }
   }
 
@@ -585,16 +583,16 @@ private ColumnFamilyHandle getColumnFamilyHandle(String columnFamilyName) {
 
   public void compactRange(ColumnFamily family, final byte[] begin,
       final byte[] end, final ManagedCompactRangeOptions options)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().compactRange(family.getHandle(), begin, end, options);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "compactRange", e);
+      throw toRocksDatabaseException(this, "compactRange", e);
     }
   }
 
-  public List<LiveFileMetaData> getLiveFilesMetaData() throws IOException {
+  public List<LiveFileMetaData> getLiveFilesMetaData() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLiveFilesMetaData();
     }
@@ -619,7 +617,7 @@ RocksCheckpoint createCheckpoint() {
    * @see org.rocksdb.RocksDB#keyMayExist(ColumnFamilyHandle, byte[], Holder)
    */
   Supplier<byte[]> keyMayExist(ColumnFamily family, byte[] key)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       final Holder<byte[]> out = new Holder<>();
       return db.get().keyMayExist(family.getHandle(), key, out) ?
@@ -628,7 +626,7 @@ Supplier<byte[]> keyMayExist(ColumnFamily family, byte[] key)
   }
 
   Supplier<Integer> keyMayExist(ColumnFamily family,
-      ByteBuffer key, ByteBuffer out) throws IOException {
+      ByteBuffer key, ByteBuffer out) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       final KeyMayExist result = db.get().keyMayExist(
           family.getHandle(), key, out);
@@ -651,13 +649,13 @@ public Collection<ColumnFamily> getExtraColumnFamilies() {
     return Collections.unmodifiableCollection(columnFamilies.values());
   }
 
-  byte[] get(ColumnFamily family, byte[] key) throws IOException {
+  byte[] get(ColumnFamily family, byte[] key) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().get(family.getHandle(), key);
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "get " + bytes2String(key) + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
     }
   }
 
@@ -671,12 +669,12 @@ byte[] get(ColumnFamily family, byte[] key) throws IOException {
    *          partial result will be written.
    * @return null if the key is not found;
    *         otherwise, return the size (possibly 0) of the value.
-   * @throws IOException if the db is closed or the db throws an exception.
+   * @throws RocksDatabaseException if the db is closed or the db throws an exception.
    * @see org.rocksdb.RocksDB#get(ColumnFamilyHandle, org.rocksdb.ReadOptions,
    *      ByteBuffer, ByteBuffer)
    */
   Integer get(ColumnFamily family, ByteBuffer key, ByteBuffer outValue)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       final int size = db.get().get(family.getHandle(), DEFAULT_READ_OPTION,
           key, outValue);
@@ -686,82 +684,82 @@ Integer get(ColumnFamily family, ByteBuffer key, ByteBuffer outValue)
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "get " + bytes2String(key) + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
     }
   }
 
-  public long estimateNumKeys() throws IOException {
+  public long estimateNumKeys() throws RocksDatabaseException {
     return getLongProperty(ESTIMATE_NUM_KEYS);
   }
 
-  public long estimateNumKeys(ColumnFamily family) throws IOException {
+  public long estimateNumKeys(ColumnFamily family) throws RocksDatabaseException {
     return getLongProperty(family, ESTIMATE_NUM_KEYS);
   }
 
-  private long getLongProperty(String key) throws IOException {
+  private long getLongProperty(String key) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLongProperty(key);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "getLongProperty " + key, e);
+      throw toRocksDatabaseException(this, "getLongProperty " + key, e);
     }
   }
 
   private long getLongProperty(ColumnFamily family, String key)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLongProperty(family.getHandle(), key);
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "getLongProperty " + key + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
     }
   }
 
-  public String getProperty(String key) throws IOException {
+  public String getProperty(String key) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getProperty(key);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "getProperty " + key, e);
+      throw toRocksDatabaseException(this, "getProperty " + key, e);
     }
   }
 
   public String getProperty(ColumnFamily family, String key)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getProperty(family.getHandle(), key);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "getProperty " + key + " from " + family, e);
+      throw toRocksDatabaseException(this, "getProperty " + key + " from " + family, e);
     }
   }
 
   public ManagedTransactionLogIterator getUpdatesSince(long sequenceNumber)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return managed(db.get().getUpdatesSince(sequenceNumber));
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "getUpdatesSince " + sequenceNumber, e);
+      throw toRocksDatabaseException(this, "getUpdatesSince " + sequenceNumber, e);
     }
   }
 
-  public long getLatestSequenceNumber() throws IOException {
+  public long getLatestSequenceNumber() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLatestSequenceNumber();
     }
   }
 
   public ManagedRocksIterator newIterator(ColumnFamily family)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return managed(db.get().newIterator(family.getHandle()));
     }
   }
 
   public ManagedRocksIterator newIterator(ColumnFamily family,
-      boolean fillCache) throws IOException {
+      boolean fillCache) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire();
         ManagedReadOptions readOptions = new ManagedReadOptions()) {
       readOptions.setFillCache(fillCache);
@@ -771,48 +769,48 @@ public ManagedRocksIterator newIterator(ColumnFamily family,
 
   public void batchWrite(ManagedWriteBatch writeBatch,
       ManagedWriteOptions options)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().write(options, writeBatch);
     } catch (RocksDBException e) {
       closeOnError(e);
-      throw toIOException(this, "batchWrite", e);
+      throw toRocksDatabaseException(this, "batchWrite", e);
     }
   }
 
-  public void batchWrite(ManagedWriteBatch writeBatch) throws IOException {
+  public void batchWrite(ManagedWriteBatch writeBatch) throws RocksDatabaseException {
     batchWrite(writeBatch, writeOptions);
   }
 
-  public void delete(ColumnFamily family, byte[] key) throws IOException {
+  public void delete(ColumnFamily family, byte[] key) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().delete(family.getHandle(), key);
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "delete " + bytes2String(key) + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
     }
   }
 
-  public void delete(ColumnFamily family, ByteBuffer key) throws IOException {
+  public void delete(ColumnFamily family, ByteBuffer key) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().delete(family.getHandle(), writeOptions, key);
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "delete " + bytes2String(key) + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
    }
  }
 
   public void deleteRange(ColumnFamily family, byte[] beginKey, byte[] endKey)
-      throws IOException {
+      throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       db.get().deleteRange(family.getHandle(), beginKey, endKey);
     } catch (RocksDBException e) {
       closeOnError(e);
       final String message = "delete range " + bytes2String(beginKey) +
           " to " + bytes2String(endKey) + " from " + family;
-      throw toIOException(this, message, e);
+      throw toRocksDatabaseException(this, message, e);
     }
   }
 
@@ -822,7 +820,7 @@ public String toString() {
   }
 
   @VisibleForTesting
-  public List<LiveFileMetaData> getSstFileList() throws IOException {
+  public List<LiveFileMetaData> getSstFileList() throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       return db.get().getLiveFilesMetaData();
     }
@@ -832,7 +830,7 @@ public List<LiveFileMetaData> getSstFileList() throws IOException {
    * return the max compaction level of sst files in the db.
    * @return level
    */
-  private int getLastLevel() throws IOException {
+  private int getLastLevel() throws RocksDatabaseException {
     return getSstFileList().stream()
         .max(Comparator.comparing(LiveFileMetaData::level)).get().level();
   }
@@ -842,8 +840,7 @@ private int getLastLevel() throws IOException {
    * for given table.
    * @param prefixPairs a map of TableName to prefixUsed.
    */
-  public void deleteFilesNotMatchingPrefix(Map<String, String> prefixPairs)
-      throws IOException, RocksDBException {
+  public void deleteFilesNotMatchingPrefix(Map<String, String> prefixPairs) throws RocksDatabaseException {
     try (UncheckedAutoCloseable ignored = acquire()) {
       for (LiveFileMetaData liveFileMetaData : getSstFileList()) {
         String sstFileColumnFamily = StringUtils.bytes2String(liveFileMetaData.columnFamilyName());
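Nearly every hunk in this file instantiates one template: acquire the ref-counted guard (which fails fast with RocksDatabaseException once the db is closed), delegate to RocksDB, and convert RocksDBException via toRocksDatabaseException after closeOnError. A hedged sketch written as if it were one more RocksDatabase method; exampleOp and its operation string are hypothetical:

    // Hypothetical method, shown as if added inside RocksDatabase for
    // illustration only; it will not compile on its own.
    public void exampleOp() throws RocksDatabaseException {
      try (UncheckedAutoCloseable ignored = acquire()) {
        db.get().compactRange(); // any underlying RocksDB call
      } catch (RocksDBException e) {
        closeOnError(e); // closes the db first when shouldClose(e) deems the error fatal
        throw toRocksDatabaseException(this, "exampleOp", e);
      }
    }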
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileWriter.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
similarity index 58%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileWriter.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
index 16ac547591bc..e7906879b4f8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileWriter.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDatabaseException.java
@@ -17,30 +17,27 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import java.io.Closeable;
-import java.io.File;
 import java.io.IOException;
+import org.rocksdb.RocksDBException;
 
 /**
- * Interface for write data into a dump file.
+ * Exceptions converted from {@link RocksDBException}.
 */
-public interface DumpFileWriter extends Closeable {
-  /**
-   * Open an external file for dump.
-   * @param externalFile
-   */
-  void open(File externalFile) throws IOException;
+public class RocksDatabaseException extends IOException {
+  private static String getStatus(RocksDBException e) {
+    return e.getStatus() == null ? "NULL_STATUS" : e.getStatus().getCodeString();
+  }
 
-  /**
-   * Put a key value pair into the file.
-   * @param key
-   * @param value
-   */
-  void put(byte[] key, byte[] value) throws IOException;
+  /** Construct from the given {@link RocksDBException} cause. */
+  public RocksDatabaseException(String message, RocksDBException cause) {
+    super(getStatus(cause) + ": " + message, cause);
+  }
 
-  /**
-   * Finish dumping.
-   */
-  @Override
-  void close() throws IOException;
+  public RocksDatabaseException(String message) {
+    super(message);
+  }
+
+  public RocksDatabaseException() {
+    super();
+  }
 }
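A hedged sketch of the message format the new constructor produces; the cause here is built from a bare string, so it carries no Status and exercises the NULL_STATUS fallback (MessageSketch and the paths are illustrative):

    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.rocksdb.RocksDBException;

    public final class MessageSketch {
      private MessageSketch() { }

      public static void main(String[] args) {
        // A RocksDBException built from a bare string carries no Status,
        // so the prefix falls back to "NULL_STATUS".
        RocksDBException cause = new RocksDBException("simulated failure");
        RocksDatabaseException e =
            new RocksDatabaseException("Failed to open /tmp/om.db", cause);
        System.out.println(e.getMessage()); // NULL_STATUS: Failed to open /tmp/om.db
      }
    }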
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
index cf16ac5fc99c..285c69ef78c3 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksDB.java
@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdds.utils.db.managed;
 
 import java.io.File;
-import java.io.IOException;
 import java.time.Duration;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;
 import org.apache.commons.io.FilenameUtils;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.DBOptions;
@@ -93,14 +93,17 @@ public static ManagedRocksDB open(
    * This function makes the RocksDB#deleteFile Api synchronized by waiting
    * for the deletes to happen.
    * @param fileToBeDeleted File to be deleted.
-   * @throws RocksDBException In the underlying db throws an exception.
-   * @throws IOException In the case file is not deleted.
+   * @throws RocksDatabaseException if the underlying db throws an exception
+   *         or the file is not deleted within a time limit.
    */
-  public void deleteFile(LiveFileMetaData fileToBeDeleted)
-      throws RocksDBException, IOException {
+  public void deleteFile(LiveFileMetaData fileToBeDeleted) throws RocksDatabaseException {
     String sstFileName = fileToBeDeleted.fileName();
-    this.get().deleteFile(sstFileName);
     File file = new File(fileToBeDeleted.path(), fileToBeDeleted.fileName());
+    try {
+      get().deleteFile(sstFileName);
+    } catch (RocksDBException e) {
+      throw new RocksDatabaseException("Failed to delete " + file, e);
+    }
     ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60));
   }
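Callers of deleteFile now handle a single exception type for both failure modes: the RocksDB delete call failing, or the file outliving the bounded wait. A hedged sketch (DeleteFileSketch is hypothetical):

    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
    import org.rocksdb.LiveFileMetaData;

    final class DeleteFileSketch {
      private DeleteFileSketch() { }

      static boolean tryDelete(ManagedRocksDB db, LiveFileMetaData sst) {
        try {
          db.deleteFile(sst); // waits up to 60s for the file to disappear
          return true;
        } catch (RocksDatabaseException e) {
          return false; // db-level failure, or the file outlived the wait
        }
      }
    }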
diff --git a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
index d970b4d21678..eef1f286d6c6 100644
--- a/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/managed/ManagedRocksObjectUtils.java
@@ -19,11 +19,11 @@
 
 import jakarta.annotation.Nullable;
 import java.io.File;
-import java.io.IOException;
 import java.time.Duration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.utils.LeakDetector;
+import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
 import org.apache.ratis.util.UncheckedAutoCloseable;
 import org.rocksdb.RocksDB;
 import org.rocksdb.util.Environment;
@@ -68,15 +68,15 @@ static String formatStackTrace(@Nullable StackTraceElement[] elements) {
    * Wait for file to be deleted.
    * @param file File to be deleted.
    * @param maxDuration poll max duration.
-   * @throws IOException in case of failure.
+   * @throws RocksDatabaseException in case of failure.
    */
   public static void waitForFileDelete(File file, Duration maxDuration)
-      throws IOException {
+      throws RocksDatabaseException {
     if (!RatisHelper.attemptUntilTrue(() -> !file.exists(), POLL_INTERVAL_DURATION, maxDuration)) {
       String msg = String.format("File: %s didn't get deleted in %s secs.",
           file.getAbsolutePath(), maxDuration.getSeconds());
-      LOG.info(msg);
-      throw new IOException(msg);
+      LOG.warn(msg);
+      throw new RocksDatabaseException(msg);
     }
   }
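A hedged sketch of the polling contract: the call returns normally as soon as the file is gone and throws RocksDatabaseException, after a WARN log, once maxDuration elapses (WaitSketch and the 60-second bound mirror the deleteFile usage above; the class name is illustrative):

    import java.io.File;
    import java.time.Duration;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;
    import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksObjectUtils;

    final class WaitSketch {
      private WaitSketch() { }

      static void awaitDeletion(File file) throws RocksDatabaseException {
        // Polls until the file disappears; throws after 60 seconds.
        ManagedRocksObjectUtils.waitForFileDelete(file, Duration.ofSeconds(60));
      }
    }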
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
similarity index 69%
rename from hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java
rename to hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
index 46e51b142ae3..6f61339289ed 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DumpFileLoader.java
+++ b/hadoop-hdds/managed-rocksdb/src/main/java/org/apache/hadoop/hdds/utils/db/package-info.java
@@ -15,25 +15,5 @@
  * limitations under the License.
  */
 
+/** RocksDB related classes. */
 package org.apache.hadoop.hdds.utils.db;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.IOException;
-
-/**
- * Interface for loading data from a dump file.
- */
-public interface DumpFileLoader extends Closeable {
-
-  /**
-   * Load key value pairs from an external dump file.
-   */
-  void load(File externalFile) throws IOException;
-
-  /**
-   * Close this file loader.
-   */
-  @Override
-  void close();
-}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
index 1e94899594b9..a4bc9e4f0d8d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/SstFilteringService.java
@@ -47,7 +47,6 @@
 import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
 import org.apache.hadoop.ozone.om.lock.OMLockDetails;
 import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
-import org.rocksdb.RocksDBException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -225,7 +224,7 @@ public BackgroundTaskResult call() throws Exception {
           }
         }
       }
-    } catch (RocksDBException | IOException e) {
+    } catch (IOException e) {
       if (isSnapshotDeleted(snapshotInfoTable.get(snapShotTableKey))) {
         LOG.info("Exception encountered while filtering a snapshot: {} since it was deleted midway",
             snapShotTableKey, e);
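The narrowed catch works because the new RocksDatabaseException extends IOException, so one IOException clause covers plain IO failures and wrapped RocksDB failures alike. A minimal, hedged demonstration (CatchSketch is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.hdds.utils.db.RocksDatabaseException;

    final class CatchSketch {
      private CatchSketch() { }

      static void demo() {
        try {
          throw new RocksDatabaseException("Rocks Database is closed");
        } catch (IOException e) {
          // The RocksDatabaseException lands here; no separate
          // RocksDBException clause is needed any more.
        }
      }
    }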