@@ -52,33 +52,6 @@ default <KEY, VALUE> TypedTable<KEY, VALUE> getTable(String name, Codec<KEY> key
return getTable(name, keyCodec, valueCodec, CacheType.PARTIAL_CACHE);
}

- /**
-  * Gets an existing TableStore with implicit key/value conversion and
-  * with default cache type for cache. Default cache type is partial cache.
-  *
-  * @param name - Name of the TableStore to get
-  * @param keyType
-  * @param valueType
-  * @return - TableStore.
-  * @throws IOException on Failure
-  */
- <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-     Class<KEY> keyType, Class<VALUE> valueType) throws IOException;
-
- /**
-  * Gets an existing TableStore with implicit key/value conversion and
-  * with specified cache type.
-  * @param name - Name of the TableStore to get
-  * @param keyType
-  * @param valueType
-  * @param cacheType
-  * @return - TableStore.
-  * @throws IOException
-  */
- <KEY, VALUE> Table<KEY, VALUE> getTable(String name,
-     Class<KEY> keyType, Class<VALUE> valueType,
-     TableCache.CacheType cacheType) throws IOException;

/**
* Gets table store with implicit key/value conversion.
*
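Net effect for callers of the DBStore interface: the Class-token overloads above are gone, and codecs are passed explicitly. A minimal before/after sketch (store stands in for any DBStore instance; StringCodec and CacheType are the same types exercised in the test hunks below):

    // Removed overload: codecs were resolved from a CodecRegistry via Class tokens.
    // Table<String, String> table = store.getTable("Ten", String.class, String.class);

    // Remaining overloads: pass the codecs directly; the cache type defaults to PARTIAL_CACHE.
    Table<String, String> cached = store.getTable("Ten", StringCodec.get(), StringCodec.get());
    Table<String, String> uncached = store.getTable("Ten", StringCodec.get(), StringCodec.get(), CacheType.NO_CACHE);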
@@ -61,7 +61,6 @@ public class RDBStore implements DBStore {
LoggerFactory.getLogger(RDBStore.class);
private final RocksDatabase db;
private final File dbLocation;
- private final CodecRegistry codecRegistry;
private RocksDBStoreMetrics metrics;
private final RDBCheckpointManager checkPointManager;
private final String checkpointsParentDir;
@@ -90,7 +89,6 @@ public RDBStore(File dbFile, ManagedDBOptions dbOptions, ManagedStatistics stati
Preconditions.checkNotNull(families);
Preconditions.checkArgument(!families.isEmpty());
this.maxDbUpdatesSizeThreshold = maxDbUpdatesSizeThreshold;
- codecRegistry = registry;
dbLocation = dbFile;
this.dbOptions = dbOptions;
this.statistics = statistics;
@@ -297,26 +295,12 @@ public RDBTable getTable(String name) throws IOException {
return new RDBTable(this.db, handle, rdbMetrics);
}

- @Override
- public <K, V> TypedTable<K, V> getTable(String name,
-     Class<K> keyType, Class<V> valueType) throws IOException {
-   return new TypedTable<>(getTable(name), codecRegistry, keyType,
-       valueType);
- }

@Override
public <K, V> TypedTable<K, V> getTable(
String name, Codec<K> keyCodec, Codec<V> valueCodec, TableCache.CacheType cacheType) throws IOException {
return new TypedTable<>(getTable(name), keyCodec, valueCodec, cacheType);
}

- @Override
- public <K, V> Table<K, V> getTable(String name,
-     Class<K> keyType, Class<V> valueType,
-     TableCache.CacheType cacheType) throws IOException {
-   return new TypedTable<>(getTable(name), codecRegistry, keyType, valueType, cacheType);
- }

@Override
public ArrayList<Table> listTables() {
ArrayList<Table> returnList = new ArrayList<>();
@@ -68,31 +68,6 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
= new CodecBuffer.Capacity(this, BUFFER_SIZE_DEFAULT);
private final TableCache<KEY, VALUE> cache;

- /**
-  * The same as this(rawTable, codecRegistry, keyType, valueType,
-  * CacheType.PARTIAL_CACHE).
-  */
- TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType)
-     throws IOException {
-   this(rawTable, codecRegistry, keyType, valueType, CacheType.PARTIAL_CACHE);
- }
-
- /**
-  * Create an TypedTable from the raw table with specified cache type.
-  *
-  * @param rawTable The underlying (untyped) table in RocksDB.
-  * @param codecRegistry To look up codecs.
-  * @param keyType The key type.
-  * @param valueType The value type.
-  * @param cacheType How to cache the entries?
-  * @throws IOException if failed to iterate the raw table.
-  */
- TypedTable(RDBTable rawTable, CodecRegistry codecRegistry, Class<KEY> keyType, Class<VALUE> valueType,
-     CacheType cacheType) throws IOException {
-   this(rawTable, codecRegistry.getCodecFromClass(keyType), codecRegistry.getCodecFromClass(valueType),
-       cacheType);
- }

/**
* Create a TypedTable from the raw table with specified cache type.
*
@@ -102,8 +77,7 @@ public class TypedTable<KEY, VALUE> implements Table<KEY, VALUE> {
* @param cacheType How to cache the entries?
* @throws IOException
*/
- public TypedTable(
-     RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
+ TypedTable(RDBTable rawTable, Codec<KEY> keyCodec, Codec<VALUE> valueCodec, CacheType cacheType) throws IOException {
this.rawTable = Objects.requireNonNull(rawTable, "rawTable==null");
this.keyCodec = Objects.requireNonNull(keyCodec, "keyCodec == null");
this.valueCodec = Objects.requireNonNull(valueCodec, "valueCodec == null");
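With the registry-based constructors removed, every TypedTable is now built from a raw RDBTable plus explicit codecs, and the surviving constructor has been narrowed from public to package-private. A sketch of the remaining construction path, mirroring the updated tests below (rdbStore is assumed to be an open RDBStore with a column family named "Ten"; the call must now sit in the org.apache.hadoop.hdds.utils.db package because the constructor is package-private):

    final TypedTable<String, String> table = new TypedTable<>(
        rdbStore.getTable("Ten"), StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
    table.put("key1", "value1");
    final String value = table.get("key1"); // "value1"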
@@ -42,7 +42,7 @@
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.StringUtils;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
- import org.apache.hadoop.hdds.utils.db.cache.TableCache;
+ import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.junit.jupiter.api.AfterEach;
@@ -309,13 +309,13 @@ public void batchDelete() throws Exception {

@Test
public void putGetTypedTableCodec() throws Exception {
- try (Table<String, String> testTable = rdbStore.getTable("Ten", String.class, String.class)) {
+ try (Table<String, String> testTable = rdbStore.getTable("Ten", StringCodec.get(), StringCodec.get())) {
testTable.put("test1", "123");
assertFalse(testTable.isEmpty());
assertEquals("123", testTable.get("test1"));
}
try (Table<String, ByteString> testTable = rdbStore.getTable("Ten",
-     StringCodec.get(), ByteStringCodec.get(), TableCache.CacheType.NO_CACHE)) {
+     StringCodec.get(), ByteStringCodec.get(), CacheType.NO_CACHE)) {
assertEquals("123", testTable.get("test1").toStringUtf8());
}
}
@@ -407,8 +407,7 @@ public void testGetByteBuffer() throws Exception {
final String tableName = families.get(0);
try (RDBTable testTable = rdbStore.getTable(tableName)) {
final TypedTable<String, String> typedTable = new TypedTable<>(
-     testTable, CodecRegistry.newBuilder().build(),
-     String.class, String.class);
+     testTable, StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);

for (int i = 0; i < 20; i++) {
final int valueSize = TypedTable.BUFFER_SIZE_DEFAULT * i / 4;
@@ -594,7 +593,7 @@ public void testStringPrefixedIterator() throws Exception {
final List<Map<String, String>> data = generateKVs(prefixes, keyCount);

try (TypedTable<String, String> table = rdbStore.getTable(
"PrefixFirst", String.class, String.class)) {
"PrefixFirst", StringCodec.get(), StringCodec.get())) {
populateTable(table, data);
for (String prefix : prefixes) {
assertIterator(keyCount, prefix, table);
@@ -633,7 +632,7 @@ static void assertIterator(int expectedCount, String prefix,
@Test
public void testStringPrefixedIteratorCloseDb() throws Exception {
try (Table<String, String> testTable = rdbStore.getTable(
"PrefixFirst", String.class, String.class)) {
"PrefixFirst", StringCodec.get(), StringCodec.get())) {
// iterator should seek to right pos in the middle
rdbStore.close();
assertThrows(IOException.class, () -> testTable.iterator("abc"));
@@ -44,6 +44,7 @@
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+ import org.apache.hadoop.hdds.utils.db.cache.TableCache.CacheType;
import org.apache.hadoop.hdds.utils.db.managed.ManagedColumnFamilyOptions;
import org.apache.hadoop.hdds.utils.db.managed.ManagedDBOptions;
import org.apache.ozone.test.GenericTestUtils;
@@ -69,7 +70,6 @@ public class TestTypedRDBTableStore {
"Ninth", "Ten");
private RDBStore rdbStore = null;
private ManagedDBOptions options = null;
- private CodecRegistry codecRegistry;

@BeforeEach
public void setUp(@TempDir File tempDir) throws Exception {
@@ -91,9 +91,6 @@ public void setUp(@TempDir File tempDir) throws Exception {
}
rdbStore = TestRDBStore.newRDBStore(tempDir, options, configSet,
MAX_DB_UPDATES_SIZE_THRESHOLD);

- codecRegistry = CodecRegistry.newBuilder().build();

}

@AfterEach
@@ -123,10 +120,9 @@ public void putGetAndEmpty() throws Exception {

private Table<String, String> createTypedTable(String name)
throws IOException {
- return new TypedTable<String, String>(
+ return new TypedTable<>(
rdbStore.getTable(name),
-     codecRegistry,
-     String.class, String.class);
+     StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE);
}

@Test
@@ -253,7 +249,7 @@ public void testIteratorOnException() throws Exception {
when(rdbTable.iterator((CodecBuffer) null))
.thenThrow(new IOException());
try (Table<String, String> testTable = new TypedTable<>(rdbTable,
-     codecRegistry, String.class, String.class)) {
+     StringCodec.get(), StringCodec.get(), CacheType.PARTIAL_CACHE)) {
assertThrows(IOException.class, testTable::iterator);
}
}
@@ -411,8 +407,7 @@ public void testCountEstimatedRowsInTable() throws Exception {
public void testByteArrayTypedTable() throws Exception {
try (Table<byte[], byte[]> testTable = new TypedTable<>(
rdbStore.getTable("Ten"),
-     codecRegistry,
-     byte[].class, byte[].class)) {
+     ByteArrayCodec.get(), ByteArrayCodec.get(), CacheType.PARTIAL_CACHE)) {
byte[] key = new byte[] {1, 2, 3};
byte[] value = new byte[] {4, 5, 6};
testTable.put(key, value);
@@ -34,6 +34,7 @@
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.RDBStore;
+ import org.apache.hadoop.hdds.utils.db.StringCodec;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.hdds.utils.db.cache.TableCache;
@@ -112,7 +113,7 @@ private void initializeNewRdbStore(File dbFile) throws IOException {
@Override
public Table<String, KeyEntityInfoProtoWrapper> getKeyTableLite(BucketLayout bucketLayout) throws IOException {
String tableName = bucketLayout.isFileSystemOptimized() ? FILE_TABLE : KEY_TABLE;
- return getStore().getTable(tableName, String.class, KeyEntityInfoProtoWrapper.class);
+ return getStore().getTable(tableName, StringCodec.get(), KeyEntityInfoProtoWrapper.getCodec());
}

@Override
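The Recon call site above shows the convention the codec-based API leans on: each value type exposes its codec through a static accessor (KeyEntityInfoProtoWrapper.getCodec() here, StringCodec.get() for strings), so no registry lookup is needed. A hypothetical value class following that convention (MyValue and MyValueCodec are illustrative names, not part of this change):

    public final class MyValue {
      // Assumed: some Codec<MyValue> implementation exists; the convention is the static accessor.
      private static final Codec<MyValue> CODEC = new MyValueCodec();

      public static Codec<MyValue> getCodec() {
        return CODEC;
      }
    }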
@@ -41,6 +41,7 @@
import org.apache.hadoop.ozone.freon.FreonSubcommand;
import org.apache.hadoop.ozone.om.OMStorage;
import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+ import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo.Builder;
@@ -111,9 +112,7 @@ public Void call() throws Exception {
// initialization: create one bucket and volume in OM.
writeOmBucketVolume();

- omKeyTable = omDb.getTable(OmMetadataManagerImpl.KEY_TABLE, String.class,
-     OmKeyInfo.class);
-
+ omKeyTable = OMDBDefinition.KEY_TABLE.getTable(omDb);
timer = getMetrics().timer("om-generator");
runTests(this::writeOmKeys);

@@ -142,9 +141,7 @@ public void writeOmKeys(long index) throws Exception {

private void writeOmBucketVolume() throws IOException {

- Table<String, OmVolumeArgs> volTable =
-     omDb.getTable(OmMetadataManagerImpl.VOLUME_TABLE, String.class,
-         OmVolumeArgs.class);
+ final Table<String, OmVolumeArgs> volTable = OMDBDefinition.VOLUME_TABLE.getTable(omDb);

String admin = getUserId();
String owner = getUserId();
@@ -166,9 +163,7 @@

volTable.put("/" + volumeName, omVolumeArgs);

- final Table<String, PersistedUserVolumeInfo> userTable =
-     omDb.getTable(OmMetadataManagerImpl.USER_TABLE, String.class,
-         PersistedUserVolumeInfo.class);
+ final Table<String, PersistedUserVolumeInfo> userTable = OMDBDefinition.USER_TABLE.getTable(omDb);

PersistedUserVolumeInfo currentUserVolumeInfo =
userTable.get(getUserId());
@@ -189,9 +184,7 @@

userTable.put(getUserId(), currentUserVolumeInfo);

- Table<String, OmBucketInfo> bucketTable =
-     omDb.getTable(OmMetadataManagerImpl.BUCKET_TABLE, String.class,
-         OmBucketInfo.class);
+ final Table<String, OmBucketInfo> bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(omDb);

OmBucketInfo omBucketInfo = new OmBucketInfo.Builder()
.setBucketName(bucketName)
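The Freon changes above replace per-call-site Class tokens with table definitions from OMDBDefinition, each of which bundles a table's name with its key and value codecs. The resulting usage pattern, as in the hunks above (omDb is the open OM DBStore from the surrounding code; keyName and keyInfo are illustrative locals):

    final Table<String, OmKeyInfo> keyTable = OMDBDefinition.KEY_TABLE.getTable(omDb);
    final Table<String, OmBucketInfo> bucketTable = OMDBDefinition.BUCKET_TABLE.getTable(omDb);
    // Typed access without naming the codecs at each call site:
    keyTable.put(keyName, keyInfo);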