Skip to content
Closed
Show file tree
Hide file tree
Changes from 6 commits
Commits
Show all changes
32 commits
Select commit Hold shift + click to select a range
902c701
HDDS-3172. Deprecate MetadataStore interface
elek Mar 13, 2020
fda35bb
fix NPE
elek Mar 14, 2020
eb341bd
fix findbugs / unit test and acceptance test problems
elek Mar 16, 2020
4d07fa3
Retrigger build with empty commit.
elek Mar 18, 2020
68f7a0f
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Mar 19, 2020
f3e4ac2
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Mar 23, 2020
3b52e14
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Mar 27, 2020
876ff67
use recon db definition
elek Mar 27, 2020
353502b
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Mar 27, 2020
21deb6d
retrigger build
elek Mar 30, 2020
0a2ec3f
fix checkstyle problem
elek Mar 30, 2020
dd244be
fixing unit test (isolated test cases)
elek Mar 30, 2020
cd45050
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Mar 30, 2020
5d330ec
fix db store initialization
elek Apr 1, 2020
80cb1d3
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 2, 2020
7527553
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 3, 2020
34d25b1
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 8, 2020
4a8721c
multiple the number of threads with the number of disks
elek Apr 8, 2020
1307209
retrigger build
elek Apr 8, 2020
a3c3b01
Revert "multiple the number of threads with the number of disks"
elek Apr 14, 2020
72a55d1
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 14, 2020
0ddd27e
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 14, 2020
d745282
separate creation and type definition logic
elek Apr 17, 2020
8f7f59a
Update hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/sc…
elek Apr 17, 2020
26c8a58
Use ContainerID as key instead of Long
elek Apr 17, 2020
701e5d1
Merge remote-tracking branch 'elek/HDDS-3172' into HDDS-3172
elek Apr 17, 2020
f6b60be
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 21, 2020
884cc16
address review comments
elek Apr 21, 2020
e6285b9
retrigger build
elek Apr 21, 2020
78ca84c
Merge remote-tracking branch 'origin/master' into HDDS-3172
elek Apr 21, 2020
8490ff7
fix merge problem with the master
elek Apr 21, 2020
1a57c78
fix merge problem
elek Apr 21, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -114,14 +114,9 @@ public final class OzoneConsts {
*/
public static final String CONTAINER_DB_SUFFIX = "container.db";
public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
public static final String DELETED_BLOCK_DB = "deletedBlock.db";
public static final String OM_DB_NAME = "om.db";
public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db";
public static final String SCM_DB_NAME = "scm.db";

public static final String STORAGE_DIR_CHUNKS = "chunks";
public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH =
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.hdds.utils.db;

import java.io.IOException;

/**
 * Create and commit batch operation for one DB.
 * <p>
 * Extracted from DBStore so that components which only need atomic
 * batch write support can depend on this narrow interface instead of
 * the full store API.
 */
public interface BatchOperationHandler {

  /**
   * Initialize an atomic batch operation which can hold multiple PUT/DELETE
   * operations and committed later in one step.
   *
   * @return BatchOperation holder which can be used to add or commit batch
   * operations.
   */
  BatchOperation initBatchOperation();

  /**
   * Commit the batch operations.
   *
   * @param operation which contains all the required batch operation.
   * @throws IOException on Failure.
   */
  void commitBatchOperation(BatchOperation operation) throws IOException;
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.hdds.utils.db;

import java.io.IOException;

/**
 * Class represents one single column table with the required codecs and types.
 * <p>
 * Immutable value holder: the table name, the key/value Java types and the
 * codecs used to (de)serialize them are all fixed at construction time.
 *
 * @param <KEY> the type of the key.
 * @param <VALUE> the type of the value.
 */
public class DBColumnFamilyDefinition<KEY, VALUE> {

  private final String tableName;

  private final Class<KEY> keyType;

  private final Codec<KEY> keyCodec;

  private final Class<VALUE> valueType;

  private final Codec<VALUE> valueCodec;

  public DBColumnFamilyDefinition(
      String tableName,
      Class<KEY> keyType,
      Codec<KEY> keyCodec,
      Class<VALUE> valueType,
      Codec<VALUE> valueCodec) {
    this.tableName = tableName;
    this.keyType = keyType;
    this.keyCodec = keyCodec;
    this.valueType = valueType;
    this.valueCodec = valueCodec;
  }

  /**
   * Open the typed table of this column family from an existing store.
   *
   * @param db the store which contains the table.
   * @return the typed Table view.
   * @throws IOException if the table cannot be opened.
   */
  public Table<KEY, VALUE> getTable(DBStore db) throws IOException {
    return db.getTable(tableName, keyType, valueType);
  }

  /**
   * Register the table name and both codecs of this column family on the
   * given store builder.
   *
   * @param storeBuilder builder of the enclosing DB store.
   */
  public void registerTable(DBStoreBuilder storeBuilder) {
    storeBuilder.addTable(tableName)
        .addCodec(keyType, keyCodec)
        .addCodec(valueType, valueCodec);
  }

  /**
   * @return the name of the column family (table).
   */
  public String getName() {
    return tableName;
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.hadoop.hdds.utils.db;

import java.io.File;
import java.io.IOException;
import java.nio.file.Paths;

import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Simple interface to provide a db store.
 * <p>
 * A definition describes one DB: its logical name, the configuration key of
 * its parent directory and the column families it contains. The default
 * methods build a ready-to-use DBStore from that description.
 */
public interface DBDefinition {

  Logger LOG = LoggerFactory.getLogger(DBDefinition.class);

  /**
   * @return logical name of the DB (also used as the store directory name).
   */
  String getName();

  /**
   * @return configuration key which defines the parent directory of the DB.
   */
  String getLocationConfigKey();

  /**
   * @return the column families (tables) contained in this DB.
   */
  DBColumnFamilyDefinition[] getColumnFamilies();

  /**
   * Register all the column families of this definition on the builder.
   */
  default void registerTables(DBStoreBuilder builder) {
    for (DBColumnFamilyDefinition columnFamily : getColumnFamilies()) {
      columnFamily.registerTable(builder);
    }
  }

  /**
   * Create a DBStoreBuilder configured with the name and location of this
   * DB, without the table definitions (see {@link #registerTables}).
   */
  default DBStoreBuilder createDBStoreBuilder(
      OzoneConfiguration configuration) {

    File metadataDir = getDirectoryFromConfig(configuration,
        getLocationConfigKey(), getName());

    if (metadataDir == null) {
      // Fall back to the generic Ozone metadata dir when the DB-specific
      // location key is not set.
      LOG.warn("{} is not configured. We recommend adding this setting. " +
              "Falling back to {} instead.",
          getLocationConfigKey(), HddsConfigKeys.OZONE_METADATA_DIRS);
      metadataDir = getOzoneMetaDirPath(configuration);
    }

    return DBStoreBuilder.newBuilder(configuration)
        .setName(getName())
        .setPath(Paths.get(metadataDir.getPath()));
  }

  /**
   * Create a new DB store instance based on the configuration, with all the
   * column families of this definition registered.
   */
  default DBStore createDBStore(OzoneConfiguration configuration)
      throws IOException {
    DBStoreBuilder builder = createDBStoreBuilder(configuration);
    registerTables(builder);
    return builder.build();
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
*
*/
@InterfaceStability.Evolving
public interface DBStore extends AutoCloseable {
public interface DBStore extends AutoCloseable, BatchOperationHandler {

/**
* Gets an existing TableStore.
Expand Down Expand Up @@ -141,22 +141,6 @@ <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
*/
long getEstimatedKeyCount() throws IOException;

/**
* Initialize an atomic batch operation which can hold multiple PUT/DELETE
* operations and committed later in one step.
*
* @return BatchOperation holder which can be used to add or commit batch
* operations.
*/
BatchOperation initBatchOperation();

/**
* Commit the batch operations.
*
* @param operation which contains all the required batch operation.
* @throws IOException on Failure.
*/
void commitBatchOperation(BatchOperation operation) throws IOException;

/**
* Get current snapshot of DB store as an artifact stored on
Expand Down
Loading