Commit 0466ade

HDDS-3172. Use DBStore instead of MetadataStore in SCM
Closes #700
Parent: 2098516

36 files changed: +1054 −393 lines

hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

Lines changed: 0 additions & 5 deletions
@@ -114,14 +114,9 @@ public final class OzoneConsts {
    */
   public static final String CONTAINER_DB_SUFFIX = "container.db";
   public static final String PIPELINE_DB_SUFFIX = "pipeline.db";
-  public static final String SCM_CONTAINER_DB = "scm-" + CONTAINER_DB_SUFFIX;
-  public static final String SCM_PIPELINE_DB = "scm-" + PIPELINE_DB_SUFFIX;
   public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX;
-  public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
   public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
-  public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db";
-  public static final String SCM_DB_NAME = "scm.db";
 
   public static final String STORAGE_DIR_CHUNKS = "chunks";
   public static final String OZONE_DB_CHECKPOINT_REQUEST_FLUSH =

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/BatchOperationHandler.java

Lines changed: 44 additions & 0 deletions

@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import java.io.IOException;
+
+/**
+ * Create and commit batch operation for one DB.
+ */
+public interface BatchOperationHandler {
+
+  /**
+   * Initialize an atomic batch operation which can hold multiple PUT/DELETE
+   * operations and committed later in one step.
+   *
+   * @return BatchOperation holder which can be used to add or commit batch
+   * operations.
+   */
+  BatchOperation initBatchOperation();
+
+  /**
+   * Commit the batch operations.
+   *
+   * @param operation which contains all the required batch operation.
+   * @throws IOException on Failure.
+   */
+  void commitBatchOperation(BatchOperation operation) throws IOException;
+}
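
For context, the following sketch shows how a caller is expected to use the extracted interface: stage several mutations on one BatchOperation, then commit them in a single atomic step. The helper class and its method are illustrative assumptions, not part of this commit; Table.get, Table.putWithBatch, and Table.deleteWithBatch are the existing batch-aware methods in this package.

package org.apache.hadoop.hdds.utils.db;

import java.io.IOException;

/**
 * Illustrative helper (not part of this commit): renames a key by
 * staging a delete and a put on one batch, then committing atomically.
 */
public final class BatchOperationExample {

  private BatchOperationExample() {
  }

  public static void renameKey(BatchOperationHandler handler,
      Table<String, String> table,
      String oldKey, String newKey) throws IOException {
    String value = table.get(oldKey);
    BatchOperation batch = handler.initBatchOperation();
    try {
      // Both mutations become visible together, or not at all.
      table.deleteWithBatch(batch, oldKey);
      table.putWithBatch(batch, newKey, value);
      handler.commitBatchOperation(batch);
    } finally {
      batch.close();
    }
  }
}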

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBColumnFamilyDefinition.java

Lines changed: 81 additions & 0 deletions

@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import java.io.IOException;
+
+/**
+ * Class represents one single column table with the required codecs and types.
+ *
+ * @param <KEY> the type of the key.
+ * @param <VALUE> the type of the value.
+ */
+public class DBColumnFamilyDefinition<KEY, VALUE> {
+
+  private final String tableName;
+
+  private final Class<KEY> keyType;
+
+  private final Codec<KEY> keyCodec;
+
+  private final Class<VALUE> valueType;
+
+  private final Codec<VALUE> valueCodec;
+
+  public DBColumnFamilyDefinition(
+      String tableName,
+      Class<KEY> keyType,
+      Codec<KEY> keyCodec,
+      Class<VALUE> valueType,
+      Codec<VALUE> valueCodec) {
+    this.tableName = tableName;
+    this.keyType = keyType;
+    this.keyCodec = keyCodec;
+    this.valueType = valueType;
+    this.valueCodec = valueCodec;
+  }
+
+  public Table<KEY, VALUE> getTable(DBStore db) throws IOException {
+    return db.getTable(tableName, keyType, valueType);
+  }
+
+  public String getName() {
+    return tableName;
+  }
+
+  public String getTableName() {
+    return tableName;
+  }
+
+  public Class<KEY> getKeyType() {
+    return keyType;
+  }
+
+  public Codec<KEY> getKeyCodec() {
+    return keyCodec;
+  }
+
+  public Class<VALUE> getValueType() {
+    return valueType;
+  }
+
+  public Codec<VALUE> getValueCodec() {
+    return valueCodec;
+  }
+}
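
To make the role of this class concrete, here is a hypothetical column family definition. The table name "exampleTable" and the holder class are illustrative assumptions, while StringCodec and LongCodec are existing codecs in this package.

package org.apache.hadoop.hdds.utils.db;

/**
 * Hypothetical example (not part of this commit): a column family
 * mapping String keys to Long values.
 */
public final class ExampleColumnFamilies {

  public static final DBColumnFamilyDefinition<String, Long> EXAMPLE_TABLE =
      new DBColumnFamilyDefinition<>(
          "exampleTable",                  // RocksDB column family name
          String.class, new StringCodec(), // key type and its codec
          Long.class, new LongCodec());    // value type and its codec

  private ExampleColumnFamilies() {
  }
}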

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBDefinition.java

Lines changed: 46 additions & 0 deletions

@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.hdds.utils.db;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Simple interface to provide information to create a DBStore.
+ */
+public interface DBDefinition {
+
+  Logger LOG = LoggerFactory.getLogger(DBDefinition.class);
+
+  /**
+   * Logical name of the DB.
+   */
+  String getName();
+
+  /**
+   * Configuration key that defines the location of the DB.
+   */
+  String getLocationConfigKey();
+
+  /**
+   * The column families present in the DB.
+   */
+  DBColumnFamilyDefinition[] getColumnFamilies();
+
+}
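
Putting the two pieces together, a service can describe its whole store declaratively. The implementation below is a hypothetical sketch: the class name, DB name, and config key are assumptions, not part of this diff. It reuses EXAMPLE_TABLE from the sketch above.

package org.apache.hadoop.hdds.utils.db;

/**
 * Hypothetical DBDefinition (not part of this diff): one store named
 * "example.db" holding a single String-to-Long column family.
 */
public class ExampleDBDefinition implements DBDefinition {

  @Override
  public String getName() {
    return "example.db";
  }

  @Override
  public String getLocationConfigKey() {
    // Hypothetical property naming the directory that holds the DB;
    // when unset, DBStoreBuilder falls back to ozone.metadata.dirs.
    return "ozone.example.db.dirs";
  }

  @Override
  public DBColumnFamilyDefinition[] getColumnFamilies() {
    return new DBColumnFamilyDefinition[] {
        ExampleColumnFamilies.EXAMPLE_TABLE};
  }
}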

hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java

Lines changed: 1 addition & 17 deletions
@@ -35,7 +35,7 @@
  *
  */
 @InterfaceStability.Evolving
-public interface DBStore extends AutoCloseable {
+public interface DBStore extends AutoCloseable, BatchOperationHandler {
 
   /**
    * Gets an existing TableStore.
@@ -141,22 +141,6 @@ <KEY, VALUE> void move(KEY sourceKey, KEY destKey, VALUE value,
    */
   long getEstimatedKeyCount() throws IOException;
 
-  /**
-   * Initialize an atomic batch operation which can hold multiple PUT/DELETE
-   * operations and committed later in one step.
-   *
-   * @return BatchOperation holder which can be used to add or commit batch
-   * operations.
-   */
-  BatchOperation initBatchOperation();
-
-  /**
-   * Commit the batch operations.
-   *
-   * @param operation which contains all the required batch operation.
-   * @throws IOException on Failure.
-   */
-  void commitBatchOperation(BatchOperation operation) throws IOException;
 
   /**
    * Get current snapshot of DB store as an artifact stored on
hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java

Lines changed: 49 additions & 5 deletions
@@ -29,12 +29,15 @@
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DB_PROFILE;
+import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig;
+import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
@@ -78,10 +81,11 @@ public final class DBStoreBuilder {
   private String rocksDbStat;
   private RocksDBConfiguration rocksDBConfiguration;
 
-  private DBStoreBuilder(OzoneConfiguration configuration) {
+  private DBStoreBuilder(ConfigurationSource configuration) {
     this(configuration, configuration.getObject(RocksDBConfiguration.class));
   }
-  private DBStoreBuilder(OzoneConfiguration configuration,
+
+  private DBStoreBuilder(ConfigurationSource configuration,
       RocksDBConfiguration rocksDBConfiguration) {
     tables = new HashSet<>();
     tableNames = new LinkedList<>();
@@ -93,8 +97,7 @@ private DBStoreBuilder(OzoneConfiguration configuration,
     this.rocksDBConfiguration = rocksDBConfiguration;
   }
 
-
-  public static DBStoreBuilder newBuilder(OzoneConfiguration configuration) {
+  public static DBStoreBuilder newBuilder(ConfigurationSource configuration) {
     return new DBStoreBuilder(configuration);
   }
 
@@ -263,4 +266,45 @@ private File getDBFile() throws IOException {
     return Paths.get(dbPath.toString(), dbname).toFile();
   }
 
+  private static DBStoreBuilder createDBStoreBuilder(
+      ConfigurationSource configuration, DBDefinition definition) {
+
+    File metadataDir = getDirectoryFromConfig(configuration,
+        definition.getLocationConfigKey(), definition.getName());
+
+    if (metadataDir == null) {
+
+      LOG.warn("{} is not configured. We recommend adding this setting. " +
+              "Falling back to {} instead.",
+          definition.getLocationConfigKey(),
+          HddsConfigKeys.OZONE_METADATA_DIRS);
+      metadataDir = getOzoneMetaDirPath(configuration);
+    }
+
+    return DBStoreBuilder.newBuilder(configuration)
+        .setName(definition.getName())
+        .setPath(Paths.get(metadataDir.getPath()));
+  }
+
+  /**
+   * Create a DBStore from a generic DBDefinition.
+   */
+  public static DBStore createDBStore(ConfigurationSource configuration,
+      DBDefinition definition)
+      throws IOException {
+    DBStoreBuilder builder = createDBStoreBuilder(configuration, definition);
+    for (DBColumnFamilyDefinition columnTableDefinition : definition
+        .getColumnFamilies()) {
+      builder.registerTable(columnTableDefinition);
+    }
+    return builder.build();
+  }
+
+  private <KEY, VALUE> void registerTable(
+      DBColumnFamilyDefinition<KEY, VALUE> definition) {
+    addTable(definition.getName())
+        .addCodec(definition.getKeyType(), definition.getKeyCodec())
+        .addCodec(definition.getValueType(), definition.getValueCodec());
+  }
+
 }
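
With createDBStore in place, standing up a store is a single call. The usage sketch below assumes the hypothetical ExampleDBDefinition and ExampleColumnFamilies from above; DBStore is closeable per the DBStore.java change earlier in this diff.

package org.apache.hadoop.hdds.utils.db;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

/**
 * Usage sketch (not part of this diff): builds the store, which
 * registers every declared column family, then writes through one
 * of its tables.
 */
public final class CreateDBStoreExample {

  private CreateDBStoreExample() {
  }

  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();

    try (DBStore store = DBStoreBuilder.createDBStore(
        conf, new ExampleDBDefinition())) {
      Table<String, Long> table =
          ExampleColumnFamilies.EXAMPLE_TABLE.getTable(store);
      table.put("key", 1L);
    }
  }
}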

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java

Lines changed: 17 additions & 20 deletions
@@ -16,38 +16,35 @@
  */
 package org.apache.hadoop.hdds.scm.block;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
-import org.apache.hadoop.util.Time;
 import org.apache.hadoop.hdds.utils.BackgroundService;
 import org.apache.hadoop.hdds.utils.BackgroundTask;
 import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
 import org.apache.hadoop.hdds.utils.BackgroundTaskResult.EmptyTaskResult;
+import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand;
+import org.apache.hadoop.util.Time;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
-
 /**
  * A background service running in SCM to delete blocks. This service scans

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java

Lines changed: 3 additions & 3 deletions
@@ -16,15 +16,15 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+
 // TODO: Write extensive java doc.
 // This is the main interface of ContainerManager.
 /**
