@@ -312,6 +312,10 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY =
"ozone.scm.container.placement.impl";

public static final String OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT =
"org.apache.hadoop.hdds.scm.container.placement.algorithms." +
"SCMContainerPlacementRackAware";

public static final String OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT =
"ozone.scm.pipeline.owner.container.count";
public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
@@ -0,0 +1,21 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container.placement.algorithms;
/**
Contains container placement policy interface definition.
**/
@@ -732,7 +732,7 @@ public String toString() {
try {
// print the number of leaves
int numOfLeaves = clusterTree.getNumOfLeaves();
tree.append("Expected number of leaves:");
tree.append("Number of leaves:");
tree.append(numOfLeaves);
tree.append("\n");
// print all nodes
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.net;

import org.apache.commons.io.FilenameUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.w3c.dom.Document;
@@ -31,7 +32,10 @@
import javax.xml.parsers.ParserConfigurationException;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
@@ -93,23 +97,50 @@ public List<NodeSchema> getSchemaList() {
}

/**
* Load user defined network layer schemas from a XML configuration file.
* Load user defined network layer schemas from a XML/YAML configuration file.
* @param schemaFilePath path of schema file
* @return all valid node schemas defined in schema file
*/
public NodeSchemaLoadResult loadSchemaFromXml(String schemaFilePath)
throws IllegalArgumentException {
public NodeSchemaLoadResult loadSchemaFromFile(String schemaFilePath)
throws IllegalArgumentException, FileNotFoundException {
try {
File schemaFile = new File(schemaFilePath);
if (!schemaFile.exists()) {
String msg = "Network topology layer schema file " + schemaFilePath +
" is not found.";
// try to load with classloader
ClassLoader classloader =
Thread.currentThread().getContextClassLoader();
if (classloader == null) {
classloader = NodeSchemaLoader.class.getClassLoader();
}
if (classloader != null) {
URL url = classloader.getResource(schemaFilePath);
if (url != null) {
schemaFile = new File(url.toURI());
}
}
}

if (!schemaFile.exists()) {
String msg = "Network topology layer schema file " +
schemaFilePath + "[" + schemaFile.getAbsolutePath() +
"] is not found.";
LOG.warn(msg);
throw new IllegalArgumentException(msg);
throw new FileNotFoundException(msg);
}
return loadSchema(schemaFile);
} catch (ParserConfigurationException | IOException | SAXException e) {
throw new IllegalArgumentException("Fail to load network topology node"

LOG.info("Load network topology schema file " +
schemaFile.getCanonicalPath());
if (FilenameUtils.getExtension(schemaFilePath).toLowerCase()
.compareTo("yaml") == 0) {
return loadSchemaFromYaml(schemaFile);
} else {
return loadSchema(schemaFile);
}
} catch (FileNotFoundException e) {
throw e;
} catch (ParserConfigurationException | IOException | SAXException |
URISyntaxException e) {
throw new IllegalArgumentException("Failed to load network topology node"
+ " schema file: " + schemaFilePath + " , error:" + e.getMessage());
}
}
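For orientation, a minimal sketch (not part of this patch) of how the unified entry point might be called; the schema file name and the surrounding driver code are illustrative assumptions:

// Illustrative only: the schema path below is a placeholder, not a shipped
// resource. loadSchemaFromFile() first looks on disk, then on the classpath,
// and dispatches to the YAML parser when the extension is ".yaml".
try {
  NodeSchemaLoader.NodeSchemaLoadResult result =
      NodeSchemaLoader.getInstance()
          .loadSchemaFromFile("network-topology-example.yaml");
  List<NodeSchema> schemas = result.getSchemaList();
  System.out.println("Loaded " + schemas.size() + " schema layers");
} catch (FileNotFoundException e) {
  // Raised when the file is found neither on disk nor on the classpath.
}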
@@ -167,29 +198,6 @@ private NodeSchemaLoadResult loadSchema(File schemaFile) throws
return schemaList;
}

/**
* Load user defined network layer schemas from a YAML configuration file.
* @param schemaFilePath path of schema file
* @return all valid node schemas defined in schema file
*/
public NodeSchemaLoadResult loadSchemaFromYaml(String schemaFilePath)
throws IllegalArgumentException {
try {
File schemaFile = new File(schemaFilePath);
if (!schemaFile.exists()) {
String msg = "Network topology layer schema file " + schemaFilePath +
" is not found.";
LOG.warn(msg);
throw new IllegalArgumentException(msg);
}
return loadSchemaFromYaml(schemaFile);
} catch (Exception e) {
throw new IllegalArgumentException("Fail to load network topology node"
+ " schema file: " + schemaFilePath + " , error:"
+ e.getMessage());
}
}

/**
* Load network topology layer schemas from a YAML configuration file.
* @param schemaFile schema file
@@ -19,7 +19,6 @@

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.io.FilenameUtils;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult;
import org.slf4j.Logger;
@@ -63,20 +62,14 @@ public void init(Configuration conf) {
String schemaFile = conf.get(
ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE,
ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE_DEFAULT);

NodeSchemaLoadResult result;
try {
if (FilenameUtils.getExtension(schemaFile).toLowerCase()
.compareTo("yaml") == 0) {
result = NodeSchemaLoader.getInstance().loadSchemaFromYaml(schemaFile);
} else {
result = NodeSchemaLoader.getInstance().loadSchemaFromXml(schemaFile);
}
result = NodeSchemaLoader.getInstance().loadSchemaFromFile(schemaFile);
allSchema = result.getSchemaList();
enforcePrefix = result.isEnforePrefix();
maxLevel = allSchema.size();
} catch (Throwable e) {
String msg = "Fail to load schema file:" + schemaFile
String msg = "Failed to load schema file:" + schemaFile
+ ", error:" + e.getMessage();
LOG.error(msg);
throw new RuntimeException(msg);
8 changes: 5 additions & 3 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -815,11 +815,13 @@
<property>
<name>ozone.scm.container.placement.impl</name>
<value>
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware
</value>
<tag>OZONE, MANAGEMENT</tag>
<description>Placement policy class for containers.
Defaults to SCMContainerPlacementRandom.class
<description>
The full name of the class that implements org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy.
The class decides which datanode will host the container replica. If not set,
org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRackAware is used as the default.
</description>
</property>
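For illustration (this snippet is not in the patch), a test or tool could override the new rack-aware default programmatically; only the key and class names come from this change, the Configuration setup around them is an assumption:

// Hypothetical override: fall back to the random policy, e.g. for a
// single-rack test cluster.
Configuration conf = new Configuration();
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
    SCMContainerPlacementRandom.class.getCanonicalName());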
<property>
@@ -44,7 +44,7 @@ public TestNodeSchemaLoader(String schemaFile, String errMsg) {
try {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/" + schemaFile).getPath();
NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
fail("expect exceptions");
} catch (Throwable e) {
assertTrue(e.getMessage().contains(errMsg));
@@ -83,7 +83,7 @@ public void testGood() {
try {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/good.xml").getPath();
NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
} catch (Throwable e) {
fail("should succeed");
}
@@ -94,10 +94,10 @@ public void testNotExist() {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/good.xml").getPath() + ".backup";
try {
NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
fail("should fail");
} catch (Throwable e) {
assertTrue(e.getMessage().contains("file " + filePath + " is not found"));
assertTrue(e.getMessage().contains("not found"));
}
}
}
@@ -79,7 +79,7 @@ public void testInitFailure() {
manager.init(conf);
fail("should fail");
} catch (Throwable e) {
assertTrue(e.getMessage().contains("Fail to load schema file:" +
assertTrue(e.getMessage().contains("Failed to load schema file:" +
filePath));
}
}
@@ -44,7 +44,7 @@ public TestYamlSchemaLoader(String schemaFile, String errMsg) {
try {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/" + schemaFile).getPath();
NodeSchemaLoader.getInstance().loadSchemaFromYaml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
fail("expect exceptions");
} catch (Throwable e) {
assertTrue(e.getMessage().contains(errMsg));
@@ -69,7 +69,7 @@ public void testGood() {
try {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/good.yaml").getPath();
NodeSchemaLoader.getInstance().loadSchemaFromYaml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
} catch (Throwable e) {
fail("should succeed");
}
@@ -78,12 +78,12 @@ public void testGood() {
@Test
public void testNotExist() {
String filePath = classLoader.getResource(
"./networkTopologyTestFiles/good.xml").getPath() + ".backup";
"./networkTopologyTestFiles/good.yaml").getPath() + ".backup";
try {
NodeSchemaLoader.getInstance().loadSchemaFromXml(filePath);
NodeSchemaLoader.getInstance().loadSchemaFromFile(filePath);
fail("should fail");
} catch (Throwable e) {
assertTrue(e.getMessage().contains("file " + filePath + " is not found"));
assertTrue(e.getMessage().contains("not found"));
}
}

5 changes: 5 additions & 0 deletions hadoop-hdds/server-scm/pom.xml
@@ -141,5 +141,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
</executions>
</plugin>
</plugins>
<testResources>
<testResource>
<directory>${basedir}/../../hadoop-hdds/common/src/main/resources</directory>
</testResource>
</testResources>
</build>
</project>
@@ -0,0 +1,67 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdds.scm.container.placement.algorithms;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.lang.reflect.Constructor;

/**
* A factory to create a container placement policy instance based on the
* configuration property ozone.scm.container.placement.impl.
*/
public final class ContainerPlacementPolicyFactory {
private static final Logger LOG =
LoggerFactory.getLogger(ContainerPlacementPolicyFactory.class);

private ContainerPlacementPolicyFactory() {
}

public static ContainerPlacementPolicy getPolicy(Configuration conf,
final NodeManager nodeManager, NetworkTopology clusterMap,
final boolean fallback) throws SCMException{
final Class<? extends ContainerPlacementPolicy> placementClass = conf
.getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
SCMContainerPlacementRackAware.class,
ContainerPlacementPolicy.class);
Constructor<? extends ContainerPlacementPolicy> constructor;
try {
constructor = placementClass.getDeclaredConstructor(NodeManager.class,
Configuration.class, NetworkTopology.class, boolean.class);
} catch (NoSuchMethodException e) {
String msg = "Failed to find constructor(NodeManager, Configuration, " +
"NetworkTopology, boolean) for class " +
placementClass.getCanonicalName();
LOG.error(msg);
throw new SCMException(msg,
SCMException.ResultCodes.FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY);
}

try {
return constructor.newInstance(nodeManager, conf, clusterMap, fallback);
} catch (Exception e) {
throw new RuntimeException("Failed to instantiate class " +
placementClass.getCanonicalName() + " for " + e.getMessage());
}
}
}
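A hedged usage sketch for the new factory (not part of the patch); conf, nodeManager and clusterMap are assumed to already exist in the calling code, and getPolicy() can throw SCMException:

// Illustrative call site: the factory reflects on the
// (NodeManager, Configuration, NetworkTopology, boolean) constructor of the
// configured class and defaults to SCMContainerPlacementRackAware.
ContainerPlacementPolicy policy = ContainerPlacementPolicyFactory
    .getPolicy(conf, nodeManager, clusterMap, true /* allow fallback */);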
@@ -23,6 +23,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.NodeManager;

import com.google.common.annotations.VisibleForTesting;
@@ -77,7 +78,8 @@ public final class SCMContainerPlacementCapacity extends SCMCommonPolicy {
* @param conf Configuration
*/
public SCMContainerPlacementCapacity(final NodeManager nodeManager,
final Configuration conf) {
final Configuration conf, final NetworkTopology networkTopology,
final boolean fallback) {
super(nodeManager, conf);
}

@@ -20,6 +20,7 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.slf4j.Logger;
@@ -49,7 +50,8 @@ public final class SCMContainerPlacementRandom extends SCMCommonPolicy
* @param conf Config
*/
public SCMContainerPlacementRandom(final NodeManager nodeManager,
final Configuration conf) {
final Configuration conf, final NetworkTopology networkTopology,
final boolean fallback) {
super(nodeManager, conf);
}

@@ -119,6 +119,7 @@ public enum ResultCodes {
DUPLICATE_DATANODE,
NO_SUCH_DATANODE,
NO_REPLICA_FOUND,
FAILED_TO_FIND_ACTIVE_PIPELINE
FAILED_TO_FIND_ACTIVE_PIPELINE,
FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY
}
}