diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 969f8366efb1..b889103b5ccd 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -297,40 +297,50 @@ public static synchronized XceiverClientMetrics getXceiverClientMetrics() { @ConfigGroup(prefix = "scm.container.client") public static class ScmClientConfig { + @Config(key = "max.size", + defaultValue = "256", + tags = {OZONE, PERFORMANCE}, + description = + "Controls the maximum number of connections that are cached via" + + " client connection pooling. If the number of connections" + + " exceed this count, then the oldest idle connection is " + + "evicted." + ) private int maxSize; - private long staleThreshold; - private int maxOutstandingRequests; - - public long getStaleThreshold(TimeUnit unit) { - return unit.convert(staleThreshold, MILLISECONDS); - } @Config(key = "idle.threshold", type = ConfigType.TIME, timeUnit = MILLISECONDS, defaultValue = "10s", - tags = { OZONE, PERFORMANCE }, + tags = {OZONE, PERFORMANCE}, description = "In the standalone pipelines, the SCM clients use netty to " - + " communicate with the container. It also uses connection pooling" - + " to reduce client side overheads. This allows a connection to" - + " stay idle for a while before the connection is closed." + + " communicate with the container. It also uses connection " + + "pooling" + + " to reduce client side overheads. This allows a connection" + + " to" + + " stay idle for a while before the connection is closed." 
+ ) + private long staleThreshold; + + @Config(key = "max.outstanding.requests", + defaultValue = "100", + tags = {OZONE, PERFORMANCE}, + description = + "Controls the maximum number of outstanding async requests that can" + + " be handled by the Standalone as well as Ratis client." ) - public void setStaleThreshold(long staleThreshold) { - this.staleThreshold = staleThreshold; + private int maxOutstandingRequests; + + public long getStaleThreshold(TimeUnit unit) { + return unit.convert(staleThreshold, MILLISECONDS); } + public int getMaxSize() { return maxSize; } - @Config(key = "max.size", - defaultValue = "256", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of connections that are cached via" - + " client connection pooling. If the number of connections" - + " exceed this count, then the oldest idle connection is evicted." - ) + @VisibleForTesting public void setMaxSize(int maxSize) { this.maxSize = maxSize; } @@ -339,16 +349,6 @@ public int getMaxOutstandingRequests() { return maxOutstandingRequests; } - @Config(key = "max.outstanding.requests", - defaultValue = "100", - tags = { OZONE, PERFORMANCE }, - description = - "Controls the maximum number of outstanding async requests that can" - + " be handled by the Standalone as well as Ratis client." 
- ) - public void setMaxOutstandingRequests(int maxOutstandingRequests) { - this.maxOutstandingRequests = maxOutstandingRequests; - } } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index c0486335cdd2..11018ee11a83 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.conf; +import javax.annotation.PostConstruct; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; @@ -26,6 +27,7 @@ import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import java.io.IOException; +import java.lang.reflect.Field; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Method; import java.net.URL; @@ -103,39 +105,77 @@ public List readPropertyFromXml(URL url) throws JAXBException { */ public T getObject(Class configurationClass) { - T configuration; + T configObject; try { - configuration = configurationClass.newInstance(); + configObject = configurationClass.newInstance(); } catch (InstantiationException | IllegalAccessException e) { throw new ConfigurationException( "Configuration class can't be created: " + configurationClass, e); } ConfigGroup configGroup = configurationClass.getAnnotation(ConfigGroup.class); + String prefix = configGroup.prefix(); - for (Method setterMethod : configurationClass.getMethods()) { - if (setterMethod.isAnnotationPresent(Config.class)) { + injectConfiguration(configurationClass, configObject, prefix); - String methodLocation = - configurationClass + "." 
+ setterMethod.getName(); + callPostConstruct(configurationClass, configObject); - Config configAnnotation = setterMethod.getAnnotation(Config.class); + return configObject; - String key = prefix + "." + configAnnotation.key(); + } - Class[] parameterTypes = setterMethod.getParameterTypes(); - if (parameterTypes.length != 1) { - throw new ConfigurationException( - "@Config annotation should be used on simple setter: " - + methodLocation); + private void injectConfiguration(Class configurationClass, + T configObject, String prefix) { + injectConfigurationToObject(configurationClass, configObject, prefix); + Class superClass = configurationClass.getSuperclass(); + while (superClass != null) { + injectConfigurationToObject(superClass, configObject, prefix); + superClass = superClass.getSuperclass(); + } + } + + private void callPostConstruct(Class configurationClass, + T configObject) { + for (Method method : configurationClass.getMethods()) { + if (method.isAnnotationPresent(PostConstruct.class)) { + try { + method.invoke(configObject); + } catch (IllegalAccessException ex) { + throw new IllegalArgumentException( + "@PostConstruct method in " + configurationClass + + " is not accessible"); + } catch (InvocationTargetException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new IllegalArgumentException( + "@PostConstruct can't be executed on " + configurationClass + + " after configObject " + + "injection", e); + } } + } + } + } + + private void injectConfigurationToObject(Class configurationClass, + T configuration, String prefix) { + for (Field field : configurationClass.getDeclaredFields()) { + if (field.isAnnotationPresent(Config.class)) { + + String fieldLocation = + configurationClass + "." + field.getName(); + + Config configAnnotation = field.getAnnotation(Config.class); + + String key = prefix + "." 
+ configAnnotation.key(); ConfigType type = configAnnotation.type(); if (type == ConfigType.AUTO) { - type = detectConfigType(parameterTypes[0], methodLocation); + type = detectConfigType(field.getType(), fieldLocation); } //Note: default value is handled by ozone-default.xml. Here we can @@ -143,37 +183,48 @@ public T getObject(Class configurationClass) { try { switch (type) { case STRING: - setterMethod.invoke(configuration, get(key)); + forcedFieldSet(field, configuration, get(key)); break; case INT: - setterMethod.invoke(configuration, - getInt(key, 0)); + forcedFieldSet(field, configuration, getInt(key, 0)); break; case BOOLEAN: - setterMethod.invoke(configuration, - getBoolean(key, false)); + forcedFieldSet(field, configuration, getBoolean(key, false)); break; case LONG: - setterMethod.invoke(configuration, - getLong(key, 0)); + forcedFieldSet(field, configuration, getLong(key, 0)); break; case TIME: - setterMethod.invoke(configuration, + forcedFieldSet(field, configuration, getTimeDuration(key, 0, configAnnotation.timeUnit())); break; default: throw new ConfigurationException( - "Unsupported ConfigType " + type + " on " + methodLocation); + "Unsupported ConfigType " + type + " on " + fieldLocation); } - } catch (InvocationTargetException | IllegalAccessException e) { + } catch (IllegalAccessException e) { throw new ConfigurationException( - "Can't inject configuration to " + methodLocation, e); + "Can't inject configuration to " + fieldLocation, e); } } } - return configuration; + } + /** + * Set the value of one field even if it's private. 
+ */ + private void forcedFieldSet(Field field, T object, Object value) + throws IllegalAccessException { + boolean accessChanged = false; + if (!field.isAccessible()) { + field.setAccessible(true); + accessChanged = true; + } + field.set(object, value); + if (accessChanged) { + field.setAccessible(false); + } } private ConfigType detectConfigType(Class parameterType, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index 3efa6cfe485f..c98546b47152 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -27,26 +27,30 @@ */ @ConfigGroup(prefix = "hdds.scm") public class ScmConfig { - private String principal; - private String keytab; @Config(key = "kerberos.principal", - type = ConfigType.STRING, - defaultValue = "", - tags = { ConfigTag.SECURITY, ConfigTag.OZONE }, - description = "This Kerberos principal is used by the SCM service." + type = ConfigType.STRING, + defaultValue = "", + tags = { ConfigTag.SECURITY, ConfigTag.OZONE }, + description = "This Kerberos principal is used by the SCM service." + ) + private String principal; + + @Config(key = "kerberos.keytab.file", + type = ConfigType.STRING, + defaultValue = "", + tags = { ConfigTag.SECURITY, ConfigTag.OZONE }, + description = "The keytab file used by SCM daemon to login as "+ + "its service principal." ) + private String keytab; + + public void setKerberosPrincipal(String kerberosPrincipal) { this.principal = kerberosPrincipal; } - @Config(key = "kerberos.keytab.file", - type = ConfigType.STRING, - defaultValue = "", - tags = { ConfigTag.SECURITY, ConfigTag.OZONE }, - description = "The keytab file used by SCM daemon to login as "+ - "its service principal." 
- ) + public void setKerberosKeytab(String kerberosKeytab) { this.keytab = kerberosKeytab; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java index 63e355c0a0c5..31c6dbb81e09 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RocksDBConfiguration.java @@ -29,13 +29,29 @@ @ConfigGroup(prefix = "hadoop.hdds.db") public class RocksDBConfiguration { - private boolean rocksdbLogEnabled; - @Config(key = "rocksdb.logging.enabled", type = ConfigType.BOOLEAN, defaultValue = "false", tags = {ConfigTag.OM}, description = "Enable/Disable RocksDB logging for OM.") + private boolean rocksdbLogEnabled; + + @Config(key = "rocksdb.logging.level", + type = ConfigType.STRING, + defaultValue = "INFO", + tags = {ConfigTag.OM}, + description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)") + private String rocksdbLogLevel; + + @Config(key = "rocksdb.writeoption.sync", + type = ConfigType.BOOLEAN, + defaultValue = "false", + tags = {ConfigTag.OM}, + description = "Enable/Disable Sync option. If true write will be " + + "considered complete, once flushed to persistent storage. 
If false," + + " writes are flushed asynchronously.") + private boolean syncOption; + public void setRocksdbLoggingEnabled(boolean enabled) { this.rocksdbLogEnabled = enabled; } @@ -44,13 +60,6 @@ public boolean isRocksdbLoggingEnabled() { return rocksdbLogEnabled; } - private String rocksdbLogLevel; - - @Config(key = "rocksdb.logging.level", - type = ConfigType.STRING, - defaultValue = "INFO", - tags = {ConfigTag.OM}, - description = "OM RocksDB logging level (INFO/DEBUG/WARN/ERROR/FATAL)") public void setRocksdbLogLevel(String level) { this.rocksdbLogLevel = level; } @@ -59,14 +68,6 @@ public String getRocksdbLogLevel() { return rocksdbLogLevel; } - private boolean syncOption; - @Config(key = "rocksdb.writeoption.sync", - type = ConfigType.BOOLEAN, - defaultValue = "false", - tags = {ConfigTag.OM}, - description = "Enable/Disable Sync option. If true write will be " + - "considered complete, once flushed to persistent storage. If false," + - " writes are flushed asynchronously.") public void setSyncOption(boolean enabled) { this.syncOption = enabled; } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java index 96674032abd0..a7f7b8eb0316 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfiguration.java @@ -17,51 +17,60 @@ */ package org.apache.hadoop.hdds.conf; +import javax.annotation.PostConstruct; import java.util.concurrent.TimeUnit; /** * Example configuration to test the configuration injection. 
*/ @ConfigGroup(prefix = "test.scm.client") -public class SimpleConfiguration { +public class SimpleConfiguration extends SimpleConfigurationParent { + @Config(key = "address", defaultValue = "localhost", description = "Client " + + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT) private String clientAddress; + @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind " + + "host(To test string injection).", tags = ConfigTag.MANAGEMENT) private String bindHost; - private boolean enabled; + @Config(key = "compression.enabled", defaultValue = "true", description = + "Compression enabled. (Just to test boolean flag)", tags = + ConfigTag.MANAGEMENT) + private boolean compressionEnabled; - private int port = 1234; + @Config(key = "port", defaultValue = "9878", description = "Port number " + + "config (To test in injection)", tags = ConfigTag.MANAGEMENT) + private int port; + @Config(key = "wait", type = ConfigType.TIME, timeUnit = + TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To " + + "test TIME config type)", tags = ConfigTag.MANAGEMENT) private long waitTime = 1; - @Config(key = "address", defaultValue = "localhost", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) + @PostConstruct + public void validate() { + if (port < 0) { + throw new NumberFormatException("Please use a postitive port number"); + } + } + public void setClientAddress(String clientAddress) { this.clientAddress = clientAddress; } - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Just " - + "for testing", tags = ConfigTag.MANAGEMENT) public void setBindHost(String bindHost) { this.bindHost = bindHost; } - @Config(key = "enabled", defaultValue = "true", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) - public void setEnabled(boolean enabled) { - this.enabled = enabled; + public void setCompressionEnabled(boolean compressionEnabled) { + this.compressionEnabled = compressionEnabled; } - 
@Config(key = "port", defaultValue = "9878", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) public void setPort(int port) { this.port = port; } - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "10m", description = "Just for " - + "testing", tags = ConfigTag.MANAGEMENT) public void setWaitTime(long waitTime) { this.waitTime = waitTime; } @@ -74,8 +83,8 @@ public String getBindHost() { return bindHost; } - public boolean isEnabled() { - return enabled; + public boolean isCompressionEnabled() { + return compressionEnabled; } public int getPort() { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java new file mode 100644 index 000000000000..92f4c12b88ff --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/SimpleConfigurationParent.java @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +/** + * Parent class for the example configuration. + */ +public class SimpleConfigurationParent { + + @Config(key = "enabled", defaultValue = "true", description = "Example " + + "boolean config.", tags = ConfigTag.MANAGEMENT) + private boolean enabled; + + public boolean isEnabled() { + return enabled; + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java index 46cd2f65a554..ee724e2ef70b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java @@ -128,6 +128,15 @@ public void getConfigurationObjectWithDefault() { Assert.assertEquals(9878, configuration.getPort()); } + @Test(expected = NumberFormatException.class) + public void postConstructValidation() { + OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); + ozoneConfiguration.setInt("test.scm.client.port", -3); + + SimpleConfiguration configuration = + ozoneConfiguration.getObject(SimpleConfiguration.class); + } + private void appendProperty(BufferedWriter out, String name, String val) throws IOException { diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java index 70aa58d54175..316c867e9944 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java +++ 
b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java @@ -27,7 +27,7 @@ * Mark field to be configurable from ozone-site.xml. */ @Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.METHOD) +@Target({ElementType.FIELD}) public @interface Config { /** diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java index 471b679f8452..f3d71be7d12f 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigFileGenerator.java @@ -24,6 +24,8 @@ import javax.lang.model.element.Element; import javax.lang.model.element.ElementKind; import javax.lang.model.element.TypeElement; +import javax.lang.model.type.DeclaredType; +import javax.lang.model.util.SimpleTypeVisitor8; import javax.tools.Diagnostic.Kind; import javax.tools.FileObject; import javax.tools.StandardLocation; @@ -44,6 +46,15 @@ public class ConfigFileGenerator extends AbstractProcessor { public static final String OUTPUT_FILE_NAME = "ozone-default-generated.xml"; + private static final SimpleTypeVisitor8 GET_PARENT_ELEMENT = + new SimpleTypeVisitor8() { + @Override + public Element visitDeclared(DeclaredType t, Void aVoid) { + return t.asElement(); + } + }; + + @Override public boolean process(Set annotations, RoundEnvironment roundEnv) { @@ -68,32 +79,26 @@ public boolean process(Set annotations, Set annotatedElements = roundEnv.getElementsAnnotatedWith(ConfigGroup.class); for (Element annotatedElement : annotatedElements) { - TypeElement configGroup = (TypeElement) annotatedElement; - - //check if any of the setters are annotated with @Config - for (Element element : configGroup.getEnclosedElements()) { - if (element.getKind() == ElementKind.METHOD) { - processingEnv.getMessager() - .printMessage(Kind.WARNING, element.getSimpleName().toString()); - if 
(element.getSimpleName().toString().startsWith("set") - && element.getAnnotation(Config.class) != null) { - - //update the ozone-site-generated.xml - Config configAnnotation = element.getAnnotation(Config.class); - ConfigGroup configGroupAnnotation = - configGroup.getAnnotation(ConfigGroup.class); - - String key = configGroupAnnotation.prefix() + "." - + configAnnotation.key(); - - appender.addConfig(key, - configAnnotation.defaultValue(), - configAnnotation.description(), - configAnnotation.tags()); - } + TypeElement configurationObject = (TypeElement) annotatedElement; + + ConfigGroup configGroupAnnotation = + configurationObject.getAnnotation(ConfigGroup.class); + + TypeElement elementToCheck = configurationObject; + while (elementToCheck != null) { + + writeConfigAnnotations(configGroupAnnotation, appender, + elementToCheck); + if (!elementToCheck.getSuperclass().toString() + .equals("java.lang.Object")) { + elementToCheck = + (TypeElement) elementToCheck.getSuperclass() + .accept(GET_PARENT_ELEMENT, null); + } else { + elementToCheck = null; } - } + } FileObject resource = filer .createResource(StandardLocation.CLASS_OUTPUT, "", @@ -111,4 +116,28 @@ public boolean process(Set annotations, return false; } + private void writeConfigAnnotations(ConfigGroup configGroup, + ConfigFileAppender appender, + TypeElement typeElement) { + //check if any of the setters are annotated with @Config + for (Element element : typeElement.getEnclosedElements()) { + if (element.getKind() == ElementKind.FIELD) { + if (element.getAnnotation(Config.class) != null) { + + //update the ozone-site-generated.xml + Config configAnnotation = element.getAnnotation(Config.class); + + String key = configGroup.prefix() + "." 
+ + configAnnotation.key(); + + appender.addConfig(key, + configAnnotation.defaultValue(), + configAnnotation.description(), + configAnnotation.tags()); + } + } + + } + } + } diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java index 2dd26696b276..c61ee497fe93 100644 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java +++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExample.java @@ -23,46 +23,46 @@ * Example configuration to test the configuration injection. */ @ConfigGroup(prefix = "ozone.scm.client") -public class ConfigurationExample { +public class ConfigurationExample extends ConfigurationExampleParent { + @Config(key = "address", defaultValue = "localhost", description = "Client " + + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT) private String clientAddress; + @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind " + + "host(To test string injection).", tags = ConfigTag.MANAGEMENT) private String bindHost; + @Config(key = "compression.enabled", defaultValue = "true", description = + "Compression enabled. 
(Just to test boolean flag)", tags = + ConfigTag.MANAGEMENT) private boolean compressionEnabled; + @Config(key = "port", defaultValue = "1234", description = "Port number " + + "config (To test in injection)", tags = ConfigTag.MANAGEMENT) private int port = 1234; + @Config(key = "wait", type = ConfigType.TIME, timeUnit = + TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To " + + "test TIME config type)", tags = ConfigTag.MANAGEMENT) private long waitTime = 1; - @Config(key = "address", defaultValue = "localhost", description = "Client " - + "addres (To test string injection).", tags = ConfigTag.MANAGEMENT) public void setClientAddress(String clientAddress) { this.clientAddress = clientAddress; } - @Config(key = "bind.host", defaultValue = "0.0.0.0", description = "Bind " - + "host(To test string injection).", tags = ConfigTag.MANAGEMENT) public void setBindHost(String bindHost) { this.bindHost = bindHost; } - @Config(key = "compression.enabled", defaultValue = "true", description = - "Compression enabled. 
(Just to test boolean flag)", tags = - ConfigTag.MANAGEMENT) public void setCompressionEnabled(boolean compressionEnabled) { this.compressionEnabled = compressionEnabled; } - @Config(key = "port", defaultValue = "1234", description = "Port number " - + "config (To test in injection)", tags = ConfigTag.MANAGEMENT) public void setPort(int port) { this.port = port; } - @Config(key = "wait", type = ConfigType.TIME, timeUnit = - TimeUnit.SECONDS, defaultValue = "30m", description = "Wait time (To " - + "test TIME config type)", tags = ConfigTag.MANAGEMENT) public void setWaitTime(long waitTime) { this.waitTime = waitTime; } diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleGrandParent.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleGrandParent.java new file mode 100644 index 000000000000..f6007c2221de --- /dev/null +++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleGrandParent.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +/** + * Example configuration to test inherited configuration injection. + */ +public class ConfigurationExampleGrandParent { + + @Config(key = "number", defaultValue = "2", description = "Example numeric " + + "configuration", tags = ConfigTag.MANAGEMENT) + private int number = 1; + + public int getNumber() { + return number; + } + + public void setNumber(int number) { + this.number = number; + } +} diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleParent.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleParent.java new file mode 100644 index 000000000000..ab586a3b2c40 --- /dev/null +++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/ConfigurationExampleParent.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +/** + * Example configuration to test inherited configuration injection. + */ +public class ConfigurationExampleParent + extends ConfigurationExampleGrandParent { + + @Config(key = "secure", defaultValue = "true", description = "Make " + + "everything secure.", tags = ConfigTag.MANAGEMENT) + private boolean secure = true; + + public boolean isSecure() { + return secure; + } + + public void setSecure(boolean secure) { + this.secure = secure; + } +} diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java new file mode 100644 index 000000000000..b2dc99f361d8 --- /dev/null +++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.io.File; +import java.io.FileNotFoundException; +import java.util.Scanner; + +import org.junit.Assert; +import org.junit.Test; + +/** + * Test the ConfigFileGenerator. + *

+ * ConfigFileGenerator is an annotation processor and activated for the + * testCompile. Therefore in the unit test we can check the content of the + * generated ozone-default-generated.xml + */ +public class TestConfigFileGenerator { + + @Test + public void testGeneratedXml() throws FileNotFoundException { + String generatedXml = + new Scanner(new File("target/test-classes/ozone-default-generated.xml")) + .useDelimiter("//Z") + .next(); + + Assert.assertTrue( + "Generated config should have entry based on the annotation", + generatedXml.contains("ozone.scm.client.bind.host")); + + Assert.assertTrue( + "Generated config should have entry based on the annotation from the " + + "parent class", + generatedXml.contains("ozone.scm.client.secure")); + + Assert.assertTrue( + "Generated config should have entry based on the annotation from the " + + "grand-parent class.", + generatedXml.contains("ozone.scm.client.number")); + + Assert.assertTrue("Generated config should contain tags", + generatedXml.contains("MANAGEMENT")); + } +} \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java index 3d2a834a7083..0d6a99e2a48a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java @@ -16,6 +16,8 @@ */ package org.apache.hadoop.ozone.container.common.statemachine; +import javax.annotation.PostConstruct; + import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE; @@ -35,15 +37,6 @@ public class DatanodeConfiguration { * The maximum number of 
replication commands a single datanode can execute * simultaneously. */ - private final int replicationMaxStreamsDefault = 10; - private int replicationMaxStreams = replicationMaxStreamsDefault; - /** - * The maximum number of threads used to delete containers on a datanode - * simultaneously. - */ - private final int containerDeleteThreadsDefault = 2; - private int containerDeleteThreads = containerDeleteThreadsDefault; - @Config(key = "replication.streams.limit", type = ConfigType.INT, defaultValue = "10", @@ -51,21 +44,13 @@ public class DatanodeConfiguration { description = "The maximum number of replication commands a single " + "datanode can execute simultaneously" ) - public void setReplicationMaxStreams(int val) { - if (val < 1) { - LOG.warn("hdds.datanode.replication.streams.limit must be greater than" + - "zero and was set to {}. Defaulting to {}", - val, replicationMaxStreamsDefault); - replicationMaxStreams = replicationMaxStreamsDefault; - } else { - this.replicationMaxStreams = val; - } - } - - public int getReplicationMaxStreams() { - return replicationMaxStreams; - } + private final int replicationMaxStreamsDefault = 10; + private int replicationMaxStreams = replicationMaxStreamsDefault; + /** + * The maximum number of threads used to delete containers on a datanode + * simultaneously. + */ @Config(key = "container.delete.threads.max", type = ConfigType.INT, defaultValue = "2", @@ -73,17 +58,39 @@ public int getReplicationMaxStreams() { description = "The maximum number of threads used to delete containers " + "on a datanode" ) - public void setContainerDeleteThreads(int val) { - if (val < 1) { + private final int containerDeleteThreadsDefault = 2; + + private int containerDeleteThreads = containerDeleteThreadsDefault; + + @PostConstruct + public void validate() { + if (replicationMaxStreams < 1) { + LOG.warn("hdds.datanode.replication.streams.limit must be greater than" + + "zero and was set to {}. 
Defaulting to {}", + replicationMaxStreams, replicationMaxStreamsDefault); + replicationMaxStreams = replicationMaxStreamsDefault; + } + + if (containerDeleteThreads < 1) { LOG.warn("hdds.datanode.container.delete.threads.max must be greater " + - "than zero and was set to {}. Defaulting to {}", - val, containerDeleteThreadsDefault); + "than zero and was set to {}. Defaulting to {}", + containerDeleteThreads, containerDeleteThreadsDefault); containerDeleteThreads = containerDeleteThreadsDefault; - } else { - this.containerDeleteThreads = val; } } + public void setReplicationMaxStreams(int replicationMaxStreams) { + this.replicationMaxStreams = replicationMaxStreams; + } + + public void setContainerDeleteThreads(int containerDeleteThreads) { + this.containerDeleteThreads = containerDeleteThreads; + } + + public int getReplicationMaxStreams() { + return replicationMaxStreams; + } + public int getContainerDeleteThreads() { return containerDeleteThreads; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java index 7f112eacd81c..d4ed820d5c9b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/RatisServerConfiguration.java @@ -29,14 +29,14 @@ @ConfigGroup(prefix = "hdds.ratis.server") public class RatisServerConfiguration { - private int numSnapshotsRetained; - @Config(key = "num.snapshots.retained", type = ConfigType.INT, defaultValue = "5", tags = {ConfigTag.STORAGE}, description = "Config parameter to specify number of old snapshots " + "retained at the Ratis leader.") + private int numSnapshotsRetained; + public void 
setNumSnapshotsRetained(int numSnapshotsRetained) { this.numSnapshotsRetained = numSnapshotsRetained; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java index 454ce84310aa..c7c55de45e1f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerScrubberConfiguration.java @@ -27,23 +27,13 @@ **/ @ConfigGroup(prefix = "hdds.containerscrub") public class ContainerScrubberConfiguration { - private boolean enabled; - private long metadataScanInterval; - private long dataScanInterval; - private long bandwidthPerVolume; @Config(key = "enabled", type = ConfigType.BOOLEAN, defaultValue = "false", tags = {ConfigTag.STORAGE}, description = "Config parameter to enable container scrubber.") - public void setEnabled(boolean enabled) { - this.enabled = enabled; - } - - public boolean isEnabled() { - return enabled; - } + private boolean enabled; @Config(key = "metadata.scan.interval", type = ConfigType.TIME, @@ -51,29 +41,17 @@ public boolean isEnabled() { tags = {ConfigTag.STORAGE}, description = "Config parameter define time interval in milliseconds" + " between two metadata scans by container scrubber.") - public void setMetadataScanInterval(long metadataScanInterval) { - this.metadataScanInterval = metadataScanInterval; - } - - public long getMetadataScanInterval() { - return metadataScanInterval; - } + private long metadataScanInterval; @Config(key = "data.scan.interval", type = ConfigType.TIME, defaultValue = "1m", - tags = { ConfigTag.STORAGE }, + tags = {ConfigTag.STORAGE}, description = "Minimum time interval between two iterations of container" + " data scanning. 
If an iteration takes less time than this, the" + " scanner will wait before starting the next iteration." ) - public void setDataScanInterval(long dataScanInterval) { - this.dataScanInterval = dataScanInterval; - } - - public long getDataScanInterval() { - return dataScanInterval; - } + private long dataScanInterval; @Config(key = "volume.bytes.per.second", type = ConfigType.LONG, @@ -81,8 +59,32 @@ public long getDataScanInterval() { tags = {ConfigTag.STORAGE}, description = "Config parameter to throttle I/O bandwidth used" + " by scrubber per volume.") - public void setBandwidthPerVolume(long bandwidthPerVolume) { - this.bandwidthPerVolume = bandwidthPerVolume; + + private long bandwidthPerVolume; + + + public void setEnabled(boolean enabled) { + this.enabled = enabled; + } + + public boolean isEnabled() { + return enabled; + } + + public void setMetadataScanInterval(long metadataScanInterval) { + this.metadataScanInterval = metadataScanInterval; + } + + public long getMetadataScanInterval() { + return metadataScanInterval; + } + + public void setDataScanInterval(long dataScanInterval) { + this.dataScanInterval = dataScanInterval; + } + + public long getDataScanInterval() { + return dataScanInterval; } public long getBandwidthPerVolume() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index 37afd36da019..bfa411d30973 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -793,14 +793,6 @@ public static class ReplicationManagerConfiguration { /** * The frequency in which ReplicationMonitor thread should run. 
*/ - private long interval = 5 * 60 * 1000; - - /** - * Timeout for container replication & deletion command issued by - * ReplicationManager. - */ - private long eventTimeout = 10 * 60 * 1000; - @Config(key = "thread.interval", type = ConfigType.TIME, defaultValue = "300s", @@ -810,10 +802,12 @@ public static class ReplicationManagerConfiguration { "cluster. This property is used to configure the interval in " + "which that thread runs." ) - public void setInterval(long interval) { - this.interval = interval; - } + private long interval = 5 * 60 * 1000; + /** + * Timeout for container replication & deletion command issued by + * ReplicationManager. + */ @Config(key = "event.timeout", type = ConfigType.TIME, defaultValue = "10m", @@ -821,6 +815,14 @@ public void setInterval(long interval) { description = "Timeout for the container replication/deletion commands " + "sent to datanodes. After this timeout the command will be " + "retried.") + private long eventTimeout = 10 * 60 * 1000; + + + public void setInterval(long interval) { + this.interval = interval; + } + + public void setEventTimeout(long eventTimeout) { this.eventTimeout = eventTimeout; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java index c3cfcadc9c3c..18eb8b35f422 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMHTTPServerConfig.java @@ -32,27 +32,28 @@ @ConfigGroup(prefix = "hdds.scm.http") public class SCMHTTPServerConfig { - private String principal = ""; - private String keytab = ""; - @Config(key = "kerberos.principal", type = ConfigType.STRING, defaultValue = "", - tags = { ConfigTag.SECURITY }, + tags = {ConfigTag.SECURITY}, description = "This Kerberos principal is used when communicating to " + - 
"the HTTP server of SCM.The protocol used is SPNEGO." + "the HTTP server of SCM.The protocol used is SPNEGO." ) - public void setKerberosPrincipal(String kerberosPrincipal) { - this.principal = kerberosPrincipal; - } + private String principal = ""; @Config(key = "kerberos.keytab", type = ConfigType.STRING, defaultValue = "", - tags = { ConfigTag.SECURITY }, + tags = {ConfigTag.SECURITY}, description = "The keytab file used by SCM http server to login" + - " as its service principal." + " as its service principal." ) + private String keytab = ""; + + public void setKerberosPrincipal(String kerberosPrincipal) { + this.principal = kerberosPrincipal; + } + public void setKerberosKeytab(String kerberosKeytab) { this.keytab = kerberosKeytab; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java index b51af56a4bb1..246756e0e4f9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneAclConfig.java @@ -28,10 +28,8 @@ * */ @ConfigGroup(prefix = "ozone.om") public class OzoneAclConfig { - // OM Default user/group permissions - private ACLType userDefaultRights = ACLType.ALL; - private ACLType groupDefaultRights = ACLType.ALL; + // OM Default user/group permissions @Config(key = "user.rights", defaultValue = "ALL", type = ConfigType.STRING, @@ -39,12 +37,7 @@ public class OzoneAclConfig { description = "Default user permissions set for an object in " + "OzoneManager." 
) - public void setUserDefaultRights(String userRights) { - if(userRights == null) { - userRights = "ALL"; - } - this.userDefaultRights = ACLType.valueOf(userRights); - } + private String userDefaultRights; @Config(key = "group.rights", defaultValue = "ALL", @@ -53,19 +46,20 @@ public void setUserDefaultRights(String userRights) { description = "Default group permissions set for an object in " + "OzoneManager." ) - public void setGroupDefaultRights(String groupRights) { - if(groupRights == null) { - groupRights = "ALL"; - } - this.groupDefaultRights = ACLType.valueOf(groupRights); - } + private String groupDefaultRights; public ACLType getUserDefaultRights() { - return userDefaultRights; + if (userDefaultRights == null) { + return ACLType.ALL; + } + return ACLType.valueOf(userDefaultRights); } public ACLType getGroupDefaultRights() { - return groupDefaultRights; + if (groupDefaultRights == null) { + return ACLType.ALL; + } + return ACLType.valueOf(groupDefaultRights); } } diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java index df5127c47b5d..443008ac8050 100644 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java @@ -94,18 +94,28 @@ public static void main(String[] args) { */ @ConfigGroup(prefix = "ozone.csi") public static class CsiConfig { + + @Config(key = "socket", + defaultValue = "/var/lib/csi.sock", + description = + "The socket where all the CSI services will listen (file name).", + tags = ConfigTag.STORAGE) private String socketPath; - private long defaultVolumeSize; - private String s3gAddress; - private String volumeOwner; - public String getSocketPath() { - return socketPath; - } + @Config(key = "default-volume-size", + defaultValue = "1000000000", + description = + "The default size of the created volumes (if not specified).", + tags = ConfigTag.STORAGE) + private long defaultVolumeSize; - public 
String getVolumeOwner() { - return volumeOwner; - } + @Config(key = "s3g.address", + defaultValue = "http://localhost:9878", + description = + "The address of the S3 gateway endpoint that is used" + + " by the CSI server.", + tags = ConfigTag.STORAGE) + private String s3gAddress; @Config(key = "owner", defaultValue = "", @@ -117,15 +127,22 @@ public String getVolumeOwner() { + "ALL the users can request the mount of a specific bucket " + "via the CSI interface.", tags = ConfigTag.STORAGE) + private String volumeOwner; + + public String getSocketPath() { + return socketPath; + } + + public String getVolumeOwner() { + return volumeOwner; + } + + public void setVolumeOwner(String volumeOwner) { this.volumeOwner = volumeOwner; } - @Config(key = "socket", - defaultValue = "/var/lib/csi.sock", - description = - "The socket where all the CSI services will listen (file name).", - tags = ConfigTag.STORAGE) + public void setSocketPath(String socketPath) { this.socketPath = socketPath; } @@ -134,11 +151,7 @@ public long getDefaultVolumeSize() { return defaultVolumeSize; } - @Config(key = "default-volume-size", - defaultValue = "1000000000", - description = - "The default size of the create volumes (if not specified).", - tags = ConfigTag.STORAGE) + public void setDefaultVolumeSize(long defaultVolumeSize) { this.defaultVolumeSize = defaultVolumeSize; } @@ -147,12 +160,6 @@ public String getS3gAddress() { return s3gAddress; } - @Config(key = "s3g.address", - defaultValue = "http://localhost:9878", - description = - "The default size of the created volumes (if not specified in the" - + " requests).", - tags = ConfigTag.STORAGE) public void setS3gAddress(String s3gAddress) { this.s3gAddress = s3gAddress; }