diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java index 480ce0ed5fb20..0f6ad94719634 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java @@ -121,7 +121,7 @@ public static void main(String[] args) throws Exception { * After this call is made, no operations may be made of this * object, or of a YARN registry instance used when constructing * this object. - * @throws IOException + * @throws IOException IO problems */ @Override public void close() throws IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java index 5fd2aef5b5845..6e5c61435cafa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/BindFlags.java @@ -29,7 +29,7 @@ public interface BindFlags { /** - * Create the entry.. This is just "0" and can be "or"ed with anything + * Create the entry.. This is just "0" and can be "or"ed with anything. 
*/ int CREATE = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java index a6fe216ec91ff..b726c972e2d0e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java @@ -32,12 +32,12 @@ public interface RegistryConstants { /** * prefix for registry configuration options: {@value}. * Why hadoop. and not YARN? It can - * live outside YARN + * live outside YARN. */ String REGISTRY_PREFIX = "hadoop.registry."; /** - * Prefix for zookeeper-specific options: {@value} + * Prefix for zookeeper-specific options: {@value}. *

* For clients using other protocols, these options are not supported. */ @@ -45,12 +45,12 @@ public interface RegistryConstants { /** * flag to indicate whether or not the registry should - * be enabled in the RM: {@value} + * be enabled in the RM: {@value}. */ String KEY_REGISTRY_ENABLED = REGISTRY_PREFIX + "rm.enabled"; /** - * Defaut value for enabling the registry in the RM: {@value} + * Default value for enabling the registry in the RM: {@value}. */ boolean DEFAULT_REGISTRY_ENABLED = false; @@ -69,12 +69,12 @@ public interface RegistryConstants { boolean DEFAULT_REGISTRY_SECURE = false; /** - * Root path in the ZK tree for the registry: {@value} + * Root path in the ZK tree for the registry: {@value}. */ String KEY_REGISTRY_ZK_ROOT = ZK_PREFIX + "root"; /** - * Default root of the yarn registry: {@value} + * Default root of the yarn registry: {@value}. */ String DEFAULT_ZK_REGISTRY_ROOT = "/registry"; @@ -92,7 +92,7 @@ public interface RegistryConstants { /** * Registry client uses Kerberos: authentication is automatic from - * logged in user + * logged in user. */ String REGISTRY_CLIENT_AUTH_KERBEROS = "kerberos"; @@ -104,12 +104,12 @@ public interface RegistryConstants { String REGISTRY_CLIENT_AUTH_DIGEST = "digest"; /** - * No authentication; client is anonymous + * No authentication; client is anonymous. */ String REGISTRY_CLIENT_AUTH_ANONYMOUS = ""; /** - * Registry client authentication ID + * Registry client authentication ID. *

* This is only used in secure clusters with * {@link #KEY_REGISTRY_CLIENT_AUTH} set to @@ -134,24 +134,24 @@ public interface RegistryConstants { /** * List of hostname:port pairs defining the - * zookeeper quorum binding for the registry {@value} + * zookeeper quorum binding for the registry {@value}. */ String KEY_REGISTRY_ZK_QUORUM = ZK_PREFIX + "quorum"; /** - * The default zookeeper quorum binding for the registry: {@value} + * The default zookeeper quorum binding for the registry: {@value}. */ String DEFAULT_REGISTRY_ZK_QUORUM = "localhost:2181"; /** - * Zookeeper session timeout in milliseconds: {@value} + * Zookeeper session timeout in milliseconds: {@value}. */ String KEY_REGISTRY_ZK_SESSION_TIMEOUT = ZK_PREFIX + "session.timeout.ms"; /** - * The default ZK session timeout: {@value}. - */ + * The default ZK session timeout: {@value}. + */ int DEFAULT_ZK_SESSION_TIMEOUT = 60000; /** @@ -224,9 +224,9 @@ public interface RegistryConstants { /** * A comma separated list of Zookeeper ACL identifiers with - * system access to the registry in a secure cluster: {@value}. + * user access to the registry in a secure cluster: {@value}. * - * These are given full access to all entries. + * These are given full access to entries under the user account. * * If there is an "@" at the end of an entry it * instructs the registry client to append the default kerberos domain. @@ -259,7 +259,7 @@ public interface RegistryConstants { String KEY_REGISTRY_CLIENT_JAAS_CONTEXT = REGISTRY_PREFIX + "jaas.context"; /** - * default client-side registry JAAS context: {@value} + * default client-side registry JAAS context: {@value}. 
*/ String DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT = "Client"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java index c51bcf7465d7e..1cbfaeb7a6332 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperations.java @@ -34,7 +34,7 @@ import java.util.List; /** - * Registry Operations + * Registry Operations. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -63,7 +63,7 @@ boolean mknode(String path, boolean createParents) IOException; /** - * Bind a path in the registry to a service record + * Bind a path in the registry to a service record. * @param path path to service record * @param record service record service record to create/update * @param flags bind flags @@ -80,7 +80,7 @@ void bind(String path, ServiceRecord record, int flags) IOException; /** - * Resolve the record at a path + * Resolve the record at a path. * @param path path to an entry containing a {@link ServiceRecord} * @return the record * @throws PathNotFoundException path is not in the registry. @@ -97,7 +97,7 @@ ServiceRecord resolve(String path) IOException; /** - * Get the status of a path + * Get the status of a path. * @param path path to query * @return the status of the path * @throws PathNotFoundException path is not in the registry. 
@@ -115,7 +115,7 @@ RegistryPathStatus stat(String path) * any failure downgraded to a * @param path path to query * @return true if the path was found - * @throws IOException + * @throws IOException IO problems */ boolean exists(String path) throws IOException; @@ -125,9 +125,9 @@ RegistryPathStatus stat(String path) * @param path path to query * @return a possibly empty list of the short path names of * child entries. - * @throws PathNotFoundException - * @throws InvalidPathnameException - * @throws IOException + * @throws PathNotFoundException path is not in the registry. + * @throws InvalidPathnameException the path is invalid. + * @throws IOException IO problems */ List list(String path) throws PathNotFoundException, @@ -178,5 +178,5 @@ void delete(String path, boolean recursive) * Only accessors added via {@link #addWriteAccessor(String, String)} * are removed. */ - public void clearWriteAccessors(); + void clearWriteAccessors(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java index 443654df37eb9..bd7756a5f679b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java @@ -32,7 +32,7 @@ * Each created instance will be returned initialized. *

* That is, the service will have had Service.init(conf) applied - * to it —possibly after the configuration has been modified to + * to it, possibly after the configuration has been modified to * support the specific binding/security mechanism used */ public final class RegistryOperationsFactory { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java index eddff20e3498a..11a855697aba9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java @@ -54,7 +54,7 @@ * which use the mapper * @param Type to marshal. */ -@InterfaceAudience.Private() +@InterfaceAudience.Private @InterfaceStability.Evolving public class JsonSerDeser { @@ -69,7 +69,7 @@ public class JsonSerDeser { private final ObjectMapper mapper; /** - * Create an instance bound to a specific type + * Create an instance bound to a specific type. * @param classType class to marshall */ public JsonSerDeser(Class classType) { @@ -88,7 +88,7 @@ public String getName() { } /** - * Convert from JSON + * Convert from JSON. * * @param json input * @return the parsed JSON @@ -108,7 +108,7 @@ public synchronized T fromJson(String json) } /** - * Convert from a JSON file + * Convert from a JSON file. 
* @param jsonFile input file * @return the parsed JSON * @throws IOException IO problems @@ -121,13 +121,13 @@ public synchronized T fromFile(File jsonFile) try { return mapper.readValue(jsonFile, classType); } catch (IOException e) { - LOG.error("Exception while parsing json file {}: {}", jsonFile, e); + LOG.error("Exception while parsing json file {}", jsonFile, e); throw e; } } /** - * Convert from a JSON file + * Convert from a JSON file. * @param resource input file * @return the parsed JSON * @throws IOException IO problems @@ -164,7 +164,7 @@ public T fromInstance(T instance) throws IOException { } /** - * Load from a Hadoop filesystem + * Load from a Hadoop filesystem. * @param fs filesystem * @param path path * @return a loaded CD @@ -187,7 +187,7 @@ public T load(FileSystem fs, Path path) } /** - * Save a cluster description to a hadoop filesystem + * Save a cluster description to a hadoop filesystem. * @param fs filesystem * @param path path * @param overwrite should any existing file be overwritten @@ -201,8 +201,8 @@ public void save(FileSystem fs, Path path, T instance, } /** - * Write the json as bytes -then close the file - * @param dataOutputStream an outout stream that will always be closed + * Write the json as bytes -then close the file. + * @param dataOutputStream an output stream that will always be closed * @throws IOException on any failure */ private void writeJsonAsBytes(T instance, @@ -216,10 +216,10 @@ private void writeJsonAsBytes(T instance, } /** - * Convert JSON To bytes + * Convert JSON To bytes. * @param instance instance to convert * @return a byte array - * @throws IOException + * @throws IOException any problem serializing the instance. */ public byte[] toBytes(T instance) throws IOException { String json = toJson(instance); @@ -227,9 +227,10 @@ public byte[] toBytes(T instance) throws IOException { } /** - * Deserialize from a byte array + * Deserialize from a byte array. 
* @param path path the data came from * @param bytes byte array + * @return the deserialized instance * @throws IOException all problems * @throws EOFException not enough data * @throws InvalidRecordException if the parsing failed -the record is invalid @@ -279,7 +280,7 @@ public T fromBytes(String path, byte[] bytes, String marker) } /** - * Convert an instance to a JSON string + * Convert an instance to a JSON string. * @param instance instance to convert * @return a JSON string description * @throws JsonProcessingException Json generation problems diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java index 5d8ea3f5b15c6..849e394117b6d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java @@ -40,14 +40,14 @@ public class RegistryPathUtils { /** - * Compiled down pattern to validate single entries in the path + * Compiled down pattern to validate single entries in the path. */ private static final Pattern PATH_ENTRY_VALIDATION_PATTERN = Pattern.compile(RegistryInternalConstants.VALID_PATH_ENTRY_PATTERN); /** * Validate ZK path with the path itself included in - * the exception text + * the exception text. * @param path path to validate * @return the path parameter * @throws InvalidPathnameException if the pathname is invalid. @@ -83,7 +83,8 @@ public static String validateElementsAsDNS(String path) throws } /** - * Create a full path from the registry root and the supplied subdir + * Create a full path from the registry root and the supplied subdir. 
+ * @param base root of the registry * @param path path of operation * @return an absolute path * @throws InvalidPathnameException if the path is invalid @@ -99,7 +100,7 @@ public static String createFullPath(String base, String path) throws * Join two paths, guaranteeing that there will not be exactly * one separator between the two, and exactly one at the front * of the path. There will be no trailing "/" except for the special - * case that this is the root path + * case that this is the root path. * @param base base path * @param path second path to add * @return a combined path. @@ -138,14 +139,14 @@ public static String join(String base, String path) { } /** - * split a path into elements, stripping empty elements + * split a path into elements, stripping empty elements. * @param path the path * @return the split path */ public static List split(String path) { // String[] pathelements = path.split("/"); - List dirs = new ArrayList(pathelements.length); + List dirs = new ArrayList<>(pathelements.length); for (String pathelement : pathelements) { if (!pathelement.isEmpty()) { dirs.add(pathelement); @@ -172,7 +173,7 @@ public static String lastPathEntry(String path) { } /** - * Get the parent of a path + * Get the parent of a path. * @param path path to look at * @return the parent path * @throws PathNotFoundException if the path was at root. @@ -198,7 +199,7 @@ public static String parentOf(String path) throws PathNotFoundException { /** * Perform any formatting for the registry needed to convert - * non-simple-DNS elements + * non-simple-DNS elements. * @param element element to encode * @return an encoded string */ @@ -208,7 +209,7 @@ public static String encodeForRegistry(String element) { /** * Perform whatever transforms are needed to get a YARN ID into - * a DNS-compatible name + * a DNS-compatible name. * @param yarnId ID as string of YARN application, instance or container * @return a string suitable for use in registry paths. 
*/ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java index ec59d5985a044..8cd18d5616c91 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java @@ -37,7 +37,7 @@ import java.util.Map; /** - * Static methods to work with registry types —primarily endpoints and the + * Static methods to work with registry types, primarily endpoints and the * list representation of addresses. */ @InterfaceAudience.Public @@ -45,7 +45,7 @@ public class RegistryTypeUtils { /** - * Create a URL endpoint from a list of URIs + * Create a URL endpoint from a list of URIs. * @param api implemented API * @param protocolType protocol type * @param uris URIs @@ -58,7 +58,7 @@ public static Endpoint urlEndpoint(String api, } /** - * Create a REST endpoint from a list of URIs + * Create a REST endpoint from a list of URIs. * @param api implemented API * @param uris URIs * @return a new endpoint @@ -69,7 +69,7 @@ public static Endpoint restEndpoint(String api, } /** - * Create a Web UI endpoint from a list of URIs + * Create a Web UI endpoint from a list of URIs. * @param api implemented API * @param uris URIs * @return a new endpoint @@ -80,7 +80,7 @@ public static Endpoint webEndpoint(String api, } /** - * Create an internet address endpoint from a list of URIs + * Create an internet address endpoint from a list of URIs. * @param api implemented API * @param protocolType protocol type * @param hostname hostname/FQDN @@ -102,7 +102,7 @@ public static Endpoint inetAddrEndpoint(String api, } /** - * Create an IPC endpoint + * Create an IPC endpoint. 
* @param api API * @param address the address as a tuple of (hostname, port) * @return the new endpoint @@ -115,19 +115,19 @@ public static Endpoint ipcEndpoint(String api, InetSocketAddress address) { } /** - * Create a single entry map + * Create a single entry map. * @param key map entry key * @param val map entry value * @return a 1 entry map. */ public static Map map(String key, String val) { - Map map = new HashMap(1); + Map map = new HashMap<>(1); map.put(key, val); return map; } /** - * Create a URI + * Create a URI. * @param uri value * @return a 1 entry map. */ @@ -136,7 +136,7 @@ public static Map uri(String uri) { } /** - * Create a (hostname, port) address pair + * Create a (hostname, port) address pair. * @param hostname hostname * @param port port * @return a 1 entry map. @@ -149,7 +149,7 @@ public static Map hostnamePortPair(String hostname, int port) { } /** - * Create a (hostname, port) address pair + * Create a (hostname, port) address pair. * @param address socket address whose hostname and port are used for the * generated address. * @return a 1 entry map. @@ -159,7 +159,7 @@ public static Map hostnamePortPair(InetSocketAddress address) { } /** - * Require a specific address type on an endpoint + * Require a specific address type on an endpoint. * @param required required type * @param epr endpoint * @throws InvalidRecordException if the type is wrong @@ -176,10 +176,10 @@ public static void requireAddressType(String required, Endpoint epr) throws } /** - * Get a single URI endpoint + * Get a single URI endpoint. * @param epr endpoint - * @return the uri of the first entry in the address list. Null if the endpoint - * itself is null + * @return the uri of the first entry in the address list. 
+ * Null if the endpoint itself is null * @throws InvalidRecordException if the type is wrong, there are no addresses * or the payload ill-formatted */ @@ -194,7 +194,7 @@ public static List retrieveAddressesUriType(Endpoint epr) throw new InvalidRecordException(epr.toString(), "No addresses in endpoint"); } - List results = new ArrayList(addresses.size()); + List results = new ArrayList<>(addresses.size()); for (Map address : addresses) { results.add(getAddressField(address, ADDRESS_URI)); } @@ -203,7 +203,7 @@ public static List retrieveAddressesUriType(Endpoint epr) /** * Get a specific field from an address -raising an exception if - * the field is not present + * the field is not present. * @param address address to query * @param field field to resolve * @return the resolved value. Guaranteed to be non-null. @@ -219,7 +219,7 @@ public static String getAddressField(Map address, } /** - * Get the address URLs. Guranteed to return at least one address. + * Get the address URLs. Guaranteed to return at least one address. * @param epr endpoint * @return the address as a URL * @throws InvalidRecordException if the type is wrong, there are no addresses @@ -232,7 +232,7 @@ public static List retrieveAddressURLs(Endpoint epr) throw new InvalidRecordException("", "Null endpoint"); } List addresses = retrieveAddressesUriType(epr); - List results = new ArrayList(addresses.size()); + List results = new ArrayList<>(addresses.size()); for (String address : addresses) { results.add(new URL(address)); } @@ -241,7 +241,7 @@ public static List retrieveAddressURLs(Endpoint epr) /** * Validate the record by checking for null fields and other invalid - * conditions + * conditions. * @param path path for exceptions * @param record record to validate. 
May be null * @throws InvalidRecordException on invalid entries @@ -270,7 +270,7 @@ public static void validateServiceRecord(String path, ServiceRecord record) /** * Validate the endpoint by checking for null fields and other invalid - * conditions + * conditions. * @param path path for exceptions * @param endpoint endpoint to validate. May be null * @throws InvalidRecordException on invalid entries diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java index 858b6b123570e..42fa83ac76abf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java @@ -102,7 +102,7 @@ public static String convertUsername(String username) { } /** - * Create a service classpath + * Create a service classpath. * @param user username or "" * @param serviceClass service name * @return a full path @@ -116,7 +116,7 @@ public static String serviceclassPath(String user, } /** - * Create a path to a service under a user and service class + * Create a path to a service under a user and service class. * @param user username or "" * @param serviceClass service name * @param serviceName service name unique for that user and service class @@ -132,7 +132,7 @@ public static String servicePath(String user, } /** - * Create a path for listing components under a service + * Create a path for listing components under a service. 
* @param user username or "" * @param serviceClass service name * @param serviceName service name unique for that user and service class @@ -146,7 +146,7 @@ public static String componentListPath(String user, } /** - * Create the path to a service record for a component + * Create the path to a service record for a component. * @param user username or "" * @param serviceClass service name * @param serviceName service name unique for that user and service class @@ -162,12 +162,12 @@ public static String componentPath(String user, } /** - * List service records directly under a path + * List service records directly under a path. * @param registryOperations registry operations instance * @param path path to list * @return a mapping of the service records that were resolved, indexed * by their full path - * @throws IOException + * @throws IOException IO problems */ public static Map listServiceRecords( RegistryOperations registryOperations, @@ -186,8 +186,9 @@ public static Map listServiceRecords( * This is not an atomic operation; A child may be deleted * during the iteration through the child entries. If this happens, * the PathNotFoundException is caught and that child - * entry ommitted. + * entry omitted. * + * @param registryOperations operations instance to use * @param path path * @return a possibly empty map of child entries listed by * their short name. @@ -203,7 +204,7 @@ public static Map statChildren( IOException { List childNames = registryOperations.list(path); Map results = - new HashMap(); + new HashMap<>(); for (String childName : childNames) { String child = join(path, childName); try { @@ -279,7 +280,7 @@ public static String getCurrentUsernameUnencoded(String env_hadoop_username) { } /** - * Get the current user path formatted for the registry + * Get the current user path formatted for the registry. *

* In an insecure cluster, the environment variable * HADOOP_USER_NAME is queried first. @@ -312,7 +313,7 @@ public static Map extractServiceRecords( RegistryOperations operations, String parentpath, Collection stats) throws IOException { - Map results = new HashMap(stats.size()); + Map results = new HashMap<>(stats.size()); for (RegistryPathStatus stat : stats) { if (stat.size > ServiceRecord.RECORD_TYPE.length()) { // maybe has data @@ -344,6 +345,7 @@ public static Map extractServiceRecords( *

* @param operations operation support for fetches * @param parentpath path of the parent of all the entries + * @param stats list of registry stat values to examine * @return a possibly empty map of fullpath:record. * @throws IOException for any IO Operation that wasn't ignored. */ @@ -376,7 +378,7 @@ public static Map extractServiceRecords( /** - * Static instance of service record marshalling + * Static instance of service record marshalling. */ public static class ServiceRecordMarshal extends JsonSerDeser { public ServiceRecordMarshal() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java index aadb7fc46d975..c0392a62ac17a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/AuthenticationFailedException.java @@ -18,10 +18,15 @@ package org.apache.hadoop.registry.client.exceptions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + /** * Exception raised when client access wasn't authenticated. * That is: the credentials provided were incomplete or invalid. 
*/ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class AuthenticationFailedException extends RegistryIOException { public AuthenticationFailedException(String path, Throwable cause) { super(path, cause); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java index e4f545e5b4ca2..16841a21395e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/InvalidRecordException.java @@ -23,7 +23,6 @@ /** * Raised if an attempt to parse a record failed. - * */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java index ce84f5b610279..798265eefaeb6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/NoPathPermissionsException.java @@ -18,13 +18,15 @@ package org.apache.hadoop.registry.client.exceptions; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathIOException; /** * Raised on path permission exceptions. - *

- * This is similar to PathIOException, except that exception doesn't let */ +@InterfaceAudience.Public +@InterfaceStability.Evolving public class NoPathPermissionsException extends RegistryIOException { public NoPathPermissionsException(String path, Throwable cause) { super(path, cause); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java index 7d9c8ade8356f..f453fbf30e42a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/exceptions/package-info.java @@ -30,4 +30,9 @@ * All exceptions in this package are derived from * {@link org.apache.hadoop.registry.client.exceptions.RegistryIOException} */ +@InterfaceAudience.Public +@InterfaceStability.Evolving package org.apache.hadoop.registry.client.exceptions; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java index 8ae003d5485fd..5c97fd529ce70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/BindingInformation.java @@ -23,19 +23,19 @@ import org.apache.hadoop.classification.InterfaceStability; /** - * Binding information provided by a {@link 
RegistryBindingSource} + * Binding information provided by a {@link RegistryBindingSource}. */ @InterfaceAudience.Public @InterfaceStability.Evolving public class BindingInformation { /** - * The Curator Ensemble Provider + * The Curator Ensemble Provider. */ public EnsembleProvider ensembleProvider; /** - * Any information that may be useful for diagnostics + * Any information that may be useful for diagnostics. */ public String description; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java index 7f35c3fe62843..85814b229ab73 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java @@ -69,12 +69,12 @@ public class CuratorService extends CompositeService LoggerFactory.getLogger(CuratorService.class); /** - * the Curator binding + * the Curator binding. */ private CuratorFramework curator; /** - * Path to the registry root + * Path to the registry root. */ private String registryRoot; @@ -85,17 +85,17 @@ public class CuratorService extends CompositeService private final RegistryBindingSource bindingSource; /** - * Security service + * Security service. */ private RegistrySecurity registrySecurity; /** - * the connection binding text for messages + * the connection binding text for messages. */ private String connectionDescription; /** - * Security connection diagnostics + * Security connection diagnostics. */ private String securityConnectionDiagnostics = ""; @@ -133,7 +133,7 @@ public CuratorService(String name) { * Init the service. 
* This is where the security bindings are set up * @param conf configuration of the service - * @throws Exception + * @throws Exception initialization problems. */ @Override protected void serviceInit(Configuration conf) throws Exception { @@ -155,7 +155,7 @@ protected void serviceInit(Configuration conf) throws Exception { /** * Start the service. * This is where the curator instance is started. - * @throws Exception + * @throws Exception startup problems */ @Override protected void serviceStart() throws Exception { @@ -167,7 +167,7 @@ protected void serviceStart() throws Exception { } /** - * Close the ZK connection if it is open + * Close the ZK connection if it is open. */ @Override protected void serviceStop() throws Exception { @@ -176,7 +176,7 @@ protected void serviceStop() throws Exception { } /** - * Internal check that a service is in the live state + * Internal check that a service is in the live state. * @throws ServiceStateException if not */ private void checkServiceLive() throws ServiceStateException { @@ -197,7 +197,7 @@ public boolean isSecure() { } /** - * Get the registry security helper + * Get the registry security helper. * @return the registry security helper */ protected RegistrySecurity getRegistrySecurity() { @@ -205,7 +205,7 @@ protected RegistrySecurity getRegistrySecurity() { } /** - * Build the security diagnostics string + * Build the security diagnostics string. * @return a string for diagnostics */ protected String buildSecurityDiagnostics() { @@ -277,7 +277,7 @@ public String toString() { } /** - * Get the binding diagnostics + * Get the binding diagnostics. * @return a diagnostics string valid after the service is started. */ public String bindingDiagnosticDetails() { @@ -287,7 +287,7 @@ public String bindingDiagnosticDetails() { } /** - * Create a full path from the registry root and the supplied subdir + * Create a full path from the registry root and the supplied subdir. 
* @param path path of operation * @return an absolute path * @throws IllegalArgumentException if the path is invalid @@ -297,8 +297,8 @@ protected String createFullPath(String path) throws IOException { } /** - * Get the registry binding source ... this can be used to - * create new ensemble providers + * Get the registry binding source; this can be used to + * create new ensemble providers. * @return the registry binding source in use */ public RegistryBindingSource getBindingSource() { @@ -308,7 +308,7 @@ public RegistryBindingSource getBindingSource() { /** * Create the ensemble provider for this registry, by invoking * {@link RegistryBindingSource#supplyBindingInformation()} on - * the provider stored in {@link #bindingSource} + * the provider stored in {@link #bindingSource}. * Sets {@link #ensembleProvider} to that value; * sets {@link #connectionDescription} to the binding info * for use in toString and logging; @@ -339,7 +339,7 @@ public BindingInformation supplyBindingInformation() { /** * Override point: get the connection string used to connect to - * the ZK service + * the ZK service. * @return a registry quorum */ protected String buildConnectionString() { @@ -348,7 +348,7 @@ } /** - * Create an IOE when an operation fails + * Create an IOE when an operation fails. * @param path path of operation * @param operation operation attempted * @param exception caught the exception caught @@ -361,7 +361,7 @@ protected IOException operationFailure(String path, } /** - * Create an IOE when an operation fails + * Create an IOE when an operation fails. 
* @param path path of operation * @param operation operation attempted * @param exception caught the exception caught @@ -421,7 +421,7 @@ protected IOException operationFailure(String path, * @param acl ACL for path -used when creating a new entry * @param createParents flag to trigger parent creation * @return true iff the path was created - * @throws IOException + * @throws IOException problem creating the path. */ @VisibleForTesting public boolean maybeCreate(String path, @@ -432,11 +432,12 @@ public boolean maybeCreate(String path, } /** - * Stat the file + * Stat the file. * @param path path of operation * @return a curator stat entry * @throws IOException on a failure * @throws PathNotFoundException if the path was not found + * @throws IOException any other problem */ public Stat zkStat(String path) throws IOException { checkServiceLive(); @@ -457,10 +458,11 @@ public Stat zkStat(String path) throws IOException { } /** - * Get the ACLs of a path + * Get the ACLs of a path. * @param path path of operation * @return a possibly empty list of ACLs - * @throws IOException + * @throws PathNotFoundException if the path was not found + * @throws IOException any other problem */ public List zkGetACLS(String path) throws IOException { checkServiceLive(); @@ -481,10 +483,11 @@ public List zkGetACLS(String path) throws IOException { } /** - * Probe for a path existing + * Probe for a path existing. * @param path path of operation * @return true if the path was visible from the ZK server * queried. + * (a missing path simply results in {@code false}, not an exception) * @throws IOException on any exception other than * {@link PathNotFoundException} */ @@ -497,16 +500,14 @@ public boolean zkPathExists(String path) throws IOException { return true; } catch (PathNotFoundException e) { return false; - } catch (IOException e) { - throw e; } } /** - * Verify a path exists + * Verify a path exists. 
* @param path path of operation * @throws PathNotFoundException if the path is absent - * @throws IOException + * @throws IOException any other problem */ public String zkPathMustExist(String path) throws IOException { zkStat(path); @@ -558,7 +559,7 @@ public boolean zkMkPath(String path, } /** - * Recursively make a path + * Recursively make a path. * @param path path to create * @param acl ACL for path * @throws IOException any problem @@ -574,11 +575,11 @@ public void zkMkParentPath(String path, /** * Create a path with given data. byte[0] is used for a path - * without data + * without data. * @param path path of operation * @param data initial data - * @param acls - * @throws IOException + * @param acls ACL list + * @throws IOException any problem creating the path. */ public void zkCreate(String path, CreateMode mode, @@ -600,10 +601,10 @@ public void zkCreate(String path, } /** - * Update the data for a path + * Update the data for a path. * @param path path of operation * @param data new data - * @throws IOException + * @throws IOException any problem updating the path. */ public void zkUpdate(String path, byte[] data) throws IOException { Preconditions.checkArgument(data != null, "null data"); @@ -620,12 +621,13 @@ public void zkUpdate(String path, byte[] data) throws IOException { } /** - * Create or update an entry + * Create or update an entry. * @param path path * @param data data * @param acl ACL for path -used when creating a new entry * @param overwrite enable overwrite - * @throws IOException + * @throws FileAlreadyExistsException if the path existed and overwrite==false + * @throws IOException IO problem * @return true if the entry was created, false if it was simply updated. */ public boolean zkSet(String path, @@ -682,10 +684,10 @@ public void zkDelete(String path, } /** - * List all children of a path + * List all children of a path. 
* @param path path of operation * @return a possibly empty list of children - * @throws IOException + * @throws IOException read failure */ public List zkList(String path) throws IOException { checkServiceLive(); @@ -703,7 +705,7 @@ public List zkList(String path) throws IOException { } /** - * Read data on a path + * Read data on a path. * @param path path of operation * @return the data * @throws IOException read failure @@ -723,7 +725,7 @@ public byte[] zkRead(String path) throws IOException { /** * Return a path dumper instance which can do a full dump - * of the registry tree in its toString() + * of the registry tree in its toString(). * operation * @return a class to dump the registry * @param verbose verbose flag - includes more details (such as ACLs) @@ -746,7 +748,7 @@ public boolean addWriteAccessor(String id, String pass) throws IOException { } /** - * Clear all write accessors + * Clear all write accessors. */ public void clearWriteAccessors() { getRegistrySecurity().resetDigestACLs(); @@ -765,7 +767,7 @@ protected String dumpRegistryRobustly(boolean verbose) { return pathDumper.toString(); } catch (Exception e) { // ignore - LOG.debug("Ignoring exception: {}", e); + LOG.debug("Ignoring exception", e); } return ""; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java index bab4742ad6c0e..092351f997e4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryBindingSource.java @@ -22,14 +22,14 @@ import org.apache.hadoop.classification.InterfaceStability; /** - * Interface which can be implemented by a registry 
binding source + * Interface which can be implemented by a registry binding source. */ @InterfaceAudience.Public @InterfaceStability.Evolving public interface RegistryBindingSource { /** - * Supply the binding information for this registry + * Supply the binding information for this registry. * @return the binding information data */ BindingInformation supplyBindingInformation(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java index f04673a08bde6..af9897d28f813 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryInternalConstants.java @@ -46,7 +46,7 @@ public interface RegistryInternalConstants { int PERMISSIONS_REGISTRY_READERS = ZooDefs.Perms.READ; /** - * Permissions for system services: {@value} + * Permissions for system services: {@value}. */ int PERMISSIONS_REGISTRY_SYSTEM_SERVICES = ZooDefs.Perms.ALL; @@ -75,7 +75,7 @@ public interface RegistryInternalConstants { /** * This the Hadoop environment variable which propagates the identity - * of a user in an insecure cluster + * of a user in an insecure cluster. 
*/ String HADOOP_USER_NAME = "HADOOP_USER_NAME"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java index 271ab25463335..18915ebbfee68 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java @@ -77,7 +77,7 @@ public RegistryOperationsService(String name, /** * Get the aggregate set of ACLs the client should use - * to create directories + * to create directories. * @return the ACL list */ public List getClientAcls() { @@ -85,7 +85,7 @@ public List getClientAcls() { } /** - * Validate a path + * Validate a path. 
* @param path path to validate * @throws InvalidPathnameException if a path is considered invalid */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java index 39325a3f19471..022530c365f89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java @@ -24,6 +24,7 @@ import org.apache.commons.lang.StringUtils; import org.apache.curator.framework.CuratorFrameworkFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.apache.hadoop.service.AbstractService; @@ -78,45 +79,38 @@ public class RegistrySecurity extends AbstractService { /** * there's no default user to add with permissions, so it would be - * impossible to create nodes with unrestricted user access + * impossible to create nodes with unrestricted user access. */ public static final String E_NO_USER_DETERMINED_FOR_ACLS = "No user for ACLs determinable from current user or registry option " + KEY_REGISTRY_USER_ACCOUNTS; /** - * Error raised when the registry is tagged as secure but this - * process doesn't have hadoop security enabled. - */ - public static final String E_NO_KERBEROS = - "Registry security is enabled -but Hadoop security is not enabled"; - - /** - * Access policy options + * Access policy options. */ private enum AccessPolicy { anon, sasl, digest } /** - * Access mechanism + * Access mechanism. 
*/ private AccessPolicy access; /** - * User used for digest auth + * User used for digest auth. */ private String digestAuthUser; /** - * Password used for digest auth + * Password used for digest auth. */ private String digestAuthPassword; /** - * Auth data used for digest auth + * Auth data used for digest auth. */ private byte[] digestAuthData; @@ -126,13 +120,13 @@ private enum AccessPolicy { private boolean secureRegistry; /** - * An ACL with read-write access for anyone + * An ACL with read-write access for anyone. */ public static final ACL ALL_READWRITE_ACCESS = new ACL(ZooDefs.Perms.ALL, ZooDefs.Ids.ANYONE_ID_UNSAFE); /** - * An ACL with read access for anyone + * An ACL with read access for anyone. */ public static final ACL ALL_READ_ACCESS = new ACL(ZooDefs.Perms.READ, ZooDefs.Ids.ANYONE_ID_UNSAFE); @@ -144,39 +138,39 @@ private enum AccessPolicy { public static final List WorldReadWriteACL; static { - List acls = new ArrayList(); + List acls = new ArrayList<>(); acls.add(ALL_READWRITE_ACCESS); - WorldReadWriteACL = new CopyOnWriteArrayList(acls); + WorldReadWriteACL = new CopyOnWriteArrayList<>(acls); } /** - * the list of system ACLs + * the list of system ACLs. */ - private final List systemACLs = new ArrayList(); + private final List systemACLs = new ArrayList<>(); /** * A list of digest ACLs which can be added to permissions - * —and cleared later. + * and cleared later. */ - private final List digestACLs = new ArrayList(); + private final List digestACLs = new ArrayList<>(); /** - * the default kerberos realm + * the default kerberos realm. */ private String kerberosRealm; /** - * Client context + * Client context. */ private String jaasClientContext; /** - * Client identity + * Client identity. */ private String jaasClientIdentity; /** - * Create an instance + * Create an instance. 
* @param name service name */ public RegistrySecurity(String name) { @@ -184,9 +178,10 @@ } /** - * Init the service: this sets up security based on the configuration + * Init the service: this sets up security based on the configuration. * @param conf configuration - * @throws Exception + * @throws ServiceStateException if the authentication mechanism is unknown. + * @throws Exception any initialization problem. */ @Override protected void serviceInit(Configuration conf) throws Exception { @@ -215,7 +210,7 @@ protected void serviceInit(Configuration conf) throws Exception { * Init security. * * After this operation, the {@link #systemACLs} list is valid. - * @throws IOException + * @throws IOException problems initializing security. */ private void initSecurity() throws IOException { @@ -250,51 +245,53 @@ } } - // here check for UGI having secure on or digest + ID + // configure security access based on settings. 
switch (access) { - case sasl: - // secure + SASL => has to be authenticated - if (!UserGroupInformation.isSecurityEnabled()) { - throw new IOException("Kerberos required for secure registry access"); - } - UserGroupInformation currentUser = - UserGroupInformation.getCurrentUser(); - jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, - DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT); - jaasClientIdentity = currentUser.getShortUserName(); - if (LOG.isDebugEnabled()) { - LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"", - jaasClientIdentity, - jaasClientContext); - } - break; - - case digest: - String id = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, ""); - String pass = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, ""); - if (userACLs.isEmpty()) { - // - throw new ServiceStateException(E_NO_USER_DETERMINED_FOR_ACLS); - } - digest(id, pass); - ACL acl = new ACL(ZooDefs.Perms.ALL, toDigestId(id, pass)); - userACLs.add(acl); - digestAuthUser = id; - digestAuthPassword = pass; - String authPair = id + ":" + pass; - digestAuthData = authPair.getBytes("UTF-8"); - if (LOG.isDebugEnabled()) { - LOG.debug("Auth is Digest ACL: {}", aclToString(acl)); - } - break; + case sasl: + // secure + SASL => has to be authenticated + if (!UserGroupInformation.isSecurityEnabled()) { + throw new AccessControlException("Kerberos required" + + " for secure registry access"); + } + UserGroupInformation currentUser = + UserGroupInformation.getCurrentUser(); + jaasClientContext = getOrFail(KEY_REGISTRY_CLIENT_JAAS_CONTEXT, + DEFAULT_REGISTRY_CLIENT_JAAS_CONTEXT); + jaasClientIdentity = currentUser.getShortUserName(); + if (LOG.isDebugEnabled()) { + LOG.debug("Auth is SASL user=\"{}\" JAAS context=\"{}\"", + jaasClientIdentity, + jaasClientContext); + } + break; + + case digest: + String id = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_ID, ""); + String pass = getOrFail(KEY_REGISTRY_CLIENT_AUTHENTICATION_PASSWORD, + ""); + if (userACLs.isEmpty()) { + // + throw new 
ServiceStateException(E_NO_USER_DETERMINED_FOR_ACLS); + } + digest(id, pass); + ACL acl = new ACL(ZooDefs.Perms.ALL, toDigestId(id, pass)); + userACLs.add(acl); + digestAuthUser = id; + digestAuthPassword = pass; + String authPair = id + ":" + pass; + digestAuthData = authPair.getBytes("UTF-8"); + if (LOG.isDebugEnabled()) { + LOG.debug("Auth is Digest ACL: {}", aclToString(acl)); + } + break; - case anon: - // nothing is needed; account is read only. - if (LOG.isDebugEnabled()) { - LOG.debug("Auth is anonymous"); - } - userACLs = new ArrayList(0); - break; + case anon: + // nothing is needed; account is read only. + if (LOG.isDebugEnabled()) { + LOG.debug("Auth is anonymous"); + } + userACLs = new ArrayList<>(0); + break; } systemACLs.addAll(userACLs); @@ -308,7 +305,7 @@ private void initSecurity() throws IOException { } /** - * Add another system ACL + * Add another system ACL. * @param acl add ACL */ public void addSystemACL(ACL acl) { @@ -316,8 +313,9 @@ public void addSystemACL(ACL acl) { } /** - * Add a digest ACL + * Add a digest ACL. * @param acl add ACL + * @return true if the registry is secure and the ACL added */ public boolean addDigestACL(ACL acl) { if (secureRegistry) { @@ -336,7 +334,7 @@ public boolean addDigestACL(ACL acl) { } /** - * Reset the digest ACL list + * Reset the digest ACL list. */ public void resetDigestACLs() { if (LOG.isDebugEnabled()) { @@ -346,7 +344,7 @@ public void resetDigestACLs() { } /** - * Flag to indicate the cluster is secure + * Flag to indicate the cluster is secure. * @return true if the config enabled security */ public boolean isSecureRegistry() { @@ -354,7 +352,7 @@ public boolean isSecureRegistry() { } /** - * Get the system principals + * Get the system principals. * @return the system principals */ public List getSystemACLs() { @@ -363,21 +361,21 @@ public List getSystemACLs() { } /** - * Get all ACLs needed for a client to use when writing to the repo. 
+ * Get all ACLs needed for a client to use when creating a new entry. * That is: system ACLs, its own ACL, any digest ACLs * @return the client ACLs */ public List getClientACLs() { - List clientACLs = new ArrayList(systemACLs); + List clientACLs = new ArrayList<>(systemACLs); clientACLs.addAll(digestACLs); return clientACLs; } /** - * Create a SASL ACL for the user + * Create a SASL ACL for the user. * @param perms permissions * @return an ACL for the current user or null if they aren't a kerberos user - * @throws IOException + * @throws IOException IO problems */ public ACL createSaslACLFromCurrentUser(int perms) throws IOException { UserGroupInformation currentUser = UserGroupInformation.getCurrentUser(); @@ -389,7 +387,7 @@ public ACL createSaslACLFromCurrentUser(int perms) throws IOException { } /** - * Given a UGI, create a SASL ACL from it + * Given a UGI, create a SASL ACL from it. * @param ugi UGI * @param perms permissions * @return a new ACL @@ -400,7 +398,7 @@ public ACL createSaslACL(UserGroupInformation ugi, int perms) { } /** - * Get a conf option, throw an exception if it is null/empty + * Get a conf option, throw an exception if it is null/empty. * @param key key * @param defval default value * @return the value @@ -439,9 +437,10 @@ public String getKerberosRealm() { } /** - * Generate a base-64 encoded digest of the idPasswordPair pair + * Generate a base-64 encoded digest of the idPasswordPair pair. * @param idPasswordPair id:password * @return a string that can be used for authentication + * @throws IOException on invalid idPasswordPair or digest problems */ public String digest(String idPasswordPair) throws IOException { if (StringUtils.isEmpty(idPasswordPair) || !isValid(idPasswordPair)) { @@ -457,18 +456,18 @@ public String digest(String idPasswordPair) throws IOException { } /** - * Generate a base-64 encoded digest of the idPasswordPair pair + * Generate a base-64 encoded digest of the idPasswordPair pair. 
* @param id ID * @param password pass * @return a string that can be used for authentication - * @throws IOException + * @throws IOException on invalid idPasswordPair or digest problems */ public String digest(String id, String password) throws IOException { return digest(id + ":" + password); } /** - * Given a digest, create an ID from it + * Given a digest, create an ID from it. * @param digest digest * @return ID */ @@ -477,7 +476,7 @@ public Id toDigestId(String digest) { } /** - * Create a Digest ID from an id:pass pair + * Create a Digest ID from an id:pass pair. * @param id ID * @param password password * @return an ID @@ -544,7 +543,7 @@ public Id parse(String idPair, String realm) { } /** - * Parse the IDs, adding a realm if needed, setting the permissions + * Parse the IDs, adding a realm if needed, setting the permissions. * @param principalList id string * @param realm realm to add * @param perms permissions @@ -554,7 +553,7 @@ public Id parse(String idPair, String realm) { public List buildACLs(String principalList, String realm, int perms) throws IOException { List aclPairs = splitAclPairs(principalList, realm); - List ids = new ArrayList(aclPairs.size()); + List ids = new ArrayList<>(aclPairs.size()); for (String aclPair : aclPairs) { ACL newAcl = new ACL(); newAcl.setId(parse(aclPair, realm)); @@ -565,7 +564,7 @@ public List buildACLs(String principalList, String realm, int perms) } /** - * Parse an ACL list. This includes configuration indirection + * Parse an ACL list. This includes configuration indirection. * {@link ZKUtil#resolveConfIndirection(String)} * @param zkAclConf configuration string * @return an ACL list @@ -585,15 +584,11 @@ public List parseACLs(String zkAclConf) throws IOException { * @return a JVM-specific kerberos login module classname. 
*/ public static String getKerberosAuthModuleForJVM() { - if (System.getProperty("java.vendor").contains("IBM")) { - return "com.ibm.security.auth.module.Krb5LoginModule"; - } else { - return "com.sun.security.auth.module.Krb5LoginModule"; - } + return KerberosUtil.getKrb5LoginModuleName(); } /** - * JAAS template: {@value} + * JAAS template: {@value}. * Note the semicolon on the last entry */ private static final String JAAS_ENTRY = @@ -621,7 +616,7 @@ public static String getKerberosAuthModuleForJVM() { ); /** - * Create a JAAS entry for insertion + * Create a JAAS entry for insertion. * @param context context of the entry * @param principal kerberos principal * @param keytab keytab @@ -665,7 +660,7 @@ public static void bindJVMtoJAASFile(File jaasFile) { } /** - * Set the Zookeeper server property + * Set the Zookeeper server property. * {@link ZookeeperConfigOptions#PROP_ZK_SERVER_SASL_CONTEXT} * to the SASL context. When the ZK server starts, this is the context * which it will read in @@ -676,7 +671,7 @@ public static void bindZKToServerJAASContext(String contextName) { } /** - * Reset any system properties related to JAAS + * Reset any system properties related to JAAS. */ public static void clearJaasSystemProperties() { System.clearProperty(Environment.JAAS_CONF_KEY); @@ -754,7 +749,7 @@ public static void setZKSaslClientProperties(String username, } /** - * Clear all the ZK SASL Client properties + * Clear all the ZK SASL Client properties. * Important:This is JVM-wide */ public static void clearZKSaslClientProperties() { @@ -764,7 +759,7 @@ public static void clearZKSaslClientProperties() { } /** - * Turn ZK SASL on + * Turn ZK SASL on. 
* Important:This is JVM-wide */ protected static void enableZookeeperClientSASL() { @@ -799,7 +794,7 @@ public void logCurrentHadoopUser() { UserGroupInformation realUser = currentUser.getRealUser(); LOG.info("Real User = {}" , realUser); } catch (IOException e) { - LOG.warn("Failed to get current user {}, {}", e); + LOG.warn("Failed to get current user", e); } } @@ -824,7 +819,7 @@ public static String aclsToString(List acls) { } /** - * Convert an ACL to a string, with any obfuscation needed + * Convert an ACL to a string, with any obfuscation needed. * @param acl ACL * @return ACL string value */ @@ -838,7 +833,7 @@ public static String aclToString(ACL acl) { /** * Convert an ID to a string, stripping out all but the first few characters - * of any digest auth hash for security reasons + * of any digest auth hash for security reasons. * @param id ID * @return a string description of a Zookeeper ID */ @@ -858,7 +853,7 @@ public static String idToString(Id id) { } /** - * Build up low-level security diagnostics to aid debugging + * Build up low-level security diagnostics to aid debugging. * @return a string to use in diagnostics */ public String buildSecurityDiagnostics() { @@ -905,8 +900,8 @@ private static String describeProperty(String name, String def) { } /** - * Get the default kerberos realm —returning "" if there - * is no realm or other problem + * Get the default kerberos realm -returning "" if there + * is no realm or other problem. * @return the default realm of the system if it * could be determined */ @@ -929,12 +924,14 @@ public static String getDefaultRealmInJVM() { /** * Create an ACL For a user. * @param ugi User identity - * @return the ACL For the specified user. Ifthe username doesn't end + * @param perms permissions to pass to + * {@link #createACLfromUsername(String, int)} + * @return the ACL For the specified user. 
If the username doesn't end * in "@" then the realm is added */ public ACL createACLForUser(UserGroupInformation ugi, int perms) { if (LOG.isDebugEnabled()) { - LOG.debug("Creating ACL For ", new UgiInfo(ugi)); + LOG.debug("Creating ACL For {}", new UgiInfo(ugi)); } if (!secureRegistry) { return ALL_READWRITE_ACCESS; @@ -944,7 +941,7 @@ public ACL createACLForUser(UserGroupInformation ugi, int perms) { } /** - * Given a user name (short or long), create a SASL ACL + * Given a user name (short or long), create a SASL ACL. * @param username user name; if it doesn't contain an "@" symbol, the * service's kerberos realm is added * @param perms permissions @@ -961,7 +958,7 @@ public ACL createACLfromUsername(String username, int perms) { } /** - * On demand string-ifier for UGI with extra details + * On demand string-ifier for UGI with extra details. */ public static class UgiInfo { @@ -969,7 +966,7 @@ public static UgiInfo fromCurrentUser() { try { return new UgiInfo(UserGroupInformation.getCurrentUser()); } catch (IOException e) { - LOG.info("Failed to get current user {}", e, e); + LOG.info("Failed to get current user", e); return new UgiInfo(null); } } @@ -991,14 +988,15 @@ public String toString() { builder.append(" hasKerberosCredentials=").append( ugi.hasKerberosCredentials()); builder.append(" isFromKeytab=").append(ugi.isFromKeytab()); - builder.append(" kerberos is enabled in Hadoop =").append(UserGroupInformation.isSecurityEnabled()); + builder.append(" kerberos is enabled in Hadoop =") + .append(UserGroupInformation.isSecurityEnabled()); return builder.toString(); } } /** - * on-demand stringifier for a list of ACLs + * on-demand stringifier for a list of ACLs. 
*/ public static class AclListInfo { public final List acls; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java index 3c4a730608f6e..44a383903b44e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java @@ -43,7 +43,7 @@ public class ZKPathDumper { private final boolean verbose; /** - * Create a path dumper -but do not dump the path until asked + * Create a path dumper -but do not dump the path until asked. * @param curator curator instance * @param root root * @param verbose verbose flag - includes more details (such as ACLs) @@ -73,7 +73,7 @@ public String toString() { /** * Recursively expand the path into the supplied string builder, increasing * the indentation by {@link #INDENT} as it proceeds (depth first) down - * the tree + * the tree. * @param builder string build to append to * @param path path to examine * @param indent current indentation @@ -120,7 +120,7 @@ private void expand(StringBuilder builder, } /** - * Append the specified indentation to a builder + * Append the specified indentation to a builder. 
* @param builder string build to append to * @param indent current indentation * @param c charactor to use for indentation diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java index edcf0859fc345..7c5092af2188c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZookeeperConfigOptions.java @@ -23,13 +23,13 @@ /** * Configuration options which are internal to Zookeeper, - * as well as some other ZK constants + * as well as some other ZK constants. *

* Zookeeper options are passed via system properties prior to the ZK * Methods/classes being invoked. This implies that: *

    *
  1. There can only be one instance of a ZK client or service class - * in a single JVM —else their configuration options will conflict.
  2. + * in a single JVM -else their configuration options will conflict. *
  3. It is safest to set these properties immediately before * invoking ZK operations.
  4. *
@@ -77,7 +77,7 @@ public interface ZookeeperConfigOptions { /** * The SASL Server context, referring to a context in the JVM's - * JAAS context file: {@value} + * JAAS context file: {@value}. */ String PROP_ZK_SERVER_SASL_CONTEXT = ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java index f7ae98372d8ad..8bb4be9f1c923 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/package-info.java @@ -34,6 +34,10 @@ * some operations/instantiating some objects. The definitions of these * are kept in {@link org.apache.hadoop.registry.client.impl.zk.ZookeeperConfigOptions}. * - * */ +@InterfaceAudience.Private +@InterfaceStability.Evolving package org.apache.hadoop.registry.client.impl.zk; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java index 395f8366f1a54..64837c9da4392 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java @@ -50,7 +50,7 @@ public final class Endpoint implements Cloneable { /** - * API implemented at the end of the binding + * API implemented at the end of the binding. 
*/ public String api; @@ -67,7 +67,7 @@ public final class Endpoint implements Cloneable { public String protocolType; /** - * a list of address tuples —tuples whose format depends on the address type + * a list of address tuples -tuples whose format depends on the address type. */ public List> addresses; @@ -88,14 +88,14 @@ public Endpoint(Endpoint that) { this.protocolType = that.protocolType; this.addresses = newAddresses(that.addresses.size()); for (Map address : that.addresses) { - Map addr2 = new HashMap(address.size()); + Map addr2 = new HashMap<>(address.size()); addr2.putAll(address); addresses.add(addr2); } } /** - * Build an endpoint with a list of addresses + * Build an endpoint with a list of addresses. * @param api API name * @param addressType address type * @param protocolType protocol type @@ -115,7 +115,7 @@ public Endpoint(String api, } /** - * Build an endpoint with an empty address list + * Build an endpoint with an empty address list. * @param api API name * @param addressType address type * @param protocolType protocol type @@ -140,44 +140,51 @@ public Endpoint(String api, * @param api API name * @param addressType address type * @param protocolType protocol type - * @param addr address. May be null —in which case it is not added + * @param addr address. May be null -in which case it is not added */ public Endpoint(String api, String addressType, String protocolType, Map addr) { this(api, addressType, protocolType); - if (addr != null) { - addresses.add(addr); - } + maybeAdd(addr); } /** - * Build an endpoint with a list of addresses + * Build an endpoint with a list of addresses. * @param api API name * @param addressType address type * @param protocolType protocol type * @param addrs addresses. 
Null elements will be skipped */ + @SafeVarargs public Endpoint(String api, String addressType, String protocolType, Map...addrs) { this(api, addressType, protocolType); for (Map addr : addrs) { - if (addr!=null) { - addresses.add(addr); - } + maybeAdd(addr); + } + } + + /** + * Add an address map if it is not null. + * @param addr addresses + */ + private void maybeAdd(Map addr) { + if (addr != null) { + addresses.add(addr); } } /** - * Create a new address structure of the requested size + * Create a new address structure of the requested size. * @param size size to create * @return the new list */ private List> newAddresses(int size) { - return new ArrayList>(size); + return new ArrayList<>(size); } /** @@ -185,7 +192,7 @@ private List> newAddresses(int size) { * is ASCII-encoded and added to the list of addresses. * @param api API name * @param protocolType protocol type - * @param uris URIs to convert to a list of tup;les + * @param uris URIs to convert to a list of tuples */ public Endpoint(String api, String protocolType, @@ -208,7 +215,7 @@ public String toString() { /** * Validate the record by checking for null fields and other invalid - * conditions + * conditions. * @throws NullPointerException if a field is null when it * MUST be set. * @throws RuntimeException on invalid entries @@ -224,9 +231,9 @@ public void validate() { } /** - * Shallow clone: the lists of addresses are shared + * Shallow clone: the lists of addresses are shared. * @return a cloned instance - * @throws CloneNotSupportedException + * @throws CloneNotSupportedException clone not supported */ @Override public Object clone() throws CloneNotSupportedException { @@ -235,7 +242,7 @@ public Object clone() throws CloneNotSupportedException { /** - * Static instance of service record marshalling + * Static instance of service record marshalling. 
*/ private static class Marshal extends JsonSerDeser { private Marshal() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java index b836b0003c7dc..98675262e2e3a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ProtocolTypes.java @@ -74,7 +74,7 @@ public interface ProtocolTypes { String PROTOCOL_UDP = "udp"; /** - * Default value —the protocol is unknown : "{@value}" + * Default value -the protocol is unknown : "{@value}" */ String PROTOCOL_UNKNOWN = ""; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java index acd2b218c0c8c..5025c7a5b732a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/RegistryPathStatus.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; /** - * Output of a RegistryOperations.stat() call + * Output of a RegistryOperations.stat() call. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -33,12 +33,12 @@ public final class RegistryPathStatus { /** - * Short path in the registry to this entry + * Short path in the registry to this entry. */ public final String path; /** - * Timestamp + * Timestamp. 
*/ public final long time; @@ -49,12 +49,12 @@ public final class RegistryPathStatus { public final long size; /** - * Number of child nodes + * Number of child nodes. */ public final int children; /** - * Construct an instance + * Construct an instance. * @param path full path * @param time time * @param size entry size diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java index 674d6d34e1ba8..d7f71296df7d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java @@ -47,29 +47,29 @@ public class ServiceRecord implements Cloneable { public static final String RECORD_TYPE = "JSONServiceRecord"; /** - * The type field. This must be the string {@link #RECORD_TYPE} + * The type field. This must be the string {@link #RECORD_TYPE}. */ public String type = RECORD_TYPE; /** - * Description string + * Description string. */ public String description; /** * map to handle unknown attributes. */ - private Map attributes = new HashMap(4); + private Map attributes = new HashMap<>(4); /** - * List of endpoints intended for use to external callers + * List of endpoints intended for use to external callers. */ - public List external = new ArrayList(); + public List external = new ArrayList<>(); /** * List of endpoints for use within an application. */ - public List internal = new ArrayList(); + public List internal = new ArrayList<>(); /** * Create a service record with no ID, description or registration time. @@ -79,7 +79,7 @@ public ServiceRecord() { } /** - * Deep cloning constructor + * Deep cloning constructor. 
* @param that service record source */ public ServiceRecord(ServiceRecord that) { @@ -92,14 +92,14 @@ public ServiceRecord(ServiceRecord that) { // endpoints List src = that.internal; if (src != null) { - internal = new ArrayList(src.size()); + internal = new ArrayList<>(src.size()); for (Endpoint endpoint : src) { internal.add(new Endpoint(endpoint)); } } src = that.external; if (src != null) { - external = new ArrayList(src.size()); + external = new ArrayList<>(src.size()); for (Endpoint endpoint : src) { external.add(new Endpoint(endpoint)); } @@ -107,7 +107,7 @@ public ServiceRecord(ServiceRecord that) { } /** - * Add an external endpoint + * Add an external endpoint. * @param endpoint endpoint to set */ public void addExternalEndpoint(Endpoint endpoint) { @@ -117,7 +117,7 @@ public void addExternalEndpoint(Endpoint endpoint) { } /** - * Add an internal endpoint + * Add an internal endpoint. * @param endpoint endpoint to set */ public void addInternalEndpoint(Endpoint endpoint) { @@ -127,7 +127,7 @@ public void addInternalEndpoint(Endpoint endpoint) { } /** - * Look up an internal endpoint + * Look up an internal endpoint. * @param api API * @return the endpoint or null if there was no match */ @@ -136,7 +136,7 @@ public Endpoint getInternalEndpoint(String api) { } /** - * Look up an external endpoint + * Look up an external endpoint. * @param api API * @return the endpoint or null if there was no match */ @@ -146,7 +146,7 @@ public Endpoint getExternalEndpoint(String api) { /** * Handle unknown attributes by storing them in the - * {@link #attributes} map + * {@link #attributes} map. * @param key attribute name * @param value attribute value. */ @@ -167,7 +167,7 @@ public Map attributes() { } /** - * Get the "other" attribute with a specific key + * Get the "other" attribute with a specific key. 
* @param key key to look up * @return the value or null */ @@ -188,7 +188,7 @@ public String get(String key, String defVal) { } /** - * Find an endpoint by its API + * Find an endpoint by its API. * @param list list * @param api api name * @return the endpoint or null if there was no match @@ -235,9 +235,9 @@ public String toString() { } /** - * Shallow clone: all endpoints will be shared across instances + * Shallow clone: all endpoints will be shared across instances. * @return a clone of the instance - * @throws CloneNotSupportedException + * @throws CloneNotSupportedException if cloning is not supported */ @Override protected Object clone() throws CloneNotSupportedException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java index 1c926be00b6be..44947f09423a0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/package-info.java @@ -32,10 +32,16 @@ * * * The {@link org.apache.hadoop.registry.client.types.RegistryPathStatus} - * class is not saved to the registry —it is the status of a registry + * class is not saved to the registry -it is the status of a registry * entry that can be retrieved from the API call. It is still * designed to be marshalled to and from JSON, as it can be served up * from REST front ends to the registry. 
* */ + +@InterfaceAudience.Public +@InterfaceStability.Evolving package org.apache.hadoop.registry.client.types; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java index e4c7272db67aa..1423158edda92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/PersistencePolicies.java @@ -21,7 +21,7 @@ import org.apache.hadoop.registry.client.types.ServiceRecord; /** - * Persistence policies for {@link ServiceRecord} + * Persistence policies for {@link ServiceRecord}. */ public interface PersistencePolicies { @@ -43,7 +43,7 @@ public interface PersistencePolicies { String APPLICATION_ATTEMPT = "application-attempt"; /** - * Remove when the YARN container in the ID field finishes: {@value} + * Remove when the YARN container in the ID field finishes: {@value}. 
*/ String CONTAINER = "container"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java index 7b78932452ede..4317c6a9f89fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/types/yarn/YarnRegistryAttributes.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.types.yarn; /** - * YARN specific attributes in the registry + * YARN specific attributes in the registry. */ public class YarnRegistryAttributes { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java index e11890f85c392..d9d18fa6eee33 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/RMRegistryOperationsService.java @@ -37,7 +37,7 @@ import java.util.concurrent.Future; /** - * Handle RM events by updating the registry + * Handle RM events by updating the registry. *

* These actions are all implemented as event handlers to operations * which come from the RM. @@ -62,15 +62,14 @@ public RMRegistryOperationsService(String name, super(name, bindingSource); } - /** * Extend the parent service initialization by verifying that the - * service knows —in a secure cluster— the realm in which it is executing. + * service knows -in a secure cluster- the realm in which it is executing. * It needs this to properly build up the user names and hence their * access rights. * * @param conf configuration of the service - * @throws Exception + * @throws Exception failure to initialize */ @Override protected void serviceInit(Configuration conf) throws Exception { @@ -87,44 +86,8 @@ public void setPurgeOnCompletionPolicy(PurgePolicy purgeOnCompletionPolicy) { this.purgeOnCompletionPolicy = purgeOnCompletionPolicy; } - public void onApplicationAttemptRegistered(ApplicationAttemptId attemptId, - String host, int rpcport, String trackingurl) throws IOException { - - } - - public void onApplicationLaunched(ApplicationId id) throws IOException { - - } - /** - * Actions to take as an AM registers itself with the RM. - * @param attemptId attempt ID - * @throws IOException problems - */ - public void onApplicationMasterRegistered(ApplicationAttemptId attemptId) throws - IOException { - } - - /** - * Actions to take when the AM container is completed - * @param containerId container ID - * @throws IOException problems - */ - public void onAMContainerFinished(ContainerId containerId) throws - IOException { - LOG.info("AM Container {} finished, purging application attempt records", - containerId); - - // remove all application attempt entries - purgeAppAttemptRecords(containerId.getApplicationAttemptId()); - - // also treat as a container finish to remove container - // level records for the AM container - onContainerFinished(containerId); - } - - /** - * remove all application attempt entries + * remove all application attempt entries. 
* @param attemptId attempt ID */ protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) { @@ -134,8 +97,8 @@ protected void purgeAppAttemptRecords(ApplicationAttemptId attemptId) { } /** - * Actions to take when an application attempt is completed - * @param attemptId application ID + * Actions to take when an application attempt is completed. + * @param attemptId application ID * @throws IOException problems */ public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId) @@ -146,7 +109,7 @@ public void onApplicationAttemptUnregistered(ApplicationAttemptId attemptId) } /** - * Actions to take when an application is completed + * Actions to take when an application is completed. * @param id application ID * @throws IOException problems */ @@ -159,23 +122,20 @@ public void onApplicationCompleted(ApplicationId id) PersistencePolicies.APPLICATION); } - public void onApplicationAttemptAdded(ApplicationAttemptId appAttemptId) { - } - /** - * This is the event where the user is known, so the user directory + * This is the event where the user is known, so the user directory. * can be created * @param applicationId application ID * @param user username * @throws IOException problems */ - public void onStateStoreEvent(ApplicationId applicationId, String user) throws - IOException { + public void onStateStoreEvent(ApplicationId applicationId, String user) + throws IOException { initUserRegistryAsync(user); } /** - * Actions to take when the AM container is completed + * Actions to take when the AM container is completed. 
* @param id container ID * @throws IOException problems */ @@ -207,7 +167,8 @@ public Future purgeRecordsAsync(String path, String persistencePolicyMatch) { return purgeRecordsAsync(path, - id, persistencePolicyMatch, + id, + persistencePolicyMatch, purgeOnCompletionPolicy, new DeleteCompletionCallback()); } @@ -234,7 +195,7 @@ public Future purgeRecordsAsync(String path, String persistencePolicyMatch, PurgePolicy purgePolicy, BackgroundCallback callback) { - LOG.info(" records under {} with ID {} and policy {}: {}", + LOG.info(" records under {} with ID {} and policy {}", path, id, persistencePolicyMatch); return submit( new AsyncPurge(path, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java index 004be86064d99..583e9ae1cc8f0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java @@ -26,7 +26,7 @@ import org.apache.hadoop.registry.server.services.RegistryAdminService; /** - * Select an entry by the YARN persistence policy + * Select an entry by the YARN persistence policy. 
*/ public class SelectByYarnPersistence implements RegistryAdminService.NodeSelector { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java index 22d8bc5cfff82..7302505872bad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/integration/package-info.java @@ -20,4 +20,9 @@ * This package contains the classes which integrate with the YARN resource * manager. */ +@InterfaceAudience.Private +@InterfaceStability.Evolving package org.apache.hadoop.registry.server.integration; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java index 9faede49dc565..cb13f742c03af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/AddingCompositeService.java @@ -27,11 +27,11 @@ * Composite service that exports the add/remove methods. *

* This allows external classes to add services to these methods, after which - * they follow the same lifecyce. + * they follow the same lifecycle. *

* It is essential that any service added is in a state where it can be moved * on with that of the parent services. Specifically, do not add an uninited - * service to a parent that is already inited —as the start + * service to a parent that is already inited -as the start * operation will then fail * */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java index e160d4a1ff84c..6c21867e0ad13 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/DeleteCompletionCallback.java @@ -21,7 +21,6 @@ import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.BackgroundCallback; import org.apache.curator.framework.api.CuratorEvent; -import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,7 +33,7 @@ */ public class DeleteCompletionCallback implements BackgroundCallback { private static final Logger LOG = - LoggerFactory.getLogger(RMRegistryOperationsService.class); + LoggerFactory.getLogger(DeleteCompletionCallback.class); private AtomicInteger events = new AtomicInteger(0); @@ -49,7 +48,7 @@ public void processResult(CuratorFramework client, } /** - * Get the number of deletion events + * Get the number of deletion events. 
* @return the count of events */ public int getEventCount() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java index 88e9d67b79d72..c5b599da18a71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java @@ -65,9 +65,11 @@ @InterfaceStability.Evolving public class MicroZookeeperService extends AbstractService - implements RegistryBindingSource, RegistryConstants, + implements RegistryBindingSource, + RegistryConstants, + RegistryInternalConstants, ZookeeperConfigOptions, - MicroZookeeperServiceKeys{ + MicroZookeeperServiceKeys { private static final Logger @@ -86,7 +88,7 @@ public class MicroZookeeperService private StringBuilder diagnostics = new StringBuilder(); /** - * Create an instance + * Create an instance. * @param name service name */ public MicroZookeeperService(String name) { @@ -105,7 +107,7 @@ public String getConnectionString() { } /** - * Get the connection address + * Get the connection address. * @return the connection as an address * @throws IllegalStateException if the connection is not yet valid */ @@ -115,7 +117,7 @@ public InetSocketAddress getConnectionAddress() { } /** - * Create an inet socket addr from the local host + port number + * Create an inet socket addr from the local host + port number. 
* @param port port to use * @return a (hostname, port) pair * @throws UnknownHostException if the server cannot resolve the host @@ -125,9 +127,9 @@ private InetSocketAddress getAddress(int port) throws UnknownHostException { } /** - * Initialize the service, including choosing a path for the data + * Initialize the service, including choosing a path for the data. * @param conf configuration - * @throws Exception + * @throws Exception initialization problem */ @Override protected void serviceInit(Configuration conf) throws Exception { @@ -156,7 +158,7 @@ protected void serviceInit(Configuration conf) throws Exception { /** * Create a directory, ignoring if the dir is already there, * and failing if a file or something else was at the end of that - * path + * path. * @param dir dir to guarantee the existence of * @throws IOException IO problems, or path exists but is not a dir */ @@ -173,14 +175,14 @@ private void mkdirStrict(File dir) throws IOException { *

* A newline is appended afterwards. * @param text text including any format commands - * @param args arguments for the forma operation. + * @param args arguments for the format operation. */ protected void addDiagnostics(String text, Object ... args) { diagnostics.append(String.format(text, args)).append('\n'); } /** - * Get the diagnostics info + * Get the diagnostics info. * @return the diagnostics string built up */ public String getDiagnostics() { @@ -206,8 +208,8 @@ public boolean setupSecurity() throws IOException { "true")); //needed so that you can use sasl: strings in the registry - System.setProperty(RegistryInternalConstants.ZOOKEEPER_AUTH_PROVIDER +".1", - RegistryInternalConstants.SASLAUTHENTICATION_PROVIDER); + System.setProperty(ZOOKEEPER_AUTH_PROVIDER +".1", + SASLAUTHENTICATION_PROVIDER); String serverContext = System.getProperty(PROP_ZK_SERVER_SASL_CONTEXT); addDiagnostics("Server JAAS context s = %s", serverContext); @@ -220,7 +222,7 @@ public boolean setupSecurity() throws IOException { /** * Startup: start ZK. It is only after this that * the binding information is valid. - * @throws Exception + * @throws Exception startup problem. */ @Override protected void serviceStart() throws Exception { @@ -259,8 +261,8 @@ protected void serviceStart() throws Exception { /** * When the service is stopped, it deletes the data directory - * and its contents - * @throws Exception + * and its contents. + * @throws Exception any problem stopping. 
*/ @Override protected void serviceStop() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java index f4f4976c7b23c..5e614c4d87ce3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperServiceKeys.java @@ -26,44 +26,44 @@ * so are kept separate. */ public interface MicroZookeeperServiceKeys { - public static final String ZKSERVICE_PREFIX = + String ZKSERVICE_PREFIX = RegistryConstants.REGISTRY_PREFIX + "zk.service."; /** * Key to define the JAAS context for the ZK service: {@value}. */ - public static final String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT = + String KEY_REGISTRY_ZKSERVICE_JAAS_CONTEXT = ZKSERVICE_PREFIX + "service.jaas.context"; /** - * ZK servertick time: {@value} + * ZK servertick time: {@value}. */ - public static final String KEY_ZKSERVICE_TICK_TIME = + String KEY_ZKSERVICE_TICK_TIME = ZKSERVICE_PREFIX + "ticktime"; /** * host to register on: {@value}. */ - public static final String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host"; + String KEY_ZKSERVICE_HOST = ZKSERVICE_PREFIX + "host"; /** * Default host to serve on -this is localhost as it * is the only one guaranteed to be available: {@value}. */ - public static final String DEFAULT_ZKSERVICE_HOST = "localhost"; + String DEFAULT_ZKSERVICE_HOST = "localhost"; /** - * port; 0 or below means "any": {@value} + * port; 0 or below means "any": {@value}. 
*/ - public static final String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port"; + String KEY_ZKSERVICE_PORT = ZKSERVICE_PREFIX + "port"; /** - * Directory containing data: {@value} + * Directory containing data: {@value}. */ - public static final String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir"; + String KEY_ZKSERVICE_DIR = ZKSERVICE_PREFIX + "dir"; /** - * Should failed SASL clients be allowed: {@value}? + * Should failed SASL clients be allowed: {@value}. * * Default is the ZK default: true */ - public static final String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS = + String KEY_ZKSERVICE_ALLOW_FAILED_SASL_CLIENTS = ZKSERVICE_PREFIX + "allow.failed.sasl.clients"; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java index 7a20c248db2ec..fd636b44eaf2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/RegistryAdminService.java @@ -22,6 +22,8 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang.StringUtils; import org.apache.curator.framework.api.BackgroundCallback; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.PathNotFoundException; @@ -73,9 +75,11 @@ * A key async action is the depth-first tree purge, which supports * pluggable policies for deleting entries. 
The method * {@link #purge(String, NodeSelector, PurgePolicy, BackgroundCallback)} - * implements the recursive purge operation —the class - * {{AsyncPurge}} provides the asynchronous scheduling of this. + * implements the recursive purge operation -the class + * {@code AsyncPurge} provides the asynchronous scheduling of this. */ +@InterfaceAudience.Private +@InterfaceStability.Evolving public class RegistryAdminService extends RegistryOperationsService { private static final Logger LOG = @@ -88,12 +92,25 @@ public class RegistryAdminService extends RegistryOperationsService { | ZooDefs.Perms.CREATE | ZooDefs.Perms.DELETE; /** - * Executor for async operations + * Executor for async operations. */ protected final ExecutorService executor; /** - * Construct an instance of the service + * Future of the root path creation operation schedule on + * service start(). + */ + private Future rootPathsFuture; + + /** + * Flag set to true when registry setup is completed -that is, when the + * root directories have been created. If that operation fails, this + * flag will remain false. + */ + private volatile boolean registrySetupCompleted; + + /** + * Construct an instance of the service. * @param name service name */ public RegistryAdminService(String name) { @@ -101,8 +118,8 @@ public RegistryAdminService(String name) { } /** - * construct an instance of the service, using the - * specified binding source to bond to ZK + * Construct an instance of the service, using the + * specified binding source to bond to ZK. * @param name service name * @param bindingSource provider of ZK binding information */ @@ -143,7 +160,7 @@ protected synchronized void stopExecutor() { } /** - * Get the executor + * Get the executor. * @return the executor */ protected ExecutorService getExecutor() { @@ -151,7 +168,7 @@ protected ExecutorService getExecutor() { } /** - * Submit a callable + * Submit a callable. 
* @param callable callable * @param type of the final get * @return a future to wait on @@ -164,14 +181,14 @@ public Future submit(Callable callable) { } /** - * Asynchronous operation to create a directory + * Asynchronous operation to create a directory. * @param path path * @param acls ACL list * @param createParents flag to indicate parent dirs should be created * as needed * @return the future which will indicate whether or not the operation - * succeeded —and propagate any exceptions - * @throws IOException + * succeeded -and propagate any exceptions + * @throws IOException problem creating the path */ public Future createDirAsync(final String path, final List acls, @@ -179,8 +196,13 @@ public Future createDirAsync(final String path, return submit(new Callable() { @Override public Boolean call() throws Exception { - return maybeCreate(path, CreateMode.PERSISTENT, - acls, createParents); + try { + return maybeCreate(path, CreateMode.PERSISTENT, + acls, createParents); + } catch (IOException e) { + LOG.warn("Exception creating path {}", path, e); + throw e; + } } }); } @@ -188,31 +210,83 @@ public Boolean call() throws Exception { /** * Init operation sets up the system ACLs. * @param conf configuration of the service - * @throws Exception + * @throws Exception on a failure to initialize. 
*/ @Override protected void serviceInit(Configuration conf) throws Exception { super.serviceInit(conf); RegistrySecurity registrySecurity = getRegistrySecurity(); if (registrySecurity.isSecureRegistry()) { - ACL sasl = registrySecurity.createSaslACLFromCurrentUser(ZooDefs.Perms.ALL); + ACL sasl = registrySecurity + .createSaslACLFromCurrentUser(ZooDefs.Perms.ALL); registrySecurity.addSystemACL(sasl); - LOG.info("Registry System ACLs:", + LOG.info("Registry System ACLs: {}", RegistrySecurity.aclsToString( registrySecurity.getSystemACLs())); } } /** - * Start the service, including creating base directories with permissions - * @throws Exception + * Start the service, including creating base directories with permissions. + * @throws Exception on a failure to start. */ @Override protected void serviceStart() throws Exception { super.serviceStart(); // create the root directories + rootPathsFuture = asyncCreateRootRegistryPaths(); + } + + /** + * Asynchronous operation to create the root directories. + * @return the future which can be used to await the outcome of this + * operation + */ + @VisibleForTesting + public Future asyncCreateRootRegistryPaths() { + return submit(new Callable() { + @Override + public Void call() throws Exception { + createRootRegistryPaths(); + return null; + } + }); + } + + /** + * Get the outcome of the asynchronous directory creation operation + * @return the blocking future. If the service has not started this will + * be null. + */ + public Future getRootPathsFuture() { + return rootPathsFuture; + } + + /** + * Query if the registry has been set up successfully. This will be false + * if the operation has not been started, is underway, or if it failed. + * @return the current setup completion flag. + */ + public boolean isRegistrySetupCompleted() { + return registrySetupCompleted; + } + + /** + * Create the initial registry paths. 
+ * @throws IOException any failure + */ + private void createRootRegistryPaths() throws IOException { + try { - createRootRegistryPaths(); + List systemACLs = getRegistrySecurity().getSystemACLs(); + LOG.info("System ACLs {}", + RegistrySecurity.aclsToString(systemACLs)); + maybeCreate("", CreateMode.PERSISTENT, systemACLs, false); + maybeCreate(PATH_USERS, CreateMode.PERSISTENT, + systemACLs, false); + maybeCreate(PATH_SYSTEM_SERVICES, + CreateMode.PERSISTENT, + systemACLs, false); } catch (NoPathPermissionsException e) { String message = String.format(Locale.ENGLISH, @@ -224,35 +298,16 @@ protected void serviceStart() throws Exception { bindingDiagnosticDetails(), dumpRegistryRobustly(true)); - LOG.error(" Failure {}", e, e); + LOG.error(" Failure createRootRegistryPaths: {}", e.getPath(), e); LOG.error(message); - // TODO: this is something temporary to deal with the problem - // that jenkins is failing this test throw new NoPathPermissionsException(e.getPath().toString(), message, e); } + registrySetupCompleted = true; } /** - * Create the initial registry paths - * @throws IOException any failure - */ - @VisibleForTesting - public void createRootRegistryPaths() throws IOException { - - List systemACLs = getRegistrySecurity().getSystemACLs(); - LOG.info("System ACLs {}", - RegistrySecurity.aclsToString(systemACLs)); - maybeCreate("", CreateMode.PERSISTENT, systemACLs, false); - maybeCreate(PATH_USERS, CreateMode.PERSISTENT, - systemACLs, false); - maybeCreate(PATH_SYSTEM_SERVICES, - CreateMode.PERSISTENT, - systemACLs, false); - } - - /** - * Get the path to a user's home dir + * Get the path to a user's home dir. * @param username username * @return a path for services underneath */ @@ -280,7 +335,7 @@ public List aclsForUser(String username, int perms) throws IOException { /** * Start an async operation to create the home path for a user - * if it does not exist + * if it does not exist. 
* @param shortname username, without any @REALM in kerberos * @return the path created * @throws IOException any failure while setting up the operation @@ -352,11 +407,18 @@ protected void verifyRealmValidity() throws ServiceStateException { } /** - * Policy to purge entries + * How to act when purging an entry which has children.. */ public enum PurgePolicy { + /** Purge everything .*/ PurgeAll, + + /** Fail if there are child entries. */ FailOnChildren, + + /** + * Skip if there are child entries. + */ SkipOnChildren } @@ -364,25 +426,26 @@ public enum PurgePolicy { * Recursive operation to purge all matching records under a base path. *

    *
  1. Uses a depth first search
  2. - *
  3. A match is on ID and persistence policy, or, if policy==-1, any match
  4. - *
  5. If a record matches then it is deleted without any child searches
  6. + *
  7. A path is considered a match if {@code selector} selects it.
  8. + *
  9. If a record matches then it is deleted based on the + * {@code PurgePolicy} and the number of children (if any).
  10. *
  11. Deletions will be asynchronous if a callback is provided.
  12. *
* * The code is designed to be robust against parallel deletions taking place; * in such a case it will stop attempting that part of the tree. This * avoid the situation of more than 1 purge happening in parallel and - * one of the purge operations deleteing the node tree above the other. + * one of the purge operations deleting the node tree above the other. * @param path base path * @param selector selector for the purge policy * @param purgePolicy what to do if there is a matching record with children * @param callback optional curator callback - * @return the number of delete operations perfomed. As deletes may be for + * @return the number of delete operations performed. As deletes may be for * everything under a path, this may be less than the number of records * actually deleted - * @throws IOException problems * @throws PathIsNotEmptyDirectoryException if an entry cannot be deleted - * as it has children and the purge policy is FailOnChildren + * as it has children and the purge policy is {@link PurgePolicy#FailOnChildren} + * @throws IOException other problems */ @VisibleForTesting public int purge(String path, @@ -410,11 +473,7 @@ public int purge(String path, ServiceRecord serviceRecord = resolve(path); // there is now an entry here. toDelete = selector.shouldSelect(path, registryPathStatus, serviceRecord); - } catch (EOFException ignored) { - // ignore - } catch (InvalidRecordException ignored) { - // ignore - } catch (NoRecordException ignored) { + } catch (EOFException | NoRecordException | InvalidRecordException ignored) { // ignore } catch (PathNotFoundException e) { // there's no record here, it may have been deleted already. @@ -428,26 +487,20 @@ public int purge(String path, } // there's children switch (purgePolicy) { - case SkipOnChildren: - // don't do the deletion... 
continue to next record - if (LOG.isDebugEnabled()) { - LOG.debug("Skipping deletion"); - } - toDelete = false; - break; - case PurgeAll: - // mark for deletion - if (LOG.isDebugEnabled()) { - LOG.debug("Scheduling for deletion with children"); - } - toDelete = true; - entries = new ArrayList(0); - break; - case FailOnChildren: - if (LOG.isDebugEnabled()) { - LOG.debug("Failing deletion operation"); - } - throw new PathIsNotEmptyDirectoryException(path); + case SkipOnChildren: + // don't do the deletion... continue to next record + LOG.debug("Skipping deletion of {}", path); + toDelete = false; + break; + case PurgeAll: + // mark for deletion + LOG.debug("Scheduling for deletion of {} with children", path); + toDelete = true; + entries = new ArrayList<>(0); + break; + case FailOnChildren: + LOG.debug("Failing deletion operation on {}", path); + throw new PathIsNotEmptyDirectoryException(path); } } @@ -477,7 +530,7 @@ public int purge(String path, } /** - * Comparator used for purge logic + * Comparator used for purge logic. */ public interface NodeSelector { @@ -487,7 +540,7 @@ boolean shouldSelect(String path, } /** - * An async registry purge action taking + * An async registry purge action taking. * a selector which decides what to delete */ public class AsyncPurge implements Callable { @@ -507,15 +560,24 @@ public AsyncPurge(String path, this.purgePolicy = purgePolicy; } + /** + * Execute a purge operation. Exceptions are caught, logged and rethrown. 
+ * @return the number of records purged + */ @Override public Integer call() throws Exception { - if (LOG.isDebugEnabled()) { - LOG.debug("Executing {}", this); + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Executing {}", this); + } + return purge(path, + selector, + purgePolicy, + callback); + } catch (Exception e) { + LOG.warn("Exception in {}", this, e); + throw e; } - return purge(path, - selector, - purgePolicy, - callback); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java index fe2a0a89f2c5a..d9d34c7af9ac5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/services/package-info.java @@ -36,4 +36,5 @@ * * */ + package org.apache.hadoop.registry.server.services; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java index 5b34f6032e139..bbce226907997 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractRegistryTest.java @@ -30,6 +30,9 @@ import java.io.IOException; import java.net.URISyntaxException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; /** * Abstract registry tests .. 
inits the field {@link #registry} @@ -45,19 +48,27 @@ public class AbstractRegistryTest extends AbstractZKRegistryTest { protected RegistryOperations operations; @Before - public void setupRegistry() throws IOException { + public void setupRegistry() throws + IOException, + InterruptedException, + ExecutionException, + TimeoutException { registry = new RMRegistryOperationsService("yarnRegistry"); operations = registry; registry.init(createRegistryConfiguration()); registry.start(); - operations.delete("/", true); - registry.createRootRegistryPaths(); + // await root directory creation completion + registry.getRootPathsFuture().get(30, TimeUnit.SECONDS); + // then purge the paths to clean up any existing entries + registry.delete("/", true); + // and rebuild + registry.asyncCreateRootRegistryPaths().get(30, TimeUnit.SECONDS); addToTeardown(registry); } /** * Create a service entry with the sample endpoints, and put it - * at the destination + * at the destination. * @param path path * @param createFlags flags * @return the record @@ -71,7 +82,7 @@ protected ServiceRecord putExampleServiceEntry(String path, int createFlags) thr /** * Create a service entry with the sample endpoints, and put it - * at the destination + * at the destination. * @param path path * @param createFlags flags * @return the record @@ -89,7 +100,7 @@ protected ServiceRecord putExampleServiceEntry(String path, } /** - * Assert a path exists + * Assert a path exists. * @param path path in the registry * @throws IOException */ @@ -98,7 +109,7 @@ public void assertPathExists(String path) throws IOException { } /** - * assert that a path does not exist + * assert that a path does not exist. * @param path path in the registry * @throws IOException */ @@ -112,7 +123,7 @@ public void assertPathNotFound(String path) throws IOException { } /** - * Assert that a path resolves to a service record + * Assert that a path resolves to a service record. 
* @param path path in the registry * @throws IOException */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java index bcff6222f9226..3f971a8f878d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/AbstractZKRegistryTest.java @@ -84,7 +84,7 @@ public static void createZKServer() throws Exception { } /** - * give our thread a name + * give our thread a name. */ @Before public void nameThread() { @@ -92,7 +92,7 @@ public void nameThread() { } /** - * Returns the connection string to use + * Returns the connection string to use. * * @return connection string */ diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java index cd877b2738381..1bac93267cd02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/RegistryTestHelper.java @@ -49,7 +49,7 @@ /** * This is a set of static methods to aid testing the registry operations. - * The methods can be imported statically —or the class used as a base + * The methods can be imported statically, or the class used as a base * class for tests. 
*/ public class RegistryTestHelper extends Assert { @@ -70,7 +70,7 @@ public class RegistryTestHelper extends Assert { public static final String HTTP_API = "http://"; /** - * Assert the path is valid by ZK rules + * Assert the path is valid by ZK rules. * @param path path to check */ public static void assertValidZKPath(String path) { @@ -82,7 +82,7 @@ public static void assertValidZKPath(String path) { } /** - * Assert that a string is not empty (null or "") + * Assert that a string is not empty (null or ""). * @param message message to raise if the string is empty * @param check string to check */ @@ -93,7 +93,7 @@ public static void assertNotEmpty(String message, String check) { } /** - * Assert that a string is empty (null or "") + * Assert that a string is empty (null or ""). * @param check string to check */ public static void assertNotEmpty(String check) { @@ -103,7 +103,7 @@ public static void assertNotEmpty(String check) { } /** - * Log the details of a login context + * Log the details of a login context. * @param name name to assert that the user is logged in as * @param loginContext the login context */ @@ -115,14 +115,14 @@ public static void logLoginDetails(String name, } /** - * Set the JVM property to enable Kerberos debugging + * Set the JVM property to enable Kerberos debugging. */ public static void enableKerberosDebugging() { System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG, "true"); } /** - * Set the JVM property to enable Kerberos debugging + * Set the JVM property to enable Kerberos debugging. */ public static void disableKerberosDebugging() { System.setProperty(AbstractSecureRegistryTest.SUN_SECURITY_KRB5_DEBUG, @@ -130,7 +130,7 @@ public static void disableKerberosDebugging() { } /** - * General code to validate bits of a component/service entry built iwth + * General code to validate bits of a component/service entry built with. 
* {@link #addSampleEndpoints(ServiceRecord, String)} * @param record instance to check */ @@ -162,7 +162,7 @@ public static void validateEntry(ServiceRecord record) { } /** - * Assert that an endpoint matches the criteria + * Assert that an endpoint matches the criteria. * @param endpoint endpoint to examine * @param addressType expected address type * @param protocolType expected protocol type @@ -207,7 +207,7 @@ public static void assertMatches(ServiceRecord source, ServiceRecord resolved) { } /** - * Find an endpoint in a record or fail, + * Find an endpoint in a record or fail. * @param record record * @param api API * @param external external? @@ -237,7 +237,7 @@ public static Endpoint findEndpoint(ServiceRecord record, } /** - * Log a record + * Log a record. * @param name record name * @param record details * @throws IOException only if something bizarre goes wrong marshalling @@ -249,7 +249,7 @@ public static void logRecord(String name, ServiceRecord record) throws } /** - * Create a service entry with the sample endpoints + * Create a service entry with the sample endpoints. * @param persistence persistence policy * @return the record * @throws IOException on a failure @@ -265,7 +265,7 @@ public static ServiceRecord buildExampleServiceEntry(String persistence) throws } /** - * Add some endpoints + * Add some endpoints. * @param entry entry */ public static void addSampleEndpoints(ServiceRecord entry, String hostname) @@ -291,7 +291,7 @@ public static void addSampleEndpoints(ServiceRecord entry, String hostname) /** * Describe the stage in the process with a box around it -so as - * to highlight it in test logs + * to highlight it in test logs. * @param log log to use * @param text text * @param args logger args @@ -303,7 +303,7 @@ public static void describe(Logger log, String text, Object...args) { } /** - * log out from a context if non-null ... exceptions are caught and logged + * log out from a context if non-null ... exceptions are caught and logged. 
* @param login login context * @return null, always */ @@ -320,11 +320,11 @@ public static LoginContext logout(LoginContext login) { } /** - * Login via a UGI. Requres UGI to have been set up + * Login via a UGI. Requires UGI to have been set up. * @param user username * @param keytab keytab to list * @return the UGI - * @throws IOException + * @throws IOException login failure. */ public static UserGroupInformation loginUGI(String user, File keytab) throws IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/cli/TestRegistryCli.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/cli/TestRegistryCli.java index bd8a38dc64e1b..1ef084b689bb7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/cli/TestRegistryCli.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/cli/TestRegistryCli.java @@ -30,7 +30,7 @@ public class TestRegistryCli extends AbstractRegistryTest { protected static final Logger LOG = - LoggerFactory.getLogger(TestRegistryOperations.class); + LoggerFactory.getLogger(TestRegistryCli.class); private ByteArrayOutputStream sysOutStream; private PrintStream sysOut; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java index f1814d30707c0..e010e0b423c80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestMarshalling.java @@ -32,7 +32,7 @@ import org.slf4j.LoggerFactory; /** - * Test record marshalling + * Test 
record marshalling. */ public class TestMarshalling extends RegistryTestHelper { private static final Logger diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java index b07d2ced64f89..69de4df0b194d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryOperationUtils.java @@ -23,7 +23,7 @@ import org.junit.Test; /** - * Tests for the {@link RegistryUtils} class + * Tests for the {@link RegistryUtils} class. */ public class TestRegistryOperationUtils extends Assert { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java index 9a24f1c9c8597..04108d18864ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/client/binding/TestRegistryPathUtils.java @@ -40,7 +40,7 @@ public void testFormatAscii() throws Throwable { } /* - * Euro symbol + * Euro symbol. 
*/ @Test public void testFormatEuroSymbol() throws Throwable { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java index 451a69b695bdb..67f9ab9ab4aee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/integration/TestRegistryRMOperations.java @@ -52,7 +52,7 @@ public class TestRegistryRMOperations extends AbstractRegistryTest { LoggerFactory.getLogger(TestRegistryRMOperations.class); /** - * trigger a purge operation + * trigger a purge operation. * @param path path * @param id yarn ID * @param policyMatch policy to match ID on @@ -72,8 +72,8 @@ public int purge(String path, /** * - * trigger a purge operation - * @param path pathn + * trigger a purge operation. + * @param path path * @param id yarn ID * @param policyMatch policy to match ID on * @param purgePolicy policy when there are children under a match @@ -211,8 +211,7 @@ public void testPutGetContainerPersistenceServiceEntry() throws Throwable { } /** - * Create a complex example app - * @throws Throwable + * Create a complex example app. 
*/ @Test public void testCreateComplexApplication() throws Throwable { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java index 853d7f179095f..c2055e57e7645 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/operations/TestRegistryOperations.java @@ -283,7 +283,7 @@ public void testListListFully() throws Throwable { List list = operations.list(path); assertEquals("Wrong no. of children", 2, list.size()); // there's no order here, so create one - Map names = new HashMap(); + Map names = new HashMap<>(); String entries = ""; for (String child : list) { names.put(child, child); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java index 8d0dc6a015c1e..0c21ea0cfc329 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestRegistrySecurityHelper.java @@ -36,7 +36,7 @@ import static org.apache.hadoop.registry.client.api.RegistryConstants.*; /** - * Test for registry security operations + * Test for registry security operations. 
*/ public class TestRegistrySecurityHelper extends Assert { private static final Logger LOG = @@ -190,22 +190,4 @@ public void testUGIProperties() throws Throwable { LOG.info("User {} has ACL {}", user, acl); } - - @Test - public void testSecurityImpliesKerberos() throws Throwable { - Configuration conf = new Configuration(); - conf.setBoolean("hadoop.security.authentication", true); - conf.setBoolean(KEY_REGISTRY_SECURE, true); - conf.set(KEY_REGISTRY_KERBEROS_REALM, "KERBEROS"); - RegistrySecurity security = new RegistrySecurity("registry security"); - try { - security.init(conf); - } catch (Exception e) { - assertTrue( - "did not find "+ RegistrySecurity.E_NO_KERBEROS + " in " + e, - e.toString().contains(RegistrySecurity.E_NO_KERBEROS)); - } - } - - } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java index d66bb793b9f1a..dfb73affa6662 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureLogins.java @@ -49,7 +49,7 @@ import org.slf4j.LoggerFactory; /** - * Verify that logins work + * Verify that logins work. 
*/ public class TestSecureLogins extends AbstractSecureRegistryTest { private static final Logger LOG = @@ -134,7 +134,7 @@ public void testKerberosAuth() throws Throwable { Class.forName(KerberosUtil.getKrb5LoginModuleName()); Constructor kerb5LoginConstr = kerb5LoginClass.getConstructor(); Object kerb5LoginObject = kerb5LoginConstr.newInstance(); - final Map options = new HashMap(); + final Map options = new HashMap<>(); options.put("debug", "true"); if (IBM_JAVA) { options.put("useKeytab", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java index 41760d644ef63..679b29bdbc36c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRMRegistryOperations.java @@ -47,11 +47,12 @@ import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.List; +import java.util.concurrent.TimeUnit; import static org.apache.hadoop.registry.client.api.RegistryConstants.*; /** - * Verify that the {@link RMRegistryOperationsService} works securely + * Verify that the {@link RMRegistryOperationsService} works securely. 
*/ public class TestSecureRMRegistryOperations extends AbstractSecureRegistryTest { private static final Logger LOG = @@ -103,6 +104,7 @@ public RMRegistryOperationsService run() throws Exception { operations.init(secureConf); LOG.info(operations.bindingDiagnosticDetails()); operations.start(); + operations.getRootPathsFuture().get(30, TimeUnit.SECONDS); return operations; } }); @@ -111,7 +113,7 @@ public RMRegistryOperationsService run() throws Exception { } /** - * test that ZK can write as itself + * test that ZK can write as itself. * @throws Throwable */ @Test @@ -170,7 +172,7 @@ public void testAnonNoWriteAccessOffRoot() throws Throwable { } /** - * Expect a mknode operation to fail + * Expect a mknode operation to fail. * @param operations operations instance * @param path path * @throws IOException An IO failure other than those permitted @@ -188,7 +190,7 @@ public void expectMkNodeFailure(RegistryOperations operations, } /** - * Expect a delete operation to fail + * Expect a delete operation to fail. * @param operations operations instance * @param path path * @param recursive diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java index 083f7f9522a40..c4aa8e8f49299 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/test/java/org/apache/hadoop/registry/secure/TestSecureRegistry.java @@ -39,7 +39,7 @@ import static org.apache.hadoop.registry.client.api.RegistryConstants.*; /** - * Verify that the Mini ZK service can be started up securely + * Verify that the Mini ZK service can be started up securely. 
*/ public class TestSecureRegistry extends AbstractSecureRegistryTest { private static final Logger LOG = @@ -58,7 +58,7 @@ public void afterTestSecureZKService() throws Throwable { /** * this is a cut and paste of some of the ZK internal code that was - * failing on windows and swallowing its exceptions + * failing on windows and swallowing its exceptions. */ @Test public void testLowlevelZKSaslLogin() throws Throwable { @@ -107,7 +107,7 @@ public void testInsecureClientToZK() throws Throwable { } /** - * test that ZK can write as itself + * test that ZK can write as itself. * @throws Throwable */ @Test @@ -139,7 +139,7 @@ public void testZookeeperCanWrite() throws Throwable { } /** - * Start a curator service instance + * Start a curator service instance. * @param name name * @param secure flag to indicate the cluster is secure * @return an inited and started curator service diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml index ee892d1a47838..184cd270bfdbb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml @@ -60,6 +60,11 @@ hadoop-yarn-common + + org.apache.hadoop + hadoop-yarn-registry + + com.google.guava guava diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java index 0e305a9417d57..d4fdd0d6d853e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java @@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; @@ -97,6 +98,7 @@ public class RMActiveServiceContext { private RMTimelineCollectorManager timelineCollectorManager; private RMNodeLabelsManager nodeLabelManager; + private RMRegistryService registry; private RMDelegatedNodeLabelsUpdater rmDelegatedNodeLabelsUpdater; private long epoch; private Clock systemClock = SystemClock.getInstance(); @@ -123,7 +125,8 @@ public RMActiveServiceContext(Dispatcher rmDispatcher, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, - ResourceScheduler scheduler) { + ResourceScheduler scheduler, + RMRegistryService registry) { this(); this.setContainerAllocationExpirer(containerAllocationExpirer); this.setAMLivelinessMonitor(amLivelinessMonitor); @@ -483,4 +486,16 @@ public void setRMAppLifetimeMonitor( public RMAppLifetimeMonitor getRMAppLifetimeMonitor() { return this.rmAppLifetimeMonitor; } + + @Private + @Unstable + public RMRegistryService getRegistry() { + return registry; + } + + @Private + @Unstable + public void setRegistry(RMRegistryService registry) { + this.registry = registry; + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index c9d185f9054b4..1789b331967c1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater; import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; @@ -140,6 +141,8 @@ void setRMDelegatedNodeLabelsUpdater( boolean isSchedulerReadyForAllocatingContainers(); Configuration getYarnConfiguration(); + + RMRegistryService getRegistry(); PlacementManager getQueuePlacementManager(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index dc8f7d1ea0b49..e409f01bb6991 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMDelegatedNodeLabelsUpdater; import org.apache.hadoop.yarn.server.resourcemanager.placement.PlacementManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; @@ -98,7 +99,8 @@ public RMContextImpl(Dispatcher rmDispatcher, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager, - ResourceScheduler scheduler) { + ResourceScheduler scheduler, + RMRegistryService registry) { this(); this.setDispatcher(rmDispatcher); setActiveServiceContext(new RMActiveServiceContext(rmDispatcher, @@ -106,7 +108,8 @@ public RMContextImpl(Dispatcher rmDispatcher, delegationTokenRenewer, appTokenSecretManager, containerTokenSecretManager, nmTokenSecretManager, clientToAMTokenSecretManager, - scheduler)); + scheduler, + registry)); ConfigurationProvider provider = new LocalConfigurationProvider(); setConfigurationProvider(provider); @@ -132,7 +135,7 @@ public RMContextImpl(Dispatcher rmDispatcher, appTokenSecretManager, containerTokenSecretManager, nmTokenSecretManager, - clientToAMTokenSecretManager, null); + clientToAMTokenSecretManager, null, null); } @Override @@ -511,4 +514,12 @@ public void setRMAppLifetimeMonitor( 
public RMAppLifetimeMonitor getRMAppLifetimeMonitor() { return this.activeServiceContext.getRMAppLifetimeMonitor(); } + + public RMRegistryService getRegistry() { + return activeServiceContext.getRegistry(); + } + + void setRegistry(RMRegistryService registry) { + activeServiceContext.setRegistry(registry); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 8ddbc20569e88..d5b2623269f70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -64,6 +64,7 @@ import org.apache.hadoop.yarn.event.EventDispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; @@ -80,6 +81,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreFactory; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; +import org.apache.hadoop.yarn.server.resourcemanager.registry.RMRegistryService; import org.apache.hadoop.yarn.server.resourcemanager.reservation.AbstractReservationSystem; 
import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystem; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -707,6 +709,14 @@ protected void serviceInit(Configuration configuration) throws Exception { new RMNMInfo(rmContext, scheduler); + boolean registryEnabled = + conf.getBoolean(RegistryConstants.KEY_REGISTRY_ENABLED, + RegistryConstants.DEFAULT_REGISTRY_ENABLED); + if (registryEnabled) { + RMRegistryService registry = new RMRegistryService(rmContext); + addService(registry); + rmContext.setRegistry(registry); + } super.serviceInit(conf); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java new file mode 100644 index 0000000000000..d6fbf038ef45b --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/registry/RMRegistryService.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.registry; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.registry.server.integration.RMRegistryOperationsService; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreAppEvent; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreEvent; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStoreEventType; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.ApplicationStateData; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +/** + * This is the RM service which translates from RM events + * to registry actions. 
+ */ +@InterfaceAudience.Private +@InterfaceStability.Evolving +public class RMRegistryService extends CompositeService { + private static final Logger LOG = + LoggerFactory.getLogger(RMRegistryService.class); + + private final RMContext rmContext; + + /** + * Registry service for deployment within the YARN RM. + */ + private final RMRegistryOperationsService registryOperations; + + public RMRegistryService(RMContext rmContext) { + super(RMRegistryService.class.getName()); + this.rmContext = rmContext; + registryOperations = new RMRegistryOperationsService("Registry"); + addService(registryOperations); + } + + /** + * Start the service: register event handlers. + * @throws Exception any problem starting up + */ + @Override + protected void serviceStart() throws Exception { + super.serviceStart(); + + LOG.info("RM registry service started : {}", + registryOperations.bindingDiagnosticDetails()); + // Register self as event handler for RM Events + register(RMAppAttemptEventType.class, new AppEventHandler()); + register(RMAppManagerEventType.class, new AppManagerEventHandler()); + register(RMStateStoreEventType.class, new StateStoreEventHandler()); + register(RMContainerEventType.class, new ContainerEventHandler()); + } + + /** + * Register a handler. 
+ * @param eventType event type + * @param handler handler + */ + private void register(Class eventType, + EventHandler handler) { + rmContext.getDispatcher().register(eventType, handler); + } + + @SuppressWarnings( + {"EnumSwitchStatementWhichMissesCases", "UnnecessaryDefault"}) + protected void handleAppManagerEvent(RMAppManagerEvent event) throws + IOException { + RMAppManagerEventType eventType = event.getType(); + ApplicationId appId = + event.getApplicationId(); + switch (eventType) { + case APP_COMPLETED: + registryOperations.onApplicationCompleted(appId); + break; + default: + // this isn't in the enum today...just making sure for the + // future + break; + } + } + + @SuppressWarnings("EnumSwitchStatementWhichMissesCases") + private void handleStateStoreEvent(RMStateStoreEvent event) + throws IOException { + RMStateStoreEventType eventType = event.getType(); + switch (eventType) { + case STORE_APP: + RMStateStoreAppEvent storeAppEvent = (RMStateStoreAppEvent) event; + ApplicationStateData appState = storeAppEvent.getAppState(); + ApplicationId appId = + appState.getApplicationSubmissionContext().getApplicationId(); + registryOperations.onStateStoreEvent(appId, appState.getUser()); + break; + + default: + break; + } + } + + @SuppressWarnings("EnumSwitchStatementWhichMissesCases") + protected void handleAppAttemptEvent(RMAppAttemptEvent event) throws + IOException { + RMAppAttemptEventType eventType = event.getType(); + ApplicationAttemptId appAttemptId = + event.getApplicationAttemptId(); + + switch (eventType) { + + case UNREGISTERED: + registryOperations.onApplicationAttemptUnregistered(appAttemptId); + break; + + // container has finished + case CONTAINER_FINISHED: + RMAppAttemptContainerFinishedEvent cfe = + (RMAppAttemptContainerFinishedEvent) event; + ContainerId containerId = cfe.getContainerStatus().getContainerId(); + registryOperations.onContainerFinished(containerId); + break; + + default: + // do nothing + } + } + + 
@SuppressWarnings("EnumSwitchStatementWhichMissesCases") + private void handleContainerEvent(RMContainerEvent event) + throws IOException { + RMContainerEventType eventType = event.getType(); + switch (eventType) { + case FINISHED: + ContainerId containerId = event.getContainerId(); + registryOperations.onContainerFinished(containerId); + break; + + default: + break; + } + } + + /** + * Handler for app events. + */ + private class AppEventHandler implements EventHandler { + + @Override + public void handle(RMAppAttemptEvent event) { + try { + handleAppAttemptEvent(event); + } catch (IOException e) { + LOG.warn("handling {}", event, e); + } + } + } + + /** + * Handler for RM-side App manager events. + */ + private class AppManagerEventHandler + implements EventHandler { + @Override + public void handle(RMAppManagerEvent event) { + try { + handleAppManagerEvent(event); + } catch (IOException e) { + LOG.warn("handling {}", event, e); + } + } + } + + /** + * Handler for RM-side state store events. + * This happens early on, and as the data contains the user details, + * it is where paths can be set up in advance of being used. + */ + private class StateStoreEventHandler + implements EventHandler { + @Override + public void handle(RMStateStoreEvent event) { + try { + handleStateStoreEvent(event); + } catch (IOException e) { + LOG.warn("handling {}", event, e); + } + } + } + + /** + * Handler for RM-side container events. 
+ */ + private class ContainerEventHandler + implements EventHandler { + + @Override + public void handle(RMContainerEvent event) { + try { + handleContainerEvent(event); + } catch (IOException e) { + LOG.warn("handling {}", event, e); + } + } + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 6055afb81d492..7b6179c5ec3da 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -116,7 +116,7 @@ public void setUp() throws Exception { rmContext = new RMContextImpl(rmDispatcher, mock(ContainerAllocationExpirer.class), null, null, mock(DelegationTokenRenewer.class), null, null, null, - null, null); + null); NodesListManager nodesListManager = mock(NodesListManager.class); HostsFileReader reader = mock(HostsFileReader.class); when(nodesListManager.getHostsReader()).thenReturn(reader); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java index 1ff6a1a8185a5..87a71c96f2e65 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java @@ -265,7 +265,7 @@ public static RMContext createRMContext(Configuration conf) { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM())); RMNodeLabelsManager nlm = mock(RMNodeLabelsManager.class); when(nlm.getQueueResource(any(String.class), anySetOf(String.class), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java index c837450f0218b..176cb7b1b88a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMExpiry.java @@ -72,7 +72,7 @@ public void setUp() { // Dispatcher that processes events inline Dispatcher dispatcher = new InlineDispatcher(); RMContext context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); 
dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java index e7c7e51bf2c4c..a28b6ece61644 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestNMReconnect.java @@ -93,7 +93,7 @@ public void setUp() { new TestRMNodeEventDispatcher()); context = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, null); + null, null, null, null, null, null, null); dispatcher.register(SchedulerEventType.class, new InlineDispatcher.EmptyEventHandler()); dispatcher.register(RMNodeEventType.class, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java index 4f9469548aed3..f2b75a19a6a90 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java @@ -71,7 +71,7 @@ public void handle(Event event) { RMContext context = new RMContextImpl(dispatcher, null, null, null, null, null, new RMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInRM(conf), null, null); + new NMTokenSecretManagerInRM(conf), null); dispatcher.register(RMNodeEventType.class, new ResourceManager.NodeEventDispatcher(context)); NodesListManager nodesListManager = new NodesListManager(context); @@ -137,4 +137,4 @@ public void testRPCResponseId() throws IOException, YarnException { Assert.assertEquals("Too far behind rm response id:2 nm response id:0", response.getDiagnosticsMessage()); } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index 63953397d1109..6646a4ba6811a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -644,7 +644,7 @@ private ResourceTrackerService getPrivateResourceTrackerService( RMContext privateContext = new RMContextImpl(privateDispatcher, null, null, null, null, null, null, - null, null, null); + null, null, null, null); privateContext.setNodeLabelManager(Mockito.mock(RMNodeLabelsManager.class)); 
privateDispatcher.register(SchedulerEventType.class, sleepHandler); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 0aeedce98aa8a..1b78eca4a04fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -484,7 +484,7 @@ public void testRefreshQueues() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); setupQueueConfiguration(conf); cs.setConf(new YarnConfiguration()); cs.setRMContext(resourceManager.getRMContext()); @@ -593,7 +593,7 @@ public void testParseQueue() throws IOException { cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM())); } @Test @@ -609,7 +609,7 @@ public void testReconnectedNode() throws Exception { cs.reinitialize(csConf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM())); RMNode 
n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1); RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2); @@ -640,7 +640,7 @@ public void testRefreshQueuesWithNewQueue() throws Exception { cs.reinitialize(conf, new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM())); checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); // Add a new queue b4 @@ -2298,7 +2298,7 @@ public void testPreemptionDisabled() throws Exception { RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); setupQueueConfiguration(conf); cs.setConf(new YarnConfiguration()); cs.setRMContext(resourceManager.getRMContext()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java index 92baa857007e0..38cc7adfd54a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueParsing.java @@ -431,7 +431,7 @@ public void testQueueParsingReinitializeWithLabels() throws IOException { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new 
NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(conf); capacityScheduler.setRMContext(rmContext); @@ -520,7 +520,7 @@ public void testQueueParsingWithLabels() throws IOException { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); @@ -544,7 +544,7 @@ public void testQueueParsingWithLabelsInherit() throws IOException { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); @@ -567,7 +567,7 @@ public void testQueueParsingWhenLabelsNotExistedInNodeLabelManager() new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager(); nodeLabelsManager.init(conf); @@ -595,7 +595,7 @@ public void testQueueParsingWhenLabelsInheritedNotExistedInNodeLabelManager() new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager(); nodeLabelsManager.init(conf); @@ -623,7 +623,7 @@ public 
void testSingleLevelQueueParsingWhenLabelsNotExistedInNodeLabelManager() new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager(); nodeLabelsManager.init(conf); @@ -650,7 +650,7 @@ public void testQueueParsingWhenLabelsNotExist() throws IOException { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); RMNodeLabelsManager nodeLabelsManager = new NullRMNodeLabelsManager(); nodeLabelsManager.init(conf); @@ -685,7 +685,7 @@ public void testQueueParsingWithUnusedLabels() throws IOException { new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setRMContext(rmContext); capacityScheduler.init(conf); @@ -794,7 +794,7 @@ public void testQueueParsingFailWhenSumOfChildrenNonLabeledCapacityNot100Percent new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); @@ -825,7 +825,7 @@ public void testQueueParsingFailWhenSumOfChildrenLabeledCapacityNot100Percent() new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + 
new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); @@ -860,7 +860,7 @@ public void testQueueParsingWithSumOfChildLabelCapacityNot100PercentWithWildCard new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); @@ -886,7 +886,7 @@ public void testQueueParsingWithMoveQueue() new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(csConf), new NMTokenSecretManagerInRM(csConf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(nodeLabelManager); capacityScheduler.setConf(csConf); capacityScheduler.setRMContext(rmContext); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 028bcb9064739..243a2d82a6f21 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -182,7 +182,7 @@ public void testAppAttemptMetrics() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMApplicationHistoryWriter 
writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, - null, null, null, null, null, null, null, scheduler); + null, null, null, null, null, null, null, scheduler, null); ((RMContextImpl) rmContext).setSystemMetricsPublisher( mock(SystemMetricsPublisher.class)); @@ -229,7 +229,8 @@ public void testNodeLocalAssignment() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler); + null, containerTokenSecretManager, nmTokenSecretManager, null, + scheduler, null); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); rmContext.setRMApplicationHistoryWriter( mock(RMApplicationHistoryWriter.class)); @@ -306,7 +307,8 @@ public void testUpdateResourceOnNode() throws Exception { FifoScheduler scheduler = new FifoScheduler(); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, - null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler); + null, containerTokenSecretManager, nmTokenSecretManager, null, + scheduler, null); rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class)); ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java index df8d67c3686ea..004acc9bbe1a4 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java @@ -185,7 +185,7 @@ public static RMContext mockRMContext(int numApps, int racks, int numNodes, } RMContextImpl rmContext = new RMContextImpl(null, null, null, null, - null, null, null, null, null, null) { + null, null, null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; @@ -233,7 +233,7 @@ public static CapacityScheduler mockCapacityScheduler() throws IOException { RMContext rmContext = new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null); + new ClientToAMTokenSecretManagerInRM()); rmContext.setNodeLabelManager(new NullRMNodeLabelsManager()); cs.setRMContext(rmContext); cs.init(conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java index 78fadef03e5f8..9460af7939e2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebAppFairScheduler.java @@ -157,7 +157,7 @@ public YarnApplicationState createApplicationState() { 
} RMContextImpl rmContext = new RMContextImpl(null, null, null, null, - null, null, null, null, null, null) { + null, null, null, null, null) { @Override public ConcurrentMap getRMApps() { return applicationsMaps; @@ -187,7 +187,7 @@ private static FairScheduler mockFairScheduler() throws IOException { fs.setRMContext(new RMContextImpl(null, null, null, null, null, null, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM(), null)); + new ClientToAMTokenSecretManagerInRM())); fs.init(conf); return fs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index 6af2110d1fdac..78a88144aa8d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -651,7 +651,7 @@ public void testAppsRace() throws Exception { anyBoolean())).thenReturn(mockAppsResponse); ResourceManager mockRM = mock(ResourceManager.class); RMContextImpl rmContext = new RMContextImpl(null, null, null, null, null, - null, null, null, null, null); + null, null, null, null); when(mockRM.getRMContext()).thenReturn(rmContext); when(mockRM.getClientRMService()).thenReturn(mockClientSvc); rmContext.setNodeLabelManager(mock(RMNodeLabelsManager.class)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md index adca4517aeae5..a5cfa61b549f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-configuration.md @@ -41,28 +41,30 @@ the values, so enabling them to read from and potentially write to the registry. The Resource Manager manages user directory creation and record cleanup on YARN container/application attempt/application completion. -``` - - - Is the registry enabled in the YARN Resource Manager? - - If true, the YARN RM will, as needed. - create the user and system paths, and purge - service records when containers, application attempts - and applications complete. - - If false, the paths must be created by other means, - and no automatic cleanup of service records will take place. - - hadoop.registry.rm.enabled - false - +```xml + + + Is the registry enabled in the YARN Resource Manager? + + If true, the YARN RM will, as needed. + create the user and system paths, and purge + service records when containers, application attempts + and applications complete. + + If false, the paths must be created by other means, + and no automatic cleanup of service records will take place. + + hadoop.registry.rm.enabled + false + ``` If the property is set in `core-site.xml` or `yarn-site.xml`, the YARN Resource Manager will behave as follows: + 1. On startup: create the initial root paths of `/`, `/services` and `/users`. - On a secure cluster, access will be restricted to the system accounts (see below). + On a secure cluster, access will be restricted to the system accounts + (see below). 2. When a user submits a job: create the user path under `/users`. 3. 
When a container is completed: delete from the registry all service records with a `yarn:persistence` field of value `container`, and a `yarn:id` field @@ -74,28 +76,28 @@ the YARN Resource Manager will behave as follows: `yarn:persistence` set to `application` and `yarn:id` set to the application ID. -All these operations are asynchronous, so that zookeeper connectivity problems +All these operations are asynchronous; zookeeper connectivity problems do not delay RM operations or work scheduling. If the property `hadoop.registry.rm.enabled` is set to `false`, the RM will not interact with the registry —and the listed operations will not take place. -The root paths may be created by other means, but service record cleanup will not take place. +The root paths may be created by other means, but service record cleanup will +not take place. ### Setting the Zookeeper Quorum: `hadoop.registry.zk.quorum` This is an essential setting: it identifies the lists of zookeeper hosts and the ports on which the ZK services are listening. - -``` - - - List of hostname:port pairs defining the - zookeeper quorum binding for the registry - - hadoop.registry.zk.quorum - localhost:2181 - +```xml + + + List of hostname:port pairs defining the + zookeeper quorum binding for the registry + + hadoop.registry.zk.quorum + localhost:2181 + ``` It takes a comma-separated list, such as `zk1:2181 ,zk2:2181, zk3:2181` @@ -104,14 +106,14 @@ It takes a comma-separated list, such as `zk1:2181 ,zk2:2181, zk3:2181` This path sets the base zookeeper node for the registry -``` - - - The root zookeeper node for the registry - - hadoop.registry.zk.root - /registry - +```xml + + + The root zookeeper node for the registry + + hadoop.registry.zk.root + /registry + ``` The default value of `/registry` is normally sufficient. A different value @@ -153,18 +155,18 @@ the user: `hadoop.registry.jaas.context` ### Enabling security -``` - - - Key to set if the registry is secure. 
Turning it on - changes the permissions policy from "open access" - to restrictions on kerberos with the option of - a user adding one or more auth key pairs down their - own tree. - - hadoop.registry.secure - false - +```xml + + + Key to set if the registry is secure. Turning it on + changes the permissions policy from "open access" + to restrictions on kerberos with the option of + a user adding one or more auth key pairs down their + own tree. + + hadoop.registry.secure + false + ``` ### Identifying the client JAAS context @@ -172,14 +174,14 @@ the user: `hadoop.registry.jaas.context` The registry clients must identify the JAAS context which they use to authenticate to the registry. -``` - - - Key to define the JAAS context. Used in secure mode - - hadoop.registry.jaas.context - Client - +```xml + + + Key to define the JAAS context. Used in secure mode + + hadoop.registry.jaas.context + Client + ``` *Note* as the Resource Manager is simply another client of the registry, it @@ -208,32 +210,32 @@ to it. 7. It may be overridden by the property `hadoop.registry.kerberos.realm`. -``` - - - A comma separated list of Zookeeper ACL identifiers with - system access to the registry in a secure cluster. - These are given full access to all entries. - If there is an "@" at the end of a SASL entry it - instructs the registry client to append the default kerberos domain. - - hadoop.registry.system.acls - sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@ - - - - - The kerberos realm: used to set the realm of - system principals which do not declare their realm, - and any other accounts that need the value. - If empty, the default realm of the running process - is used. - If neither are known and the realm is needed, then the registry - service/client will fail. - - hadoop.registry.kerberos.realm - - +```xml + + + A comma separated list of Zookeeper ACL identifiers with + system access to the registry in a secure cluster. + These are given full access to all entries. 
+ If there is an "@" at the end of a SASL entry it + instructs the registry client to append the default kerberos domain. + + hadoop.registry.system.acls + sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@ + + + + + The kerberos realm: used to set the realm of + system principals which do not declare their realm, + and any other accounts that need the value. + If empty, the default realm of the running process + is used. + If neither are known and the realm is needed, then the registry + service/client will fail. + + hadoop.registry.kerberos.realm + + ``` Example: an `hadoop.registry.system.acls` entry of @@ -270,50 +272,50 @@ a library which detects timeouts and attempts to reconnect to one of the servers which forms the zookeeper quorum. It is only after a timeout is detected that a retry is triggered. -``` - - - Zookeeper session timeout in milliseconds - - hadoop.registry.zk.session.timeout.ms - 60000 - - - - - Zookeeper connection timeout in milliseconds - - hadoop.registry.zk.connection.timeout.ms - 15000 - - - - - Zookeeper connection retry count before failing - - hadoop.registry.zk.retry.times - 5 - - - - - - hadoop.registry.zk.retry.interval.ms - 1000 - - - - - Zookeeper retry limit in milliseconds, during - exponential backoff. - This places a limit even - if the retry times and interval limit, combined - with the backoff policy, result in a long retry - period - - hadoop.registry.zk.retry.ceiling.ms - 60000 - +```xml + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.session.timeout.ms + 60000 + + + + + Zookeeper connection timeout in milliseconds + + hadoop.registry.zk.connection.timeout.ms + 15000 + + + + + Zookeeper connection retry count before failing + + hadoop.registry.zk.retry.times + 5 + + + + + + hadoop.registry.zk.retry.interval.ms + 1000 + + + + + Zookeeper retry limit in milliseconds, during + exponential backoff. 
+ This places a limit even + if the retry times and interval limit, combined + with the backoff policy, result in a long retry + period + + hadoop.registry.zk.retry.ceiling.ms + 60000 + ``` The retry strategy used in the registry client is @@ -323,131 +325,131 @@ concluding that the quorum is unreachable and failing. ## Complete Set of Configuration Options -``` - - - - - Is the registry enabled: does the RM start it up, - create the user and system paths, and purge - service records when containers, application attempts - and applications complete - - hadoop.registry.rm.enabled - false - - - - - List of hostname:port pairs defining the - zookeeper quorum binding for the registry - - hadoop.registry.zk.quorum - localhost:2181 - - - - - The root zookeeper node for the registry - - hadoop.registry.zk.root - /registry - - - - - Key to set if the registry is secure. Turning it on - changes the permissions policy from "open access" - to restrictions on kerberos with the option of - a user adding one or more auth key pairs down their - own tree. - - hadoop.registry.secure - false - - - - - A comma separated list of Zookeeper ACL identifiers with - system access to the registry in a secure cluster. - - These are given full access to all entries. - - If there is an "@" at the end of a SASL entry it - instructs the registry client to append the default kerberos domain. - - hadoop.registry.system.acls - sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@ - - - - - The kerberos realm: used to set the realm of - system principals which do not declare their realm, - and any other accounts that need the value. - - If empty, the default realm of the running process - is used. - - If neither are known and the realm is needed, then the registry - service/client will fail. - - hadoop.registry.kerberos.realm - - - - - - Key to define the JAAS context. 
Used in secure - mode - - hadoop.registry.jaas.context - Client - - - - - - Zookeeper session timeout in milliseconds - - hadoop.registry.zk.session.timeout.ms - 60000 - - - - - Zookeeper session timeout in milliseconds - - hadoop.registry.zk.connection.timeout.ms - 15000 - - - - - Zookeeper connection retry count before failing - - hadoop.registry.zk.retry.times - 5 - - - - - - hadoop.registry.zk.retry.interval.ms - 1000 - - - - - Zookeeper retry limit in milliseconds, during - exponential backoff: {@value} - - This places a limit even - if the retry times and interval limit, combined - with the backoff policy, result in a long retry - period - - hadoop.registry.zk.retry.ceiling.ms - 60000 - +```xml + + + + + Is the registry enabled: does the RM start it up, + create the user and system paths, and purge + service records when containers, application attempts + and applications complete + + hadoop.registry.rm.enabled + false + + + + + List of hostname:port pairs defining the + zookeeper quorum binding for the registry + + hadoop.registry.zk.quorum + localhost:2181 + + + + + The root zookeeper node for the registry + + hadoop.registry.zk.root + /registry + + + + + Key to set if the registry is secure. Turning it on + changes the permissions policy from "open access" + to restrictions on kerberos with the option of + a user adding one or more auth key pairs down their + own tree. + + hadoop.registry.secure + false + + + + + A comma separated list of Zookeeper ACL identifiers with + system access to the registry in a secure cluster. + + These are given full access to all entries. + + If there is an "@" at the end of a SASL entry it + instructs the registry client to append the default kerberos domain. + + hadoop.registry.system.acls + sasl:yarn@, sasl:mapred@, sasl:mapred@, sasl:hdfs@ + + + + + The kerberos realm: used to set the realm of + system principals which do not declare their realm, + and any other accounts that need the value. 
+ + If empty, the default realm of the running process + is used. + + If neither are known and the realm is needed, then the registry + service/client will fail. + + hadoop.registry.kerberos.realm + + + + + + Key to define the JAAS context. Used in secure + mode + + hadoop.registry.jaas.context + Client + + + + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.session.timeout.ms + 60000 + + + + + Zookeeper session timeout in milliseconds + + hadoop.registry.zk.connection.timeout.ms + 15000 + + + + + Zookeeper connection retry count before failing + + hadoop.registry.zk.retry.times + 5 + + + + + + hadoop.registry.zk.retry.interval.ms + 1000 + + + + + Zookeeper retry limit in milliseconds, during + exponential backoff: {@value} + + This places a limit even + if the retry times and interval limit, combined + with the backoff policy, result in a long retry + period + + hadoop.registry.zk.retry.ceiling.ms + 60000 + ``` diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-security.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-security.md index 6317681a716f5..dd1fdd1441286 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-security.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/registry-security.md @@ -46,7 +46,7 @@ In which different authentication schemes may be used to restrict access to different znodes. This permits the registry to use a mixed Kerberos + Private password model. -* The YARN-based registry (the `RMRegistryOperationsService`), uses kerberos +* The YARN-based registry (the `RMRegistryOperationsService`), uses Kerberos as the authentication mechanism for YARN itself. * The registry configures the base of the registry to be writeable only by itself and other hadoop system accounts holding the relevant kerberos credentials. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/using-the-yarn-service-registry.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/using-the-yarn-service-registry.md index 4df762e5b8480..2a83bdb52ee5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/using-the-yarn-service-registry.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/registry/using-the-yarn-service-registry.md @@ -70,12 +70,14 @@ The registry cannot be used:- yarn:id = ${application_attemptId} This means that the record will be deleted when the application attempt - completes, even if a new attempt is created. Every Application attempt will have to re-register the endpoint —which may be needed to locate the service anyway. + completes, even if a new attempt is created. Every Application attempt will + have to re-register the endpoint —which may be needed to locate the service anyway. 4. Alternatively, the record MAY have the persistence policy of "application": yarn:persistence = "application_attempt" yarn:id = application_attemptId - This means that the record will persist even between application attempts, albeit with out of date endpoint information. + This means that the record will persist even between application attempts, + albeit with out of date endpoint information. 5. Client applications look up the service by way of the path. The choice of path is an application specific one.