diff --git a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java index d2d2f8c9a7cab..66ef4b1c6fd87 100644 --- a/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java +++ b/hadoop-cloud-storage-project/hadoop-cos/src/main/java/org/apache/hadoop/fs/cosn/auth/COSCredentialsProviderList.java @@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import com.qcloud.cos.auth.AnonymousCOSCredentials; import com.qcloud.cos.auth.COSCredentials; import com.qcloud.cos.auth.COSCredentialsProvider; diff --git a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java index f5cff2b529a5f..72f0f1630949a 100644 --- a/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java +++ b/hadoop-common-project/hadoop-auth-examples/src/main/java/org/apache/hadoop/security/authentication/examples/WhoClient.java @@ -19,7 +19,7 @@ import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; /** * Example that uses AuthenticatedURL. @@ -42,7 +42,7 @@ public static void main(String[] args) { if (conn.getResponseCode() == HttpURLConnection.HTTP_OK) { BufferedReader reader = new BufferedReader( new InputStreamReader( - conn.getInputStream(), Charset.forName("UTF-8"))); + conn.getInputStream(), StandardCharsets.UTF_8)); String line = reader.readLine(); while (line != null) { System.out.println(line); diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java index 79739a487b431..e86dc3ffaf6ee 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationHandlerUtil.java @@ -20,8 +20,6 @@ import java.util.Locale; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; - /** * This is a utility class designed to provide functionality related to * {@link AuthenticationHandler}. @@ -44,8 +42,10 @@ private AuthenticationHandlerUtil() { * @return an instance of AuthenticationHandler implementation. */ public static String getAuthenticationHandlerClassName(String authHandler) { - String handlerName = - Preconditions.checkNotNull(authHandler).toLowerCase(Locale.ENGLISH); + if (authHandler == null) { + throw new NullPointerException(); + } + String handlerName = authHandler.toLowerCase(Locale.ENGLISH); String authHandlerClassName = null; @@ -98,8 +98,14 @@ public static String checkAuthScheme(String scheme) { * specified authentication scheme false Otherwise. 
*/ public static boolean matchAuthScheme(String scheme, String auth) { - scheme = Preconditions.checkNotNull(scheme).trim(); - auth = Preconditions.checkNotNull(auth).trim(); + if (scheme == null) { + throw new NullPointerException(); + } + scheme = scheme.trim(); + if (auth == null) { + throw new NullPointerException(); + } + auth = auth.trim(); return auth.regionMatches(true, 0, scheme, 0, scheme.length()); } } diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java index 260febf7598a7..60a62f1a102b5 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/LdapAuthenticationHandler.java @@ -39,7 +39,6 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; /** * The {@link LdapAuthenticationHandler} implements the BASIC authentication @@ -144,15 +143,20 @@ public void init(Properties config) throws ServletException { this.enableStartTls = Boolean.valueOf(config.getProperty(ENABLE_START_TLS, "false")); - Preconditions - .checkNotNull(this.providerUrl, "The LDAP URI can not be null"); - Preconditions.checkArgument((this.baseDN == null) - ^ (this.ldapDomain == null), - "Either LDAP base DN or LDAP domain value needs to be specified"); + if (this.providerUrl == null) { + throw new NullPointerException("The LDAP URI can not be null"); + } + if (!((this.baseDN == null) + ^ (this.ldapDomain == null))) { + throw new IllegalArgumentException( + "Either LDAP base DN or LDAP domain value needs to be specified"); + } if (this.enableStartTls) { String tmp = this.providerUrl.toLowerCase(); - Preconditions.checkArgument(!tmp.startsWith("ldaps"), - "Can not use ldaps and StartTLS option at the same time"); + if (tmp.startsWith("ldaps")) { + throw new IllegalArgumentException( + "Can not use ldaps and StartTLS option at the same time"); + } } } diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java index b2499ff734bbe..03caaa2ec24ff 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/MultiSchemeAuthenticationHandler.java @@ -30,7 +30,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; /** @@ -114,10 +113,10 @@ public void init(Properties config) throws ServletException { } this.types.clear(); - - String schemesProperty = - Preconditions.checkNotNull(config.getProperty(SCHEMES_PROPERTY), - "%s system property is not specified.", SCHEMES_PROPERTY); + if (config.getProperty(SCHEMES_PROPERTY) == null) { + throw new NullPointerException(SCHEMES_PROPERTY + " system property is not specified."); + } + String schemesProperty = 
config.getProperty(SCHEMES_PROPERTY); for (String scheme : STR_SPLITTER.split(schemesProperty)) { scheme = AuthenticationHandlerUtil.checkAuthScheme(scheme); if (schemeToAuthHandlerMapping.containsKey(scheme)) { @@ -128,8 +127,10 @@ public void init(Properties config) throws ServletException { String authHandlerPropName = String.format(AUTH_HANDLER_PROPERTY, scheme).toLowerCase(); String authHandlerName = config.getProperty(authHandlerPropName); - Preconditions.checkNotNull(authHandlerName, - "No auth handler configured for scheme %s.", scheme); + if (authHandlerName == null) { + throw new NullPointerException( + "No auth handler configured for scheme " + scheme); + } String authHandlerClassName = AuthenticationHandlerUtil @@ -145,7 +146,9 @@ public void init(Properties config) throws ServletException { protected AuthenticationHandler initializeAuthHandler( String authHandlerClassName, Properties config) throws ServletException { try { - Preconditions.checkNotNull(authHandlerClassName); + if (authHandlerClassName == null) { + throw new NullPointerException(); + } logger.debug("Initializing Authentication handler of type " + authHandlerClassName); Class klass = @@ -207,4 +210,4 @@ public AuthenticationToken authenticate(HttpServletRequest request, return null; } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java index 19947aafad232..7bf3398a210ca 100644 --- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/PseudoAuthenticationHandler.java @@ -23,7 +23,7 @@ import javax.servlet.http.HttpServletResponse; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Properties; @@ -53,8 +53,6 @@ public class PseudoAuthenticationHandler implements AuthenticationHandler { */ public static final String ANONYMOUS_ALLOWED = TYPE + ".anonymous.allowed"; - private static final Charset UTF8_CHARSET = Charset.forName("UTF-8"); - private static final String PSEUDO_AUTH = "PseudoAuth"; private boolean acceptAnonymous; @@ -146,7 +144,7 @@ private String getUserName(HttpServletRequest request) { if(queryString == null || queryString.length() == 0) { return null; } - List list = URLEncodedUtils.parse(queryString, UTF8_CHARSET); + List list = URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8); if (list != null) { for (NameValuePair nv : list) { if (PseudoAuthenticator.USER_NAME.equals(nv.getName())) { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java index 5582c923ae0e7..f7c006a5c307d 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/StringSignerSecretProvider.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import java.nio.charset.Charset; +import 
java.nio.charset.StandardCharsets; import java.util.Properties; import javax.servlet.ServletContext; @@ -39,7 +39,7 @@ public void init(Properties config, ServletContext servletContext, long tokenValidity) throws Exception { String signatureSecret = config.getProperty( AuthenticationFilter.SIGNATURE_SECRET, null); - secret = signatureSecret.getBytes(Charset.forName("UTF-8")); + secret = signatureSecret.getBytes(StandardCharsets.UTF_8); secrets = new byte[][]{secret}; } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java index 628342e40dc4a..4f090c234eece 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestZKSignerSecretProvider.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.security.authentication.util; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.Properties; import java.util.Random; import javax.servlet.ServletContext; @@ -140,11 +140,11 @@ public void testUpgradeChangeSecretLength() throws Exception { long seed = System.currentTimeMillis(); Random rand = new Random(seed); byte[] secret2 = Long.toString(rand.nextLong()) - .getBytes(Charset.forName("UTF-8")); + .getBytes(StandardCharsets.UTF_8); byte[] secret1 = Long.toString(rand.nextLong()) - .getBytes(Charset.forName("UTF-8")); + .getBytes(StandardCharsets.UTF_8); byte[] secret3 = Long.toString(rand.nextLong()) - .getBytes(Charset.forName("UTF-8")); + .getBytes(StandardCharsets.UTF_8); rand = new Random(seed); // Secrets 4 and 5 get thrown away by ZK when the new secret provider tries // to init @@ -238,7 +238,7 @@ private class OldMockZKSignerSecretProvider @Override protected byte[] generateRandomSecret() { - return Long.toString(rand.nextLong()).getBytes(Charset.forName("UTF-8")); + return Long.toString(rand.nextLong()).getBytes(StandardCharsets.UTF_8); } } diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index f61539574978f..d32c0594a2605 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -366,6 +366,10 @@ com.sun.jmx jmxri + + org.apache.yetus + audience-annotations + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index d0c7cce75e943..db6043e39c291 100755 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -43,6 +43,7 @@ import java.net.URISyntaxException; import java.net.URL; import java.net.URLConnection; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.Arrays; @@ -82,7 +83,6 @@ import javax.xml.transform.dom.DOMSource; import javax.xml.transform.stream.StreamResult; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.commons.collections.map.UnmodifiableMap; import org.apache.hadoop.classification.InterfaceAudience; import 
org.apache.hadoop.classification.InterfaceStability; @@ -2881,7 +2881,7 @@ public Reader getConfResourceAsReader(String name) { LOG.info("found resource " + name + " at " + url); } - return new InputStreamReader(url.openStream(), Charsets.UTF_8); + return new InputStreamReader(url.openStream(), StandardCharsets.UTF_8); } catch (Exception e) { return null; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java index 56a2baedc9b4d..1c451ca6d30b9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ReconfigurableBase.java @@ -19,7 +19,7 @@ package org.apache.hadoop.conf; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Time; import org.apache.hadoop.conf.ReconfigurationUtil.PropertyChange; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java index a1ddca6e20967..0733fde022683 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/AesCtrCryptoCodec.java @@ -20,7 +20,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.io.IOException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java index 21e06f26c31d2..067abde9dfbb8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoInputStream.java @@ -30,7 +30,7 @@ import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.ByteBufferPositionedReadable; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java index df36bd6fe698e..a1ae869f8601e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoOutputStream.java @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.statistics.IOStatisticsSource; import org.apache.hadoop.fs.impl.StoreImplementationUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static 
org.apache.hadoop.fs.statistics.IOStatisticsSupport.retrieveIOStatistics; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java index 5bf66c7c4a601..4860ca694b166 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/CryptoStreamUtils.java @@ -29,7 +29,7 @@ import org.apache.hadoop.fs.Seekable; import org.apache.hadoop.util.CleanerUtil; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java index 1c670f76f4859..69f28dc2e884f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/JceAesCtrCryptoCodec.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java index 0963cb6005ed8..3f94d9c5c4d5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslAesCtrCryptoCodec.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.crypto.random.OpensslSecureRandom; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java index fcb3730ca5b30..6a512792d77a6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/OpensslCipher.java @@ -29,7 +29,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.PerformanceAdvisory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java index e980be08e0c64..f0cf710981b2a 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/JavaKeyStoreProvider.java @@ -18,7 +18,7 @@ package org.apache.hadoop.crypto.key; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java index 27e5f87432001..cf2e52d08ea28 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProviderCryptoExtension.java @@ -29,7 +29,7 @@ import javax.crypto.spec.IvParameterSpec; import javax.crypto.spec.SecretKeySpec; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.crypto.CryptoCodec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java index f73d69d7e62b2..a3293620ab9e4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java @@ -81,7 +81,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import static org.apache.hadoop.util.KMSUtil.checkNotEmpty; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java index 57219b2130f04..8a1754f7817f0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java @@ -51,7 +51,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A simple LoadBalancing KMSClientProvider that round-robins requests diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java index 46749433b49c0..3841a21beb6d7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java +++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java @@ -33,7 +33,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java index a7a609ce440b6..101cb0e637f45 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/random/OpensslSecureRandom.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.NativeCodeLoader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.PerformanceAdvisory; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java index ab052029eeb93..8851d49b721a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ByteBufferUtil.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.ByteBufferPool; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java index 8ec2a1c67b2c1..4c7569d6ecd81 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java @@ -34,7 +34,7 @@ import java.util.function.IntFunction; import java.util.zip.CRC32; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java index 3c3870723e652..16938a83a69c7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSDataOutputStreamBuilder.java @@ -28,7 +28,7 @@ import java.io.IOException; import java.util.EnumSet; -import static 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java index ad2642f7db963..b0e1b10b3d37c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputStream.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.InputStream; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.statistics.IOStatisticsLogging; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java index a40993673499d..12f16138ce443 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java @@ -65,7 +65,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ShutdownHookManager; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.tracing.Tracer; import org.slf4j.Logger; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java index 059ec4fd63d1a..f50c06cec3810 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileEncryptionInfo.java @@ -24,8 +24,8 @@ import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoProtocolVersion; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkArgument; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * FileEncryptionInfo encapsulates all the encryption-related information for diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 18f4597cbb9ca..a13cf4655ad56 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -47,8 +47,8 @@ import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicLong; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; 
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -85,7 +85,7 @@ import org.apache.hadoop.tracing.Tracer; import org.apache.hadoop.tracing.TraceScope; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -183,7 +183,7 @@ public abstract class FileSystem extends Configured * so must be considered something to only be changed with care. */ @InterfaceAudience.Private - public static final Log LOG = LogFactory.getLog(FileSystem.class); + public static final Logger LOG = LoggerFactory.getLogger(FileSystem.class); /** * The SLF4J logger to use in logging within the FileSystem class itself. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java index f717e03692378..62806d61b540c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemStorageStatistics.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.FileSystem.Statistics.StatisticsData; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 933f56927741d..b9a5f5b548feb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -878,7 +878,7 @@ private static void runCommandOnStream( try (BufferedReader reader = new BufferedReader( new InputStreamReader(process.getInputStream(), - Charset.forName("UTF-8")))) { + StandardCharsets.UTF_8))) { String line; while((line = reader.readLine()) != null) { LOG.debug(line); @@ -901,7 +901,7 @@ private static void runCommandOnStream( try (BufferedReader reader = new BufferedReader( new InputStreamReader(process.getErrorStream(), - Charset.forName("UTF-8")))) { + StandardCharsets.UTF_8))) { String line; while((line = reader.readLine()) != null) { LOG.debug(line); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java index 11b3e91e86c3a..c2a9254f25b2c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsUrlConnection.java @@ -24,7 +24,7 @@ import java.net.URL; import java.net.URLConnection; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java index 5a8497773a69b..d94339034447a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/GlobalStorageStatistics.java @@ -23,7 +23,7 @@ import java.util.NoSuchElementException; import java.util.TreeMap; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index cfe0610edec8c..b69dcd9757f5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -32,7 +32,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * Implementation of {@link FileSystem#globStatus(Path, PathFilter)}. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java index 01730889a2b41..f6de726468af7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PartialListing.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index a3c35b8527af9..42548e24fbda3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -96,11 +96,11 @@ public class RawLocalFileSystem extends FileSystem { public static void useStatIfAvailable() { useDeprecatedFileStatus = !Stat.isAvailable(); } - + public RawLocalFileSystem() { workingDir = getInitialWorkingDirectory(); } - + private Path makeAbsolute(Path f) { if (f.isAbsolute()) { return f; @@ -108,7 +108,7 @@ private Path makeAbsolute(Path f) { return new Path(workingDir, f); } } - + /** * Convert a path to a File. * @@ -125,14 +125,14 @@ public File pathToFile(Path path) { @Override public URI getUri() { return NAME; } - + @Override public void initialize(URI uri, Configuration conf) throws IOException { super.initialize(uri, conf); setConf(conf); defaultBlockSize = getDefaultBlockSize(new Path(uri)); } - + /******************************************************* * For open()'s FSInputStream. 
*******************************************************/ @@ -172,7 +172,7 @@ public LocalFSFileInputStream(Path f) throws IOException { ioStatisticsAggregator = IOStatisticsContext.getCurrentIOStatisticsContext().getAggregator(); } - + @Override public void seek(long pos) throws IOException { if (pos < 0) { @@ -182,17 +182,17 @@ public void seek(long pos) throws IOException { fis.getChannel().position(pos); this.position = pos; } - + @Override public long getPos() throws IOException { return this.position; } - + @Override public boolean seekToNewSource(long targetPos) throws IOException { return false; } - + /** * Just forward to the fis. */ @@ -228,7 +228,7 @@ public int read() throws IOException { throw new FSError(e); // assume native fs error } } - + @Override public int read(byte[] b, int off, int len) throws IOException { // parameter check @@ -246,7 +246,7 @@ public int read(byte[] b, int off, int len) throws IOException { throw new FSError(e); // assume native fs error } } - + @Override public int read(long position, byte[] b, int off, int len) throws IOException { @@ -269,7 +269,7 @@ public int read(long position, byte[] b, int off, int len) throw new FSError(e); } } - + @Override public long skip(long n) throws IOException { ioStatistics.incrementCounter(STREAM_READ_SKIP_OPERATIONS); @@ -453,7 +453,7 @@ private LocalFSFileOutputStream(Path f, boolean append, success = true; } finally { if (!success) { - IOUtils.cleanup(LOG, this.fos); + IOUtils.cleanupWithLogger(LOG, this.fos); } } } @@ -484,7 +484,7 @@ public void write(byte[] b, int off, int len) throws IOException { throw new FSError(e); // assume native fs error } } - + @Override public void write(int b) throws IOException { try { @@ -564,8 +564,8 @@ private FSDataOutputStream create(Path f, boolean overwrite, createOutputStreamWithMode(f, false, permission), bufferSize, true), statistics); } - - protected OutputStream createOutputStream(Path f, boolean append) + + protected OutputStream createOutputStream(Path f, boolean append) throws IOException { return createOutputStreamWithMode(f, append, null); } @@ -574,7 +574,7 @@ protected OutputStream createOutputStreamWithMode(Path f, boolean append, FsPermission permission) throws IOException { return new LocalFSFileOutputStream(f, append, permission); } - + @Override public FSDataOutputStream createNonRecursive(Path f, FsPermission permission, EnumSet flags, int bufferSize, short replication, long blockSize, @@ -694,13 +694,13 @@ public boolean truncate(Path f, final long newLength) throws IOException { } return true; } - + /** * Delete the given path to a file or directory. 
* @param p the path to delete * @param recursive to delete sub-directories * @return true if the file or directory and all its contents were deleted - * @throws IOException if p is non-empty and recursive is false + * @throws IOException if p is non-empty and recursive is false */ @Override public boolean delete(Path p, boolean recursive) throws IOException { @@ -711,13 +711,13 @@ public boolean delete(Path p, boolean recursive) throws IOException { } if (f.isFile()) { return f.delete(); - } else if (!recursive && f.isDirectory() && + } else if (!recursive && f.isDirectory() && (FileUtil.listFiles(f).length != 0)) { throw new IOException("Directory " + f.toString() + " is not empty"); } return FileUtil.fullyDelete(f); } - + /** * {@inheritDoc} * @@ -767,7 +767,7 @@ public FileStatus[] listStatus(Path f) throws IOException { public boolean exists(Path f) throws IOException { return pathToFile(f).exists(); } - + protected boolean mkOneDir(File p2f) throws IOException { return mkOneDirWithMode(new Path(p2f.getAbsolutePath()), p2f, null); } @@ -835,8 +835,8 @@ private boolean mkdirsWithOptionalPermission(Path f, FsPermission permission) return (parent == null || parent2f.exists() || mkdirs(parent)) && (mkOneDirWithMode(f, p2f, permission) || p2f.isDirectory()); } - - + + @Override public Path getHomeDirectory() { return this.makeQualified(new Path(System.getProperty("user.home"))); @@ -850,12 +850,12 @@ public void setWorkingDirectory(Path newDir) { workingDir = makeAbsolute(newDir); checkPath(workingDir); } - + @Override public Path getWorkingDirectory() { return workingDir; } - + @Override protected Path getInitialWorkingDirectory() { return this.makeQualified(new Path(System.getProperty("user.dir"))); @@ -866,40 +866,40 @@ public FsStatus getStatus(Path p) throws IOException { File partition = pathToFile(p == null ? new Path("/") : p); //File provides getUsableSpace() and getFreeSpace() //File provides no API to obtain used space, assume used = total - free - return new FsStatus(partition.getTotalSpace(), + return new FsStatus(partition.getTotalSpace(), partition.getTotalSpace() - partition.getFreeSpace(), partition.getFreeSpace()); } - + // In the case of the local filesystem, we can just rename the file. @Override public void moveFromLocalFile(Path src, Path dst) throws IOException { rename(src, dst); } - + // We can write output directly to the final location @Override public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException { return fsOutputFile; } - + // It's in the right place - nothing to do. @Override public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile) throws IOException { } - + @Override public void close() throws IOException { super.close(); } - + @Override public String toString() { return "LocalFS"; } - + @Override public FileStatus getFileStatus(Path f) throws IOException { return getFileLinkStatusInternal(f, true); @@ -923,7 +923,7 @@ static class DeprecatedRawLocalFileStatus extends FileStatus { * onwer.equals(""). 
*/ private boolean isPermissionLoaded() { - return !super.getOwner().isEmpty(); + return !super.getOwner().isEmpty(); } private static long getLastAccessTime(File f) throws IOException { @@ -945,7 +945,7 @@ private static long getLastAccessTime(File f) throws IOException { new Path(f.getPath()).makeQualified(fs.getUri(), fs.getWorkingDirectory())); } - + @Override public FsPermission getPermission() { if (!isPermissionLoaded()) { @@ -1036,7 +1036,7 @@ void loadPermissionInfoByNonNativeIO() { } finally { if (e != null) { throw new RuntimeException("Error while running command to get " + - "file permissions : " + + "file permissions : " + StringUtils.stringifyException(e)); } } @@ -1115,7 +1115,7 @@ public void setPermission(Path p, FsPermission permission) FileUtil.makeShellPath(pathToFile(p), true))); } } - + /** * Sets the {@link Path}'s last modified time and last access time to * the given valid times. @@ -1221,7 +1221,7 @@ public FileStatus getFileLinkStatus(final Path f) throws IOException { * Public {@link FileStatus} methods delegate to this function, which in turn * either call the new {@link Stat} based implementation or the deprecated * methods based on platform support. - * + * * @param f Path to stat * @param dereference whether to dereference the final path component if a * symlink @@ -1285,7 +1285,7 @@ private FileStatus deprecatedGetFileLinkStatusInternal(final Path f) * Calls out to platform's native stat(1) implementation to get file metadata * (permissions, user, group, atime, mtime, etc). This works around the lack * of lstat(2) in Java 6. - * + * * Currently, the {@link Stat} class used to do this only supports Linux * and FreeBSD, so the old {@link #deprecatedGetFileLinkStatusInternal(Path)} * implementation (deprecated) remains further OS support is added. 
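The FileSystem and RawLocalFileSystem hunks above follow the same logging migration seen elsewhere in the patch: the commons-logging Log/LogFactory pair becomes an SLF4J Logger, and stream cleanup goes through IOUtils.cleanupWithLogger, which accepts an SLF4J logger. A minimal sketch of that pattern, using a hypothetical class name rather than anything from the patch:

```java
import java.io.FileOutputStream;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.hadoop.io.IOUtils;

public class LoggingCleanupSketch {
  // SLF4J logger in place of the commons-logging Log/LogFactory pair.
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingCleanupSketch.class);

  private FileOutputStream fos;

  public void closeQuietly() {
    // cleanupWithLogger takes an org.slf4j.Logger, so it can replace the
    // older IOUtils.cleanup(Log, Closeable...) call used before the change.
    IOUtils.cleanupWithLogger(LOG, fos);
  }
}
```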
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java index 2497ded48e7e9..8603625af6aee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/UnionStorageStatistics.java @@ -20,7 +20,7 @@ import java.util.Iterator; import java.util.NoSuchElementException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java index 3f1c9d7d92940..1379c1b1f0d86 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/XAttrCodec.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The value of XAttr is byte[], this class is to diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java index f13c5a4a45182..7a93b34766107 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java @@ -25,7 +25,7 @@ import java.net.URI; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.net.ftp.FTP; import org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java index dea564b05e43d..f0a34d05c6831 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractFSBuilderImpl.java @@ -36,8 +36,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathHandle; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkArgument; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * Builder for filesystem/filecontext operations of various kinds, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java index 5328e3c712414..fbc9d1f3dc5b0 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/AbstractMultipartUploader.java @@ -24,14 +24,14 @@ import java.util.Objects; import java.util.concurrent.CompletableFuture; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.MultipartUploader; import org.apache.hadoop.fs.PartHandle; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UploadHandle; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.util.Preconditions.checkArgument; /** * Standard base class for Multipart Uploaders. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java index 1fafd41b054b9..28a4bce0489cd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FileSystemMultipartUploader.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; @@ -30,8 +31,7 @@ import java.util.concurrent.CompletableFuture; import java.util.stream.Collectors; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -104,7 +104,7 @@ public CompletableFuture startUpload(Path filePath) fs.mkdirs(collectorPath, FsPermission.getDirDefault()); ByteBuffer byteBuffer = ByteBuffer.wrap( - collectorPath.toString().getBytes(Charsets.UTF_8)); + collectorPath.toString().getBytes(StandardCharsets.UTF_8)); return BBUploadHandle.from(byteBuffer); }); } @@ -130,7 +130,7 @@ private PartHandle innerPutPart(Path filePath, byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, - uploadIdByteArray.length, Charsets.UTF_8)); + uploadIdByteArray.length, StandardCharsets.UTF_8)); Path partPath = mergePaths(collectorPath, mergePaths(new Path(Path.SEPARATOR), new Path(partNumber + ".part"))); @@ -149,7 +149,7 @@ private PartHandle innerPutPart(Path filePath, cleanupWithLogger(LOG, inputStream); } return BBPartHandle.from(ByteBuffer.wrap( - partPath.toString().getBytes(Charsets.UTF_8))); + partPath.toString().getBytes(StandardCharsets.UTF_8))); } private Path createCollectorPath(Path filePath) { @@ -210,7 +210,7 @@ private PathHandle innerComplete( .map(pair -> { byte[] byteArray = pair.getValue().toByteArray(); return new Path(new String(byteArray, 0, byteArray.length, - Charsets.UTF_8)); + StandardCharsets.UTF_8)); }) .collect(Collectors.toList()); @@ -223,7 +223,7 @@ private PathHandle innerComplete( "Duplicate PartHandles"); byte[] uploadIdByteArray = multipartUploadId.toByteArray(); Path collectorPath = new Path(new String(uploadIdByteArray, 0, - uploadIdByteArray.length, Charsets.UTF_8)); + uploadIdByteArray.length, StandardCharsets.UTF_8)); boolean emptyFile = 
totalPartsLen(partHandles) == 0; if (emptyFile) { @@ -250,7 +250,7 @@ public CompletableFuture abort(UploadHandle uploadId, byte[] uploadIdByteArray = uploadId.toByteArray(); checkUploadId(uploadIdByteArray); Path collectorPath = new Path(new String(uploadIdByteArray, 0, - uploadIdByteArray.length, Charsets.UTF_8)); + uploadIdByteArray.length, StandardCharsets.UTF_8)); return FutureIO.eval(() -> { // force a check for a file existing; raises FNFE if not found diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java index 8d4bebda15096..0b6906335c43d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/FsLinkResolution.java @@ -20,7 +20,7 @@ import java.io.IOException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java index 1d8c4e5e0beb9..665bcc6a95660 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/MultipartUploaderBuilderImpl.java @@ -34,7 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java index 1e3e43581dccc..68ff1c2ac4211 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/PathCapabilitiesSupport.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathCapabilities; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; +import static org.apache.hadoop.util.Preconditions.checkArgument; @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java index 3f828897b1d6c..f484296ae71d2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/impl/WrappedIOException.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.UncheckedIOException; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java index f954f1e275f37..260ee7e570c9b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/AclStatus.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.thirdparty.com.google.common.base.Objects; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java index 1ac204f5f8a82..70c7fe381244d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.Iterator; import java.util.LinkedList; @@ -114,7 +115,7 @@ protected void processArguments(LinkedList items) private void writeDelimiter(FSDataOutputStream out) throws IOException { if (delimiter != null) { - out.write(delimiter.getBytes("UTF-8")); + out.write(delimiter.getBytes(StandardCharsets.UTF_8)); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java index 75dc86ec87c18..e19fd668c2673 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotDirectoryException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Snapshot related operations diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java index 2fe7c858e4e66..dfbf31ff8298c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/XAttrCommands.java @@ -23,7 +23,7 @@ import java.util.Map; import java.util.Map.Entry; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java index 988d386e29877..4551c97665ba3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/IOStatisticsSnapshot.java @@ -37,7 +37,7 @@ import org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding; import org.apache.hadoop.util.JsonSerialization; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import static org.apache.hadoop.fs.statistics.IOStatisticsLogging.ioStatisticsToString; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.aggregateMaps; import static org.apache.hadoop.fs.statistics.impl.IOStatisticsBinding.snapshotMap; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java index 47a317076dcf2..46d7a77075500 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/DynamicIOStatisticsBuilder.java @@ -27,7 +27,7 @@ import org.apache.hadoop.fs.statistics.MeanStatistic; import org.apache.hadoop.metrics2.lib.MutableCounterLong; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState; +import static org.apache.hadoop.util.Preconditions.checkState; /** * Builder of {@link DynamicIOStatistics}. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java index 4e5fc6a6a1071..6bc12fbb9d3b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/statistics/impl/WrappedIOStatistics.java @@ -20,7 +20,7 @@ import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.statistics.IOStatistics; import org.apache.hadoop.fs.statistics.MeanStatistic; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/DataBlocks.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/DataBlocks.java index a267ce67660f5..c70d0ee91e15e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/DataBlocks.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/store/DataBlocks.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java index 05834718811eb..9716147fd11d6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/InodeTree.java @@ -18,7 +18,7 @@ package org.apache.hadoop.fs.viewfs; import java.util.function.Function; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java index 3d405e86f2be1..39fa68c918922 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java @@ -47,7 +47,7 @@ import java.util.Set; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java index da793f5d3e49d..de6b093ae1006 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFs.java @@ -36,7 +36,7 @@ 
import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index 6cabcd04019ea..88ef5bdc808b5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -44,7 +44,7 @@ import org.apache.zookeeper.KeeperException.Code; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java index e7ed7304988cb..7fd58cf443bc7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java index 1d76d0ab76e65..b315e6c4c81d3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.util.Daemon; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 78ee202fd350a..953cd9de80ee1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -54,7 +54,7 @@ import org.apache.zookeeper.data.ACL; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java index 
5f47ddb339212..ca2687ce5f2d1 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HtmlQuoting.java @@ -120,7 +120,7 @@ public static String quoteHtmlChars(String item) { ByteArrayOutputStream buffer = new ByteArrayOutputStream(); try { quoteHtmlChars(buffer, bytes, 0, bytes.length); - return buffer.toString("UTF-8"); + return new String(buffer.toByteArray(), StandardCharsets.UTF_8); } catch (IOException ioe) { // Won't happen, since it is a bytearrayoutputstream return null; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java index b2f18538b6c7d..fb4616df25c5b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java @@ -19,16 +19,11 @@ import java.util.HashMap; -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogConfigurationException; -import org.apache.commons.logging.LogFactory; -import org.apache.log4j.Appender; -import org.eclipse.jetty.server.AsyncRequestLogWriter; -import org.eclipse.jetty.server.CustomRequestLog; -import org.eclipse.jetty.server.RequestLog; +import org.eclipse.jetty.server.Slf4jRequestLogWriter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.eclipse.jetty.server.CustomRequestLog; +import org.eclipse.jetty.server.RequestLog; /** * RequestLog object for use with Http @@ -47,57 +42,16 @@ public class HttpRequestLog { } public static RequestLog getRequestLog(String name) { - String lookup = serverToComponent.get(name); if (lookup != null) { name = lookup; } String loggerName = "http.requests." + name; - String appenderName = name + "requestlog"; - Log logger = LogFactory.getLog(loggerName); - - boolean isLog4JLogger;; - try { - isLog4JLogger = logger instanceof Log4JLogger; - } catch (NoClassDefFoundError err) { - // In some dependent projects, log4j may not even be on the classpath at - // runtime, in which case the above instanceof check will throw - // NoClassDefFoundError. 
- LOG.debug("Could not load Log4JLogger class", err); - isLog4JLogger = false; - } - if (isLog4JLogger) { - Log4JLogger httpLog4JLog = (Log4JLogger)logger; - org.apache.log4j.Logger httpLogger = httpLog4JLog.getLogger(); - Appender appender = null; - - try { - appender = httpLogger.getAppender(appenderName); - } catch (LogConfigurationException e) { - LOG.warn("Http request log for {} could not be created", loggerName); - throw e; - } - - if (appender == null) { - LOG.info("Http request log for {} is not defined", loggerName); - return null; - } + Slf4jRequestLogWriter writer = new Slf4jRequestLogWriter(); + writer.setLoggerName(loggerName); + return new CustomRequestLog(writer, CustomRequestLog.EXTENDED_NCSA_FORMAT); + } - if (appender instanceof HttpRequestLogAppender) { - HttpRequestLogAppender requestLogAppender - = (HttpRequestLogAppender)appender; - AsyncRequestLogWriter logWriter = new AsyncRequestLogWriter(); - logWriter.setFilename(requestLogAppender.getFilename()); - logWriter.setRetainDays(requestLogAppender.getRetainDays()); - return new CustomRequestLog(logWriter, - CustomRequestLog.EXTENDED_NCSA_FORMAT); - } else { - LOG.warn("Jetty request log for {} was of the wrong class", loggerName); - return null; - } - } else { - LOG.warn("Jetty request log can only be enabled using Log4j"); - return null; - } + private HttpRequestLog() { } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 4cc70c201eee0..871731ef814d6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -54,7 +54,7 @@ import javax.servlet.http.HttpServletRequestWrapper; import javax.servlet.http.HttpServletResponse; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.Lists; import com.sun.jersey.spi.container.servlet.ServletContainer; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java index c5746e6a3001a..6776b67d9e836 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DataOutputBuffer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** A reusable {@link DataOutput} implementation that writes to an in-memory * buffer. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java index 7be50b0c539b9..92eff36ced289 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/DefaultStringifier.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.nio.charset.UnsupportedCharsetException; import java.util.ArrayList; import org.apache.commons.codec.binary.Base64; @@ -75,14 +74,10 @@ public DefaultStringifier(Configuration conf, Class c) { @Override public T fromString(String str) throws IOException { - try { - byte[] bytes = Base64.decodeBase64(str.getBytes("UTF-8")); - inBuf.reset(bytes, bytes.length); - T restored = deserializer.deserialize(null); - return restored; - } catch (UnsupportedCharsetException ex) { - throw new IOException(ex.toString()); - } + byte[] bytes = Base64.decodeBase64(str.getBytes(StandardCharsets.UTF_8)); + inBuf.reset(bytes, bytes.length); + T restored = deserializer.deserialize(null); + return restored; } @Override diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java index f0a9b0b6952f2..c58f83011b58f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java @@ -32,7 +32,6 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -45,7 +44,7 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY; /** - * An utility class for I/O related functionality. + * An utility class for I/O related functionality. */ @InterfaceAudience.Public @InterfaceStability.Evolving @@ -57,8 +56,8 @@ public class IOUtils { * * @param in InputStrem to read from * @param out OutputStream to write to - * @param buffSize the size of the buffer - * @param close whether or not close the InputStream and + * @param buffSize the size of the buffer + * @param close whether or not close the InputStream and * OutputStream at the end. The streams are closed in the finally clause. * @throws IOException raised on errors performing I/O. */ @@ -80,16 +79,16 @@ public static void copyBytes(InputStream in, OutputStream out, } } } - + /** * Copies from one stream to another. - * + * * @param in InputStrem to read from * @param out OutputStream to write to * @param buffSize the size of the buffer. * @throws IOException raised on errors performing I/O. */ - public static void copyBytes(InputStream in, OutputStream out, int buffSize) + public static void copyBytes(InputStream in, OutputStream out, int buffSize) throws IOException { PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null; byte buf[] = new byte[buffSize]; @@ -104,7 +103,7 @@ public static void copyBytes(InputStream in, OutputStream out, int buffSize) } /** - * Copies from one stream to another. closes the input and output streams + * Copies from one stream to another. closes the input and output streams * at the end. 
* * @param in InputStrem to read from @@ -117,14 +116,14 @@ public static void copyBytes(InputStream in, OutputStream out, Configuration con copyBytes(in, out, conf.getInt( IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT), true); } - + /** * Copies from one stream to another. * * @param in InputStream to read from * @param out OutputStream to write to * @param conf the Configuration object - * @param close whether or not close the InputStream and + * @param close whether or not close the InputStream and * OutputStream at the end. The streams are closed in the finally clause. * @throws IOException raised on errors performing I/O. */ @@ -174,12 +173,12 @@ public static void copyBytes(InputStream in, OutputStream out, long count, } } } - + /** * Utility wrapper for reading from {@link InputStream}. It catches any errors * thrown by the underlying stream (either IO or decompression-related), and * re-throws as an IOException. - * + * * @param is - InputStream to be read from * @param buf - buffer the data is read into * @param off - offset within buf @@ -205,7 +204,7 @@ public static int wrappedReadForCompressedData(InputStream is, byte[] buf, * @param buf The buffer to fill * @param off offset from the buffer * @param len the length of bytes to read - * @throws IOException if it could not read requested number of bytes + * @throws IOException if it could not read requested number of bytes * for any reason (including EOF) */ public static void readFully(InputStream in, byte[] buf, @@ -220,12 +219,12 @@ public static void readFully(InputStream in, byte[] buf, off += ret; } } - + /** * Similar to readFully(). Skips bytes in a loop. * @param in The InputStream to skip bytes from * @param len number of bytes to skip. - * @throws IOException if it could not skip requested number of bytes + * @throws IOException if it could not skip requested number of bytes * for any reason (including EOF) */ public static void skipFully(InputStream in, long len) throws IOException { @@ -233,7 +232,7 @@ public static void skipFully(InputStream in, long len) throws IOException { while (amt > 0) { long ret = in.skip(amt); if (ret == 0) { - // skip may return 0 even if we're not at EOF. Luckily, we can + // skip may return 0 even if we're not at EOF. Luckily, we can // use the read() method to figure out if we're at the end. int b = in.read(); if (b == -1) { @@ -245,30 +244,6 @@ public static void skipFully(InputStream in, long len) throws IOException { amt -= ret; } } - - /** - * Close the Closeable objects and ignore any {@link Throwable} or - * null pointers. Must only be used for cleanup in exception handlers. - * - * @param log the log to record problems to at debug level. Can be null. - * @param closeables the objects to close - * @deprecated use {@link #cleanupWithLogger(Logger, java.io.Closeable...)} - * instead - */ - @Deprecated - public static void cleanup(Log log, java.io.Closeable... closeables) { - for (java.io.Closeable c : closeables) { - if (c != null) { - try { - c.close(); - } catch(Throwable e) { - if (log != null && log.isDebugEnabled()) { - log.debug("Exception in closing " + c, e); - } - } - } - } - } /** * Close the Closeable objects and ignore any {@link Throwable} or @@ -330,7 +305,7 @@ public static void closeSocket(Socket sock) { } } } - + /** * The /dev/null of OutputStreams. 
*/ @@ -342,11 +317,11 @@ public void write(byte[] b, int off, int len) throws IOException { @Override public void write(int b) throws IOException { } - } - + } + /** * Write a ByteBuffer to a WritableByteChannel, handling short writes. - * + * * @param bc The WritableByteChannel to write to * @param buf The input buffer * @throws IOException On I/O error @@ -359,9 +334,9 @@ public static void writeFully(WritableByteChannel bc, ByteBuffer buf) } /** - * Write a ByteBuffer to a FileChannel at a given offset, + * Write a ByteBuffer to a FileChannel at a given offset, * handling short writes. - * + * * @param fc The FileChannel to write to * @param buf The input buffer * @param offset The offset in the file to start writing at diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java index 4738513de6e1e..2a6fafce545f3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java @@ -30,7 +30,7 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java index 187398de0ec86..b46aed1208d76 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.ReflectionUtils; +import java.nio.charset.StandardCharsets; import java.util.zip.GZIPInputStream; import java.util.zip.GZIPOutputStream; @@ -86,12 +87,12 @@ public static int writeCompressedByteArray(DataOutput out, public static String readCompressedString(DataInput in) throws IOException { byte[] bytes = readCompressedByteArray(in); if (bytes == null) return null; - return new String(bytes, "UTF-8"); + return new String(bytes, StandardCharsets.UTF_8); } public static int writeCompressedString(DataOutput out, String s) throws IOException { - return writeCompressedByteArray(out, (s != null) ? s.getBytes("UTF-8") : null); + return writeCompressedByteArray(out, (s != null) ? 
s.getBytes(StandardCharsets.UTF_8) : null); } /* @@ -103,7 +104,7 @@ public static int writeCompressedString(DataOutput out, String s) throws IOExce */ public static void writeString(DataOutput out, String s) throws IOException { if (s != null) { - byte[] buffer = s.getBytes("UTF-8"); + byte[] buffer = s.getBytes(StandardCharsets.UTF_8); int len = buffer.length; out.writeInt(len); out.write(buffer, 0, len); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java index 85aebd8c74bf5..f89a0d9812d7c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/CodecUtil.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.erasurecode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.erasurecode.codec.ErasureCodec; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java index d5cd637ac33c1..3ebbcd912dc71 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/AsyncCallHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.io.retry; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.ipc.Client; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java index 7ccd6deb7f913..9311bf40fd208 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/CallReturn.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.io.retry; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** The call return from a method invocation. 
*/ class CallReturn { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 9cd94feba07c3..16f550407c850 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -21,7 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java index 1873ef47cf4eb..38641d30aded7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ClientId.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A class defining a set of static helper methods to provide conversion between diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java index 1cf4a11651817..35ddd3e40a6da 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java @@ -38,7 +38,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectWriter; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.AtomicDoubleArray; import org.apache.hadoop.security.UserGroupInformation; import org.apache.commons.lang3.exception.ExceptionUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java index 0e15f6860b62b..1107342f268a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java @@ -29,7 +29,7 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java index e2ad16fce2c57..22b57a52a31ae 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/LogLevel.java @@ -23,6 +23,7 @@ import java.io.PrintWriter; import java.net.URL; import java.net.URLConnection; +import java.nio.charset.StandardCharsets; import java.util.regex.Pattern; import javax.net.ssl.HttpsURLConnection; @@ -33,11 +34,8 @@ import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Jdk14Logger; -import org.apache.commons.logging.impl.Log4JLogger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -48,9 +46,12 @@ import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; import org.apache.hadoop.security.ssl.SSLFactory; import org.apache.hadoop.util.GenericOptionsParser; +import org.apache.hadoop.util.GenericsUtil; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; /** * Change log level in runtime. @@ -296,7 +297,7 @@ private void process(String urlString) throws Exception { // read from the servlet BufferedReader in = new BufferedReader( - new InputStreamReader(connection.getInputStream(), Charsets.UTF_8)); + new InputStreamReader(connection.getInputStream(), StandardCharsets.UTF_8)); for (String line;;) { line = in.readLine(); if (line == null) { @@ -340,21 +341,17 @@ public void doGet(HttpServletRequest request, HttpServletResponse response out.println(MARKER + "Submitted Class Name: " + logName + "
"); - Log log = LogFactory.getLog(logName); + org.slf4j.Logger log = LoggerFactory.getLogger(logName); out.println(MARKER + "Log Class: " + log.getClass().getName() +"
"); if (level != null) { out.println(MARKER + "Submitted Level: " + level + "
"); } - if (log instanceof Log4JLogger) { - process(((Log4JLogger)log).getLogger(), level, out); - } - else if (log instanceof Jdk14Logger) { - process(((Jdk14Logger)log).getLogger(), level, out); - } - else { - out.println("Sorry, " + log.getClass() + " not supported.
"); + if (GenericsUtil.isLog4jLogger(logName)) { + process(Logger.getLogger(logName), level, out); + } else { + out.println("Sorry, setting log level is only supported for log4j loggers.
"); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java index a9e777bcba952..b302ef3145479 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/AbstractMetric.java @@ -24,7 +24,7 @@ import java.util.StringJoiner; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * The immutable metric diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java index 26973f8fb9870..7d073c45300df 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/MetricsTag.java @@ -24,7 +24,7 @@ import java.util.StringJoiner; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * Immutable tag for metrics (for grouping on host/queue/username etc.) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java index 2d22b75841b33..f4848fed519d8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsConfig.java @@ -23,6 +23,8 @@ import java.net.URL; import java.net.URLClassLoader; import static java.security.AccessController.*; + +import java.nio.charset.StandardCharsets; import java.security.PrivilegedAction; import java.util.Iterator; import java.util.Map; @@ -289,7 +291,7 @@ static String toString(Configuration c) { PropertiesConfiguration tmp = new PropertiesConfiguration(); tmp.copy(c); tmp.write(pw); - return buffer.toString("UTF-8"); + return new String(buffer.toByteArray(), StandardCharsets.UTF_8); } catch (Exception e) { throw new MetricsConfigException(e); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java index 14b930e830d77..9ffceaaa0ddda 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsRecordImpl.java @@ -20,7 +20,7 @@ import java.util.List; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.AbstractMetric; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index 836d9d5cf816f..c8843f2812e57 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -22,7 +22,7 @@ import java.util.Random; import java.util.concurrent.*; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java index efc8127493fcd..2acdc9a16bfb5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSourceAdapter.java @@ -29,9 +29,9 @@ import javax.management.ObjectName; import javax.management.ReflectionException; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.metrics2.AbstractMetric; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index 595f45566acd4..298c76ec7ecaf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.VisibleForTesting; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.commons.configuration2.PropertiesConfiguration; import org.apache.commons.math3.util.ArithmeticUtils; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java index 96eb5026be179..434e3a586b546 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MethodMetric.java @@ -20,7 +20,7 @@ import java.lang.reflect.Method; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.metrics2.MetricsException; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java index e86398f544edf..f31a3b27c37e7 100644 --- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsInfoImpl.java @@ -23,7 +23,7 @@ import java.util.StringJoiner; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * Making implementing metric info a little easier diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java index f400f02d256f2..84e24ad49a17e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MetricsSourceBuilder.java @@ -22,7 +22,7 @@ import java.lang.reflect.Field; import java.lang.reflect.Method; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.metrics2.MetricsCollector; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java index e616bb6d934dd..d72923148183c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableCounter.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java index 6c77e97353869..516e47b53d6b6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableGauge.java @@ -18,7 +18,7 @@ package org.apache.hadoop.metrics2.lib; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java index a83b68bab7aa5..19696bd839400 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java @@ -21,7 +21,7 @@ import java.lang.reflect.Method; import java.util.Set; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; 
import org.apache.hadoop.util.Sets; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java index 4694946d6fc11..3bbb7f325acff 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRollingAverages.java @@ -39,7 +39,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.util.Time; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java index d0eddd0b9d475..605cb8f0f66e3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/source/JvmMetrics.java @@ -28,7 +28,7 @@ import java.util.concurrent.ConcurrentHashMap; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java index 98fd9cce57fe5..230e651ff10e4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java @@ -33,7 +33,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java index 4c1c412c93f29..46a9d35f9d242 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/SampleQuantiles.java @@ -28,7 +28,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Implementation of the Cormode, Korn, Muthukrishnan, and Srivastava algorithm diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index 6db755743b9cc..11423d5e13bea 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -60,7 +60,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.ReflectionUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index df7208dfbdac7..b1b281451f663 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -18,7 +18,7 @@ package org.apache.hadoop.net; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java index 45f776e692ac4..70093a4d12b5d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java @@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A wrapper stream around a socket which allows setting of its timeout. 
If the @@ -85,4 +85,4 @@ public ReadableByteChannel getReadableByteChannel() { this.socket); return (SocketInputStream)in; } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java index ff3410c9e9d02..531391452ab28 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/unix/DomainSocketWatcher.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.NativeCodeLoader; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java index 270b579324c86..8e3de21064e7e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslPlainServer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.security; +import java.nio.charset.StandardCharsets; import java.security.Provider; import java.util.Map; @@ -82,7 +83,7 @@ public byte[] evaluateResponse(byte[] response) throws SaslException { try { String payload; try { - payload = new String(response, "UTF-8"); + payload = new String(response, StandardCharsets.UTF_8); } catch (Exception e) { throw new IllegalArgumentException("Received corrupt response", e); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java index 260f1d22496f0..d594d26515bfb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/AbstractJavaKeyStoreProvider.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.ProviderUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -33,6 +32,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.security.GeneralSecurityException; import java.security.KeyStore; import java.security.KeyStoreException; @@ -199,7 +199,7 @@ public CredentialEntry getCredentialEntry(String alias) public static char[] bytesToChars(byte[] bytes) throws IOException { String pass; - pass = new String(bytes, Charsets.UTF_8); + pass = new String(bytes, StandardCharsets.UTF_8); return pass.toCharArray(); } @@ -268,7 +268,7 @@ CredentialEntry innerSetCredential(String alias, char[] material) writeLock.lock(); try { keyStore.setKeyEntry(alias, - new SecretKeySpec(new String(material).getBytes("UTF-8"), + new SecretKeySpec(new String(material).getBytes(StandardCharsets.UTF_8), getAlgorithm()), password, 
null); } catch (KeyStoreException e) { throw new IOException("Can't store credential " + alias + " in " + this, diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java index 0c960d891b53b..2ae98f033735e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/alias/UserProvider.java @@ -70,7 +70,7 @@ public synchronized CredentialEntry createCredentialEntry(String name, char[] cr " already exists in " + this); } credentials.addSecretKey(new Text(name), - new String(credential).getBytes("UTF-8")); + new String(credential).getBytes(StandardCharsets.UTF_8)); return new CredentialEntry(name, credential); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java index 17d4e478aafd0..cc80708f1854e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ProxyUsers.java @@ -20,7 +20,7 @@ import java.net.InetAddress; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java index 770e3826581af..1f213d59d706b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/FileMonitoringTimerTask.java @@ -18,7 +18,7 @@ package org.apache.hadoop.security.ssl; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 61c3312c1078a..ffada2bf5ad2e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -55,7 +55,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.Time; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.functional.InvocationRaisingIOE; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java index 8a635c3aeb4ee..06c03d42bdb91 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/ZKDelegationTokenSecretManager.java @@ -64,7 +64,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * An implementation of {@link AbstractDelegationTokenSecretManager} that diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java index 7797bfe496f50..31eef61c8f57c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticatedURL.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.security.token.delegation.web; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.Text; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java index 571e54c5f907c..1400e572d94f0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java @@ -51,7 +51,7 @@ import javax.servlet.http.HttpServletResponse; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.Enumeration; import java.util.List; @@ -94,8 +94,6 @@ public class DelegationTokenAuthenticationFilter public static final String DELEGATION_TOKEN_SECRET_MANAGER_ATTR = "hadoop.http.delegation-token-secret-manager"; - private static final Charset UTF8_CHARSET = Charset.forName("UTF-8"); - private static final ThreadLocal UGI_TL = new ThreadLocal(); public static final String PROXYUSER_PREFIX = "proxyuser"; @@ -226,7 +224,7 @@ static String getDoAs(HttpServletRequest request) { if (queryString == null) { return null; } - List list = URLEncodedUtils.parse(queryString, UTF8_CHARSET); + List list = URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8); if (list != null) { for (NameValuePair nv : list) { if (DelegationTokenAuthenticatedURL.DO_AS. 
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java index 865977e67d07a..dbfaa86dfd4fc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/MultiSchemeDelegationTokenAuthenticationHandler.java @@ -36,7 +36,7 @@ import org.apache.hadoop.security.authentication.server.HttpConstants; import org.apache.hadoop.security.authentication.server.MultiSchemeAuthenticationHandler; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; /** @@ -180,4 +180,4 @@ public AuthenticationToken authenticate(HttpServletRequest request, return super.authenticate(request, response); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java index 078dfa44bddae..e43668e2c60d5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/ServletUtils.java @@ -23,7 +23,7 @@ import javax.servlet.http.HttpServletRequest; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.List; /** @@ -31,7 +31,6 @@ */ @InterfaceAudience.Private class ServletUtils { - private static final Charset UTF8_CHARSET = Charset.forName("UTF-8"); /** * Extract a query string parameter without triggering http parameters @@ -49,7 +48,7 @@ public static String getParameter(HttpServletRequest request, String name) if (queryString == null) { return null; } - List list = URLEncodedUtils.parse(queryString, UTF8_CHARSET); + List list = URLEncodedUtils.parse(queryString, StandardCharsets.UTF_8); if (list != null) { for (NameValuePair nv : list) { if (name.equals(nv.getName())) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java index 726a83da2572b..5964d614fe9d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/ServiceOperations.java @@ -21,10 +21,9 @@ import java.util.ArrayList; import java.util.List; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; -import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** @@ -75,9 +74,10 @@ public static Exception stopQuietly(Service service) { * @param log the log to warn at * @param service a service; may be null * @return any exception that was caught; null if none was. 
- * @see ServiceOperations#stopQuietly(Service) + * @deprecated to be removed with 3.4.0. Use {@link #stopQuietly(Logger, Service)} instead. */ - public static Exception stopQuietly(Log log, Service service) { + @Deprecated + public static Exception stopQuietly(org.apache.commons.logging.Log log, Service service) { try { stop(service); } catch (Exception e) { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java index ad92d4c6d7a24..4d43c3a106f5e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/InterruptEscalator.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java index bcb589f24885f..d423e59aa9759 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/IrqHandler.java @@ -20,7 +20,7 @@ import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import sun.misc.Signal; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java index 2909ab42951b4..65f0f237e29ac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/launcher/ServiceLauncher.java @@ -26,7 +26,7 @@ import java.util.List; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java index 8accddabe091e..2934df2c574b5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ChunkedArrayList.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables; import org.apache.hadoop.util.Lists; diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java index f81a429b5d422..6cfcfad5b4fce 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/CloseableReferenceCount.java @@ -21,7 +21,7 @@ import java.nio.channels.ClosedChannelException; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A closeable object that maintains a reference count. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java index 042f95b2c081f..ffb647fe43fee 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GcTimeMonitor.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.lang.management.GarbageCollectorMXBean; import java.lang.management.ManagementFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java index df81bf1209360..02b3be65792dd 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericsUtil.java @@ -20,6 +20,7 @@ import java.lang.reflect.Array; import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -27,15 +28,23 @@ import org.slf4j.LoggerFactory; /** - * Contains utility methods for dealing with Java Generics. + * Contains utility methods for dealing with Java Generics. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class GenericsUtil { + private static final String SLF4J_LOG4J_ADAPTER_CLASS = "org.slf4j.impl.Reload4jLoggerAdapter"; + + /** + * Set to false only if log4j adapter class is not found in the classpath. Once set to false, + * the utility method should not bother re-loading class again. + */ + private static final AtomicBoolean IS_LOG4J_LOGGER = new AtomicBoolean(true); + /** - * Returns the Class object (of type Class<T>) of the - * argument of type T. + * Returns the Class object (of type Class<T>) of the + * argument of type T. * @param The type of the argument * @param t the object to get it class * @return Class<T> @@ -47,7 +56,7 @@ public static Class getClass(T t) { } /** - * Converts the given List<T> to a an array of + * Converts the given List<T> to a an array of * T[]. * @param c the Class object of the items in the list * @param list the list to convert @@ -66,11 +75,11 @@ public static T[] toArray(Class c, List list) /** - * Converts the given List<T> to a an array of - * T[]. + * Converts the given List<T> to a an array of + * T[]. * @param list the list to convert * @param Generics Type T. 
- * @throws ArrayIndexOutOfBoundsException if the list is empty. + * @throws ArrayIndexOutOfBoundsException if the list is empty. * Use {@link #toArray(Class, List)} if the list may be empty. * @return T Array. */ @@ -87,12 +96,27 @@ public static boolean isLog4jLogger(Class clazz) { if (clazz == null) { return false; } - Logger log = LoggerFactory.getLogger(clazz); + return isLog4jLogger(clazz.getName()); + } + + /** + * Determine whether the log of the given logger is of Log4J implementation. + * + * @param logger the logger name, usually class name as string. + * @return true if the logger uses Log4J implementation. + */ + public static boolean isLog4jLogger(String logger) { + if (logger == null || !IS_LOG4J_LOGGER.get()) { + return false; + } + Logger log = LoggerFactory.getLogger(logger); try { - Class log4jClass = Class.forName("org.slf4j.impl.Reload4jLoggerAdapter"); + Class log4jClass = Class.forName(SLF4J_LOG4J_ADAPTER_CLASS); return log4jClass.isInstance(log); } catch (ClassNotFoundException e) { + IS_LOG4J_LOGGER.set(false); return false; } } + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java index 5c4bfb15697a8..1522eb67494fa 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IdentityHashStore.java @@ -21,7 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The IdentityHashStore stores (key, value) mappings in an array. 
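[Editor's aside] Most of the hunks immediately above and below this point only swap the shaded Guava import for Hadoop's own org.apache.hadoop.util.Preconditions; the call sites themselves are untouched. A minimal sketch of the resulting usage, kept to the overloads the patch itself exercises (checkNotNull(obj) and checkArgument(boolean)); the class and field names here are hypothetical, not from the patch:

import org.apache.hadoop.util.Preconditions;

/** Hypothetical holder used only to illustrate the import swap. */
class RefCountHolder {
  private final Object resource;
  private int count;

  RefCountHolder(Object resource) {
    // Same static method names as the removed Guava class, so existing
    // call sites compile unchanged once the import is switched.
    Preconditions.checkNotNull(resource);
    this.resource = resource;
  }

  void release() {
    // checkArgument throws IllegalArgumentException when the condition is false.
    Preconditions.checkArgument(count > 0);
    count--;
  }
}

Because the replacement keeps the Guava-style method names, the import line is the only change these files need, which is exactly what the hunks show.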
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java index ff478484f9a7a..5ca8806de6f3e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/IntrusiveCollection.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java index d634bef644c35..6788728c90eb2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JsonSerialization.java @@ -36,7 +36,7 @@ import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; import com.fasterxml.jackson.databind.SerializationFeature; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java index 821620ef3733e..d80b58a607a88 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LightWeightCache.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A low memory footprint Cache which extends {@link LightWeightGSet}. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java index de95e98e1f4dd..5936722b7c063 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LimitInputStream.java @@ -19,8 +19,8 @@ package org.apache.hadoop.util; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkArgument; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import java.io.FilterInputStream; import java.io.IOException; @@ -32,7 +32,7 @@ * Copied from guava source code v15 (LimitedInputStream) * Guava deprecated LimitInputStream in v14 and removed it in v15. Copying this class here * allows to be compatible with guava 11 to 15+. 
- * + * * Originally: org.apache.hadoop.hbase.io.LimitInputStream */ @Unstable diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java index b2bcbf57ef25a..ab52d307cb618 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/LogAdapter.java @@ -17,61 +17,40 @@ */ package org.apache.hadoop.util; -import org.apache.commons.logging.Log; import org.slf4j.Logger; class LogAdapter { - private Log LOG; private Logger LOGGER; - private LogAdapter(Log LOG) { - this.LOG = LOG; - } private LogAdapter(Logger LOGGER) { this.LOGGER = LOGGER; } - /** - * @deprecated use {@link #create(Logger)} instead - */ - @Deprecated - public static LogAdapter create(Log LOG) { - return new LogAdapter(LOG); - } - public static LogAdapter create(Logger LOGGER) { return new LogAdapter(LOGGER); } public void info(String msg) { - if (LOG != null) { - LOG.info(msg); - } else if (LOGGER != null) { + if (LOGGER != null) { LOGGER.info(msg); } } public void warn(String msg, Throwable t) { - if (LOG != null) { - LOG.warn(msg, t); - } else if (LOGGER != null) { + if (LOGGER != null) { LOGGER.warn(msg, t); } } public void debug(Throwable t) { - if (LOG != null) { - LOG.debug(t); - } else if (LOGGER != null) { + if (LOGGER != null) { LOGGER.debug("", t); } } public void error(String msg) { - if (LOG != null) { - LOG.error(msg); - } else if (LOGGER != null) { + if (LOGGER != null) { LOGGER.error(msg); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java index c32d5ca5ada19..0471b5adadf83 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReflectionUtils.java @@ -37,7 +37,7 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; @@ -56,20 +56,20 @@ @InterfaceAudience.Public @InterfaceStability.Evolving public class ReflectionUtils { - + private static final Class[] EMPTY_ARRAY = new Class[]{}; volatile private static SerializationFactory serialFactory = null; - /** + /** * Cache of constructors for each class. Pins the classes so they * can't be garbage collected until ReflectionUtils can be collected. */ - private static final Map, Constructor> CONSTRUCTOR_CACHE = + private static final Map, Constructor> CONSTRUCTOR_CACHE = new ConcurrentHashMap, Constructor>(); /** * Check and set 'configuration' if necessary. - * + * * @param theObject object for which to set configuration * @param conf Configuration */ @@ -81,11 +81,11 @@ public static void setConf(Object theObject, Configuration conf) { setJobConf(theObject, conf); } } - + /** - * This code is to support backward compatibility and break the compile + * This code is to support backward compatibility and break the compile * time dependency of core on mapred. - * This should be made deprecated along with the mapred package HADOOP-1230. 
+ * This should be made deprecated along with the mapred package HADOOP-1230. * Should be removed when mapred package is removed. */ private static void setJobConf(Object theObject, Configuration conf) { @@ -94,20 +94,20 @@ private static void setJobConf(Object theObject, Configuration conf) { //conf is of type JobConf then //invoke configure on theObject try { - Class jobConfClass = + Class jobConfClass = conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConf"); if (jobConfClass == null) { return; } - - Class jobConfigurableClass = + + Class jobConfigurableClass = conf.getClassByNameOrNull("org.apache.hadoop.mapred.JobConfigurable"); if (jobConfigurableClass == null) { return; } if (jobConfClass.isAssignableFrom(conf.getClass()) && jobConfigurableClass.isAssignableFrom(theObject.getClass())) { - Method configureMethod = + Method configureMethod = jobConfigurableClass.getMethod("configure", jobConfClass); configureMethod.invoke(theObject, conf); } @@ -141,23 +141,23 @@ public static T newInstance(Class theClass, Configuration conf) { return result; } - static private ThreadMXBean threadBean = + static private ThreadMXBean threadBean = ManagementFactory.getThreadMXBean(); - + public static void setContentionTracing(boolean val) { threadBean.setThreadContentionMonitoringEnabled(val); } - + private static String getTaskName(long id, String name) { if (name == null) { return Long.toString(id); } return id + " (" + name + ")"; } - + /** * Print all of the thread's information and stack traces. - * + * * @param stream the stream to * @param title a string title for the stack trace */ @@ -174,7 +174,7 @@ public synchronized static void printThreadInfo(PrintStream stream, stream.println(" Inactive"); continue; } - stream.println("Thread " + + stream.println("Thread " + getTaskName(info.getThreadId(), info.getThreadName()) + ":"); Thread.State state = info.getThreadState(); @@ -189,7 +189,7 @@ public synchronized static void printThreadInfo(PrintStream stream, stream.println(" Waiting on " + info.getLockName()); } else if (state == Thread.State.BLOCKED) { stream.println(" Blocked on " + info.getLockName()); - stream.println(" Blocked by " + + stream.println(" Blocked by " + getTaskName(info.getLockOwnerId(), info.getLockOwnerName())); } @@ -200,37 +200,8 @@ public synchronized static void printThreadInfo(PrintStream stream, } stream.flush(); } - + private static long previousLogTime = 0; - - /** - * Log the current thread stacks at INFO level. - * @param log the logger that logs the stack trace - * @param title a descriptive title for the call stacks - * @param minInterval the minimum time from the last - */ - public static void logThreadInfo(Log log, - String title, - long minInterval) { - boolean dumpStack = false; - if (log.isInfoEnabled()) { - synchronized (ReflectionUtils.class) { - long now = Time.monotonicNow(); - if (now - previousLogTime >= minInterval * 1000) { - previousLogTime = now; - dumpStack = true; - } - } - if (dumpStack) { - try { - ByteArrayOutputStream buffer = new ByteArrayOutputStream(); - printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title); - log.info(buffer.toString(StandardCharsets.UTF_8.name())); - } catch (UnsupportedEncodingException ignored) { - } - } - } - } /** * Log the current thread stacks at INFO level. 
@@ -272,12 +243,12 @@ public static void logThreadInfo(Logger log, public static Class getClass(T o) { return (Class)o.getClass(); } - + // methods to support testing static void clearCache() { CONSTRUCTOR_CACHE.clear(); } - + static int getCacheSize() { return CONSTRUCTOR_CACHE.size(); } @@ -294,7 +265,7 @@ void moveData() { inBuffer.reset(outBuffer.getData(), outBuffer.getLength()); } } - + /** * Allocate a buffer for each thread that tries to clone objects. */ @@ -312,7 +283,7 @@ private static SerializationFactory getFactory(Configuration conf) { } return serialFactory; } - + /** * Make a copy of the writable object using serialization to a buffer. * @@ -324,7 +295,7 @@ private static SerializationFactory getFactory(Configuration conf) { * @throws IOException raised on errors performing I/O. */ @SuppressWarnings("unchecked") - public static T copy(Configuration conf, + public static T copy(Configuration conf, T src, T dst) throws IOException { CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get(); buffer.outBuffer.reset(); @@ -341,7 +312,7 @@ public static T copy(Configuration conf, } @Deprecated - public static void cloneWritableInto(Writable dst, + public static void cloneWritableInto(Writable dst, Writable src) throws IOException { CopyInCopyOutBuffer buffer = CLONE_BUFFERS.get(); buffer.outBuffer.reset(); @@ -349,7 +320,7 @@ public static void cloneWritableInto(Writable dst, buffer.moveData(); dst.readFields(buffer.inBuffer); } - + /** * @return Gets all the declared fields of a class including fields declared in * superclasses. @@ -369,10 +340,10 @@ public int compare(Field a, Field b) { } clazz = clazz.getSuperclass(); } - + return fields; } - + /** * @return Gets all the declared methods of a class including methods declared in * superclasses. 
@@ -386,7 +357,7 @@ public static List getDeclaredMethodsIncludingInherited(Class clazz) } clazz = clazz.getSuperclass(); } - + return methods; } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java index 455de4cb73994..5f2ce002fdc43 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ServletUtil.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; @InterfaceAudience.Private @InterfaceStability.Unstable @@ -110,4 +110,4 @@ public static String getRawPath(final HttpServletRequest request, String servlet Preconditions.checkArgument(request.getRequestURI().startsWith(servletName+"/")); return request.getRequestURI().substring(servletName.length()); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java index 605352443e565..5341a213f7acf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SignalLogger.java @@ -21,14 +21,15 @@ import sun.misc.Signal; import sun.misc.SignalHandler; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; /** * This class logs a message whenever we're about to exit on a UNIX signal. * This is helpful for determining the root cause of a process' exit. - * For example, if the process exited because the system administrator + * For example, if the process exited because the system administrator * ran a standard "kill," you would see 'EXITING ON SIGNAL SIGTERM' in the log. */ @InterfaceAudience.Private @@ -42,11 +43,11 @@ public enum SignalLogger { * Our signal handler. */ private static class Handler implements SignalHandler { - final private LogAdapter LOG; + final private Logger log; final private SignalHandler prevHandler; - Handler(String name, LogAdapter LOG) { - this.LOG = LOG; + Handler(String name, Logger log) { + this.log = log; prevHandler = Signal.handle(new Signal(name), this); } @@ -57,8 +58,7 @@ private static class Handler implements SignalHandler { */ @Override public void handle(Signal signal) { - LOG.error("RECEIVED SIGNAL " + signal.getNumber() + - ": SIG" + signal.getName()); + log.error("RECEIVED SIGNAL {}: SIG{}", signal.getNumber(), signal.getName()); prevHandler.handle(signal); } } @@ -68,30 +68,26 @@ public void handle(Signal signal) { * * @param LOG The log4j logfile to use in the signal handlers. 
*/ - public void register(final Log LOG) { - register(LogAdapter.create(LOG)); - } - - void register(final LogAdapter LOG) { + public void register(final Logger log) { if (registered) { throw new IllegalStateException("Can't re-install the signal handlers."); } registered = true; StringBuilder bld = new StringBuilder(); bld.append("registered UNIX signal handlers for ["); - final String SIGNALS[] = { "TERM", "HUP", "INT" }; + final String[] SIGNALS = {"TERM", "HUP", "INT"}; String separator = ""; for (String signalName : SIGNALS) { try { - new Handler(signalName, LOG); + new Handler(signalName, log); bld.append(separator) .append(signalName); separator = ", "; } catch (Exception e) { - LOG.debug(e); + log.debug("Error: ", e); } } bld.append("]"); - LOG.info(bld.toString()); + log.info(bld.toString()); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index b88ca62d78413..de4d7f1a2fe97 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -39,11 +39,12 @@ import org.apache.commons.lang3.time.FastDateFormat; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.fs.Path; import org.apache.hadoop.net.NetUtils; import org.apache.log4j.LogManager; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /** @@ -79,6 +80,18 @@ public class StringUtils { public static final Pattern ENV_VAR_PATTERN = Shell.WINDOWS ? WIN_ENV_VAR_PATTERN : SHELL_ENV_VAR_PATTERN; + /** + * {@link #getTrimmedStringCollectionSplitByEquals(String)} throws + * {@link IllegalArgumentException} with error message starting with this string + * if the argument provided is not valid representation of non-empty key-value + * pairs. + * Value = {@value} + */ + @VisibleForTesting + public static final String STRING_COLLECTION_SPLIT_EQUALS_INVALID_ARG = + "Trimmed string split by equals does not correctly represent " + + "non-empty key-value pairs."; + /** * Make a string representation of the exception. * @param e The exception to stringify @@ -91,7 +104,7 @@ public static String stringifyException(Throwable e) { wrt.close(); return stm.toString(); } - + /** * Given a full hostname, return the word upto the first dot. * @param fullHostname the full hostname @@ -107,10 +120,10 @@ public static String simpleHostname(String fullHostname) { } return fullHostname; } - + /** - * Given an integer, return a string that is in an approximate, but human - * readable format. + * Given an integer, return a string that is in an approximate, but human + * readable format. * @param number the number to format * @return a human readable form of the integer * @@ -140,14 +153,14 @@ public static String format(final String format, final Object... objects) { public static String formatPercent(double fraction, int decimalPlaces) { return format("%." + decimalPlaces + "f%%", fraction*100); } - + /** * Given an array of strings, return a comma-separated list of its elements. 
* @param strs Array of strings * @return Empty string if strs.length is 0, comma separated list of strings * otherwise */ - + public static String arrayToString(String[] strs) { if (strs.length == 0) { return ""; } StringBuilder sbuf = new StringBuilder(); @@ -171,7 +184,7 @@ public static String byteToHexString(byte[] bytes, int start, int end) { if (bytes == null) { throw new IllegalArgumentException("bytes == null"); } - StringBuilder s = new StringBuilder(); + StringBuilder s = new StringBuilder(); for(int i = start; i < end; i++) { s.append(format("%02x", bytes[i])); } @@ -228,7 +241,7 @@ public static String uriToString(URI[] uris){ } return ret.toString(); } - + /** * @param str * The string array to be parsed into an URI array. @@ -238,7 +251,7 @@ public static String uriToString(URI[] uris){ * If any string in str violates RFC 2396. */ public static URI[] stringToURI(String[] str){ - if (str == null) + if (str == null) return null; URI[] uris = new URI[str.length]; for (int i = 0; i < str.length;i++){ @@ -251,7 +264,7 @@ public static URI[] stringToURI(String[] str){ } return uris; } - + /** * stringToPath. * @param str str. @@ -268,26 +281,26 @@ public static Path[] stringToPath(String[] str){ return p; } /** - * - * Given a finish and start time in long milliseconds, returns a - * String in the format Xhrs, Ymins, Z sec, for the time difference between two times. - * If finish time comes before start time then negative valeus of X, Y and Z wil return. - * + * + * Given a finish and start time in long milliseconds, returns a + * String in the format Xhrs, Ymins, Z sec, for the time difference between two times. + * If finish time comes before start time then negative valeus of X, Y and Z wil return. + * * @param finishTime finish time * @param startTime start time * @return a String in the format Xhrs, Ymins, Z sec, * for the time difference between two times. */ public static String formatTimeDiff(long finishTime, long startTime){ - long timeDiff = finishTime - startTime; - return formatTime(timeDiff); + long timeDiff = finishTime - startTime; + return formatTime(timeDiff); } - + /** - * - * Given the time in long milliseconds, returns a - * String in the format Xhrs, Ymins, Z sec. - * + * + * Given the time in long milliseconds, returns a + * String in the format Xhrs, Ymins, Z sec. + * * @param timeDiff The time difference to format * @return formatTime String. */ @@ -298,7 +311,7 @@ public static String formatTime(long timeDiff){ long minutes = rem / (60*1000); rem = rem % (60*1000); long seconds = rem / 1000; - + if (hours != 0){ buf.append(hours); buf.append("hrs, "); @@ -310,7 +323,7 @@ public static String formatTime(long timeDiff){ // return "0sec if no difference buf.append(seconds); buf.append("sec"); - return buf.toString(); + return buf.toString(); } /** @@ -385,7 +398,7 @@ public static String getFormattedTimeWithDiff(String formattedFinishTime, } return buf.toString(); } - + /** * Returns an arraylist of strings. * @param str the comma separated string values @@ -422,7 +435,7 @@ public static Collection getStringCollection(String str){ /** * Returns a collection of strings. - * + * * @param str * String to parse * @param delim @@ -480,7 +493,7 @@ public static Collection getTrimmedStringCollection(String str){ set.remove(""); return set; } - + /** * Splits a comma or newline separated value String, trimming * leading and trailing whitespace on each value. 
@@ -502,7 +515,7 @@ public static String[] getTrimmedStrings(String str){ final public static char COMMA = ','; final public static String COMMA_STR = ","; final public static char ESCAPE_CHAR = '\\'; - + /** * Split a string using the default separator * @param str a string that may have escaped separator @@ -511,7 +524,7 @@ public static String[] getTrimmedStrings(String str){ public static String[] split(String str) { return split(str, ESCAPE_CHAR, COMMA); } - + /** * Split a string using the given separator * @param str a string that may have escaped separator @@ -530,7 +543,7 @@ public static String[] split( while ((index = findNext(str, separator, escapeChar, index, split)) >= 0) { ++index; // move over the separator for next search strList.add(split.toString()); - split.setLength(0); // reset the buffer + split.setLength(0); // reset the buffer } strList.add(split.toString()); // remove trailing empty split(s) @@ -569,7 +582,7 @@ public static String[] split( } return strList.toArray(new String[strList.size()]); } - + /** * Finds the first occurrence of the separator character ignoring the escaped * separators starting from the index. Note the substring between the index @@ -581,12 +594,12 @@ public static String[] split( * @param split used to pass back the extracted string * @return index. */ - public static int findNext(String str, char separator, char escapeChar, + public static int findNext(String str, char separator, char escapeChar, int start, StringBuilder split) { int numPreEscapes = 0; for (int i = start; i < str.length(); i++) { char curChar = str.charAt(i); - if (numPreEscapes == 0 && curChar == separator) { // separator + if (numPreEscapes == 0 && curChar == separator) { // separator return i; } else { split.append(curChar); @@ -597,7 +610,7 @@ public static int findNext(String str, char separator, char escapeChar, } return -1; } - + /** * Escape commas in the string using the default escape char * @param str a string @@ -606,11 +619,11 @@ public static int findNext(String str, char separator, char escapeChar, public static String escapeString(String str) { return escapeString(str, ESCAPE_CHAR, COMMA); } - + /** - * Escape charToEscape in the string + * Escape charToEscape in the string * with the escape char escapeChar - * + * * @param str string * @param escapeChar escape char * @param charToEscape the char to be escaped @@ -620,8 +633,8 @@ public static String escapeString( String str, char escapeChar, char charToEscape) { return escapeString(str, escapeChar, new char[] {charToEscape}); } - - // check if the character array has the character + + // check if the character array has the character private static boolean hasChar(char[] chars, char character) { for (char target : chars) { if (character == target) { @@ -630,7 +643,7 @@ private static boolean hasChar(char[] chars, char character) { } return false; } - + /** * escapeString. * @@ -639,7 +652,7 @@ private static boolean hasChar(char[] chars, char character) { * @param charsToEscape array of characters to be escaped * @return escapeString. 
*/ - public static String escapeString(String str, char escapeChar, + public static String escapeString(String str, char escapeChar, char[] charsToEscape) { if (str == null) { return null; @@ -655,7 +668,7 @@ public static String escapeString(String str, char escapeChar, } return result.toString(); } - + /** * Unescape commas in the string using the default escape char * @param str a string @@ -664,11 +677,11 @@ public static String escapeString(String str, char escapeChar, public static String unEscapeString(String str) { return unEscapeString(str, ESCAPE_CHAR, COMMA); } - + /** - * Unescape charToEscape in the string + * Unescape charToEscape in the string * with the escape char escapeChar - * + * * @param str string * @param escapeChar escape char * @param charToEscape the escaped char @@ -678,7 +691,7 @@ public static String unEscapeString( String str, char escapeChar, char charToEscape) { return unEscapeString(str, escapeChar, new char[] {charToEscape}); } - + /** * unEscapeString. * @param str str. @@ -686,7 +699,7 @@ public static String unEscapeString( * @param charsToEscape array of characters to unescape * @return escape string. */ - public static String unEscapeString(String str, char escapeChar, + public static String unEscapeString(String str, char escapeChar, char[] charsToEscape) { if (str == null) { return null; @@ -698,15 +711,15 @@ public static String unEscapeString(String str, char escapeChar, if (hasPreEscape) { if (curChar != escapeChar && !hasChar(charsToEscape, curChar)) { // no special char - throw new IllegalArgumentException("Illegal escaped string " + str + + throw new IllegalArgumentException("Illegal escaped string " + str + " unescaped " + escapeChar + " at " + (i-1)); - } + } // otherwise discard the escape char result.append(curChar); hasPreEscape = false; } else { if (hasChar(charsToEscape, curChar)) { - throw new IllegalArgumentException("Illegal escaped string " + str + + throw new IllegalArgumentException("Illegal escaped string " + str + " unescaped " + curChar + " at " + i); } else if (curChar == escapeChar) { hasPreEscape = true; @@ -716,12 +729,12 @@ public static String unEscapeString(String str, char escapeChar, } } if (hasPreEscape ) { - throw new IllegalArgumentException("Illegal escaped string " + str + + throw new IllegalArgumentException("Illegal escaped string " + str + ", not expecting " + escapeChar + " in the end." ); } return result.toString(); } - + /** * Return a message for logging. 
* @param prefix prefix keyword for the message @@ -741,42 +754,26 @@ public static String toStartupShutdownString(String prefix, String[] msg) { * Print a log message for starting up and shutting down * @param clazz the class of the server * @param args arguments - * @param LOG the target log object - */ - public static void startupShutdownMessage(Class clazz, String[] args, - final org.apache.commons.logging.Log LOG) { - startupShutdownMessage(clazz, args, LogAdapter.create(LOG)); - } - - /** - * Print a log message for starting up and shutting down - * @param clazz the class of the server - * @param args arguments - * @param LOG the target log object + * @param log the target log object */ public static void startupShutdownMessage(Class clazz, String[] args, - final org.slf4j.Logger LOG) { - startupShutdownMessage(clazz, args, LogAdapter.create(LOG)); - } - - static void startupShutdownMessage(Class clazz, String[] args, - final LogAdapter LOG) { + final org.slf4j.Logger log) { final String hostname = NetUtils.getHostname(); final String classname = clazz.getSimpleName(); - LOG.info(createStartupShutdownMessage(classname, hostname, args)); + log.info(createStartupShutdownMessage(classname, hostname, args)); if (SystemUtils.IS_OS_UNIX) { try { - SignalLogger.INSTANCE.register(LOG); + SignalLogger.INSTANCE.register(log); } catch (Throwable t) { - LOG.warn("failed to register any UNIX signal loggers: ", t); + log.warn("failed to register any UNIX signal loggers: ", t); } } ShutdownHookManager.get().addShutdownHook( new Runnable() { @Override public void run() { - LOG.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ + log.info(toStartupShutdownString("SHUTDOWN_MSG: ", new String[]{ "Shutting down " + classname + " at " + hostname})); LogManager.shutdown(); } @@ -800,7 +797,7 @@ public static String createStartupShutdownMessage(String classname, " version = " + VersionInfo.getVersion(), " classpath = " + System.getProperty("java.class.path"), " build = " + VersionInfo.getUrl() + " -r " - + VersionInfo.getRevision() + + VersionInfo.getRevision() + "; compiled by '" + VersionInfo.getUser() + "' on " + VersionInfo.getDate(), " java = " + System.getProperty("java.version") } @@ -810,7 +807,7 @@ public static String createStartupShutdownMessage(String classname, /** * The traditional binary prefixes, kilo, mega, ..., exa, * which can be represented by a 64-bit integer. - * TraditionalBinaryPrefix symbol are case insensitive. + * TraditionalBinaryPrefix symbol are case insensitive. */ public enum TraditionalBinaryPrefix { KILO(10), @@ -885,7 +882,7 @@ public static long string2long(String s) { /** * Convert a long integer to a string with traditional binary prefix. - * + * * @param n the value to be converted * @param unit The unit, e.g. "B" for bytes. * @param decimalPlaces The number of decimal places. @@ -966,7 +963,7 @@ public static String escapeHTML(String string) { } } } - + return sb.toString(); } @@ -991,7 +988,7 @@ public static String byteDesc(long len) { public static String limitDecimalTo2(double d) { return format("%.2f", d); } - + /** * Concatenates strings, using a separator. * @@ -1064,11 +1061,11 @@ public static String camelize(String s) { * must use a capturing group. The value of the first capturing group is used * to look up the replacement. If no replacement is found for the token, then * it is replaced with the empty string. 
- * + * * For example, assume template is "%foo%_%bar%_%baz%", pattern is "%(.*?)%", * and replacements contains 2 entries, mapping "foo" to "zoo" and "baz" to * "zaz". The result returned would be "zoo__zaz". - * + * * @param template String template to receive replacements * @param pattern Pattern to match for identifying tokens, must use a capturing * group @@ -1090,7 +1087,7 @@ public static String replaceTokens(String template, Pattern pattern, matcher.appendTail(sb); return sb.toString(); } - + /** * Get stack trace for a given thread. * @param t thread. @@ -1106,12 +1103,12 @@ public static String getStackTrace(Thread t) { } /** - * From a list of command-line arguments, remove both an option and the + * From a list of command-line arguments, remove both an option and the * next argument. * * @param name Name of the option to remove. Example: -foo. * @param args List of arguments. - * @return null if the option was not found; the value of the + * @return null if the option was not found; the value of the * option otherwise. * @throws IllegalArgumentException if the option's argument is not present */ @@ -1136,7 +1133,7 @@ public static String popOptionWithArgument(String name, List args) } return val; } - + /** * From a list of command-line arguments, remove an option. * @@ -1157,10 +1154,10 @@ public static boolean popOption(String name, List args) { } return false; } - + /** * From a list of command-line arguments, return the first non-option - * argument. Non-option arguments are those which either come after + * argument. Non-option arguments are those which either come after * a double dash (--) or do not start with a dash. * * @param args List of arguments. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java index 38777d8f66465..574d9062c3851 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SysInfoLinux.java @@ -22,7 +22,7 @@ import java.io.InputStreamReader; import java.io.IOException; import java.math.BigInteger; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.HashMap; @@ -247,7 +247,7 @@ private void readProcMemInfoFile(boolean readAgain) { try { fReader = new InputStreamReader( Files.newInputStream(Paths.get(procfsMemFile)), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); in = new BufferedReader(fReader); } catch (IOException f) { // shouldn't happen.... @@ -319,7 +319,7 @@ private void readProcCpuInfoFile() { try { fReader = new InputStreamReader(Files.newInputStream(Paths.get(procfsCpuFile)), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); in = new BufferedReader(fReader); } catch (IOException f) { // shouldn't happen.... @@ -380,7 +380,7 @@ private void readProcStatFile() { try { fReader = new InputStreamReader( Files.newInputStream(Paths.get(procfsStatFile)), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); in = new BufferedReader(fReader); } catch (IOException f) { // shouldn't happen.... 
@@ -435,7 +435,7 @@ private void readProcNetInfoFile() { try { fReader = new InputStreamReader( Files.newInputStream(Paths.get(procfsNetFile)), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); in = new BufferedReader(fReader); } catch (IOException f) { return; @@ -490,7 +490,7 @@ private void readProcDisksInfoFile() { try { in = new BufferedReader(new InputStreamReader( Files.newInputStream(Paths.get(procfsDisksFile)), - Charset.forName("UTF-8"))); + StandardCharsets.UTF_8)); } catch (IOException f) { return; } @@ -558,7 +558,7 @@ int readDiskBlockInformation(String diskName, int defSector) { try { in = new BufferedReader(new InputStreamReader( Files.newInputStream(Paths.get(procfsDiskSectorFile)), - Charset.forName("UTF-8"))); + StandardCharsets.UTF_8)); } catch (IOException f) { return defSector; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java index 88f9e63f18755..5c628d2b3a2ac 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -27,7 +28,6 @@ import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.data.Id; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.io.Files; @@ -149,7 +149,7 @@ public static List parseAuth(String authString) throws "Auth '" + comp + "' not of expected form scheme:auth"); } ret.add(new ZKAuthInfo(parts[0], - parts[1].getBytes(Charsets.UTF_8))); + parts[1].getBytes(StandardCharsets.UTF_8))); } return ret; } @@ -173,7 +173,7 @@ public static String resolveConfIndirection(String valInConf) return valInConf; } String path = valInConf.substring(1).trim(); - return Files.asCharSource(new File(path), Charsets.UTF_8).read().trim(); + return Files.asCharSource(new File(path), StandardCharsets.UTF_8).read().trim(); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java index 8fcf456c76a82..e90fe558a599f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/curator/ZKCuratorManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util.curator; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.LinkedList; import java.util.List; @@ -44,7 +44,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Helper class that provides utility methods specific to ZK operations. 
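[Editor's aside] The UTF-8 changes in SysInfoLinux, ZKUtil and ZKCuratorManager all follow one shape: a runtime Charset.forName("UTF-8") lookup (or a charset name string that drags in a checked UnsupportedEncodingException) is replaced by the compile-time constant StandardCharsets.UTF_8. A hedged sketch of the pattern, with made-up method and variable names:

import java.nio.charset.StandardCharsets;

final class Utf8Sketch {
  // Before: new String(bytes, Charset.forName("UTF-8")) — a lookup by name at runtime.
  // After: the JDK constant; no lookup and no checked UnsupportedEncodingException.
  static String decode(byte[] bytes) {
    return bytes == null ? null : new String(bytes, StandardCharsets.UTF_8);
  }

  static byte[] encode(String data) {
    return data.getBytes(StandardCharsets.UTF_8);
  }
}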
@@ -212,7 +212,7 @@ public byte[] getData(final String path, Stat stat) throws Exception { public String getStringData(final String path) throws Exception { byte[] bytes = getData(path); if (bytes != null) { - return new String(bytes, Charset.forName("UTF-8")); + return new String(bytes, StandardCharsets.UTF_8); } return null; } @@ -227,7 +227,7 @@ public String getStringData(final String path) throws Exception { public String getStringData(final String path, Stat stat) throws Exception { byte[] bytes = getData(path, stat); if (bytes != null) { - return new String(bytes, Charset.forName("UTF-8")); + return new String(bytes, StandardCharsets.UTF_8); } return null; } @@ -251,7 +251,7 @@ public void setData(String path, byte[] data, int version) throws Exception { * @throws Exception If it cannot contact Zookeeper. */ public void setData(String path, String data, int version) throws Exception { - byte[] bytes = data.getBytes(Charset.forName("UTF-8")); + byte[] bytes = data.getBytes(StandardCharsets.UTF_8); setData(path, bytes, version); } @@ -503,4 +503,4 @@ private void setJaasConfiguration(ZKClientConfig zkClientConfig) throws IOExcept zkClientConfig.setProperty(ZKClientConfig.LOGIN_CONTEXT_NAME_KEY, JAAS_CLIENT_ENTRY); } } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java index 8ca414400c8d0..bc3f786dbb3fc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java @@ -64,7 +64,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase { @SuppressWarnings("deprecation") @Override public void initializeMemberVariables() { - xmlFilename = new String("core-default.xml"); + xmlFilename = "core-default.xml"; configurationClasses = new Class[] { CommonConfigurationKeys.class, CommonConfigurationKeysPublic.class, diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 50f554ae19d78..e958b187cf36c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -669,7 +669,7 @@ public void testUnTar() throws Exception { OutputStream os = new FileOutputStream(simpleTar); try (TarOutputStream tos = new TarOutputStream(os)) { TarEntry te = new TarEntry("/bar/foo"); - byte[] data = "some-content".getBytes("UTF-8"); + byte[] data = "some-content".getBytes(StandardCharsets.UTF_8); te.setSize(data.length); tos.putNextEntry(te); tos.write(data); @@ -753,7 +753,7 @@ public void testUnZip() throws Exception { ZipArchiveList.add(new ZipArchiveEntry("foo_" + i)); ZipArchiveEntry archiveEntry = ZipArchiveList.get(i); archiveEntry.setUnixMode(count += 0100); - byte[] data = "some-content".getBytes("UTF-8"); + byte[] data = "some-content".getBytes(StandardCharsets.UTF_8); archiveEntry.setSize(data.length); tos.putArchiveEntry(archiveEntry); tos.write(data); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java index c6d2ff056a746..72186aa9601bb 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java @@ -28,7 +28,7 @@ import java.util.EnumSet; import java.util.Iterator; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.Options.CreateOpts; @@ -42,7 +42,7 @@ public class TestFilterFileSystem { - private static final Log LOG = FileSystem.LOG; + private static final Logger LOG = FileSystem.LOG; private static final Configuration conf = new Configuration(); @BeforeClass diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java index 5ed743f4c3ae9..e4d1c0bdad481 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFs.java @@ -23,14 +23,14 @@ import java.net.URI; import java.util.Iterator; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.viewfs.ConfigUtil; import org.junit.Test; public class TestFilterFs { - private static final Log LOG = FileSystem.LOG; + private static final Logger LOG = FileSystem.LOG; public static class DontCheck { public void checkScheme(URI uri, String supportedScheme) { } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java index 6415df6310fc2..0924138bd549a 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java @@ -30,6 +30,7 @@ import java.io.File; import java.io.IOException; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.HashSet; import java.util.Set; @@ -117,7 +118,7 @@ private void writeVersionToMasterIndexImpl(int version, Path masterIndexPath) th final FSDataOutputStream fsdos = localFileSystem.create(masterIndexPath); try { String versionString = version + "\n"; - fsdos.write(versionString.getBytes("UTF-8")); + fsdos.write(versionString.getBytes(StandardCharsets.UTF_8)); fsdos.flush(); } finally { fsdos.close(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index bda9d85832c72..317727b8c3bfe 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.fs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.permission.FsPermission; 
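[Editor's aside] The test-side changes in this stretch replace commons-logging Log fields with slf4j Logger fields, and further down LogFactory.getLog gives way to LoggerFactory.getLogger. A minimal sketch of the new declaration style with a hypothetical test class name; the parameterized message form matches the SignalLogger change earlier in the patch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ExampleTest {
  // Previously: private static final Log LOG = LogFactory.getLog(ExampleTest.class);
  private static final Logger LOG =
      LoggerFactory.getLogger(ExampleTest.class);

  void demo(int signalNumber, String signalName) {
    // slf4j uses {} placeholders instead of string concatenation.
    LOG.error("RECEIVED SIGNAL {}: SIG{}", signalNumber, signalName);
  }
}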
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java index c395afdb3779b..7420b47a98495 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/AbstractContractMultipartUploaderTest.java @@ -22,13 +22,13 @@ import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.util.HashMap; import java.util.Map; import java.util.Random; import java.util.concurrent.CompletableFuture; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.assertj.core.api.Assertions; import org.junit.Assume; import org.junit.Test; @@ -596,8 +596,8 @@ public void testMultipartUploadAbort() throws Exception { abortUpload(uploadHandle, file); String contents = "ThisIsPart49\n"; - int len = contents.getBytes(Charsets.UTF_8).length; - InputStream is = IOUtils.toInputStream(contents, "UTF-8"); + int len = contents.getBytes(StandardCharsets.UTF_8).length; + InputStream is = IOUtils.toInputStream(contents, StandardCharsets.UTF_8); intercept(IOException.class, () -> awaitFuture( @@ -624,7 +624,7 @@ public void testMultipartUploadAbort() throws Exception { public void testAbortUnknownUpload() throws Exception { Path file = methodPath(); ByteBuffer byteBuffer = ByteBuffer.wrap( - "invalid-handle".getBytes(Charsets.UTF_8)); + "invalid-handle".getBytes(StandardCharsets.UTF_8)); intercept(FileNotFoundException.class, () -> abortUpload(BBUploadHandle.from(byteBuffer), file)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java index b61abddd43426..bbccbfbc16eef 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/contract/ContractTestUtils.java @@ -45,6 +45,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -806,7 +807,7 @@ public static String readUTF8(FileSystem fs, try (FSDataInputStream in = fs.open(path)) { byte[] buf = new byte[length]; in.readFully(0, buf); - return new String(buf, "UTF-8"); + return new String(buf, StandardCharsets.UTF_8); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java index 8155d8e2b2ba1..050141d0654c0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/ftp/TestFTPFileSystem.java @@ -23,7 +23,7 @@ import java.nio.file.Files; import java.util.Comparator; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.net.ftp.FTP; import 
org.apache.commons.net.ftp.FTPClient; import org.apache.commons.net.ftp.FTPFile; @@ -235,4 +235,4 @@ public void testFTPSetTimeout() { ftp.setTimeout(client, conf); assertEquals(client.getControlKeepAliveTimeout(), timeout); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index 1ccc3400788d1..0b2c9603dbe4e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -47,7 +47,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java index adc5db87e7725..d09ed626be729 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemLocalFileSystem.java @@ -25,8 +25,8 @@ import java.io.IOException; import java.net.URI; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -51,8 +51,8 @@ */ public class TestViewFileSystemLocalFileSystem extends ViewFileSystemBaseTest { - private static final Log LOG = - LogFactory.getLog(TestViewFileSystemLocalFileSystem.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestViewFileSystemLocalFileSystem.class); @Override @Before diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java index ac7a1a6899425..d54992728e44e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemOverloadSchemeLocalFileSystem.java @@ -21,8 +21,8 @@ import java.net.URI; import java.net.URISyntaxException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -43,8 +43,8 @@ */ public class TestViewFileSystemOverloadSchemeLocalFileSystem { private static final String FILE = "file"; - private static final Log LOG = - LogFactory.getLog(TestViewFileSystemOverloadSchemeLocalFileSystem.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestViewFileSystemOverloadSchemeLocalFileSystem.class); private FileSystem 
fsTarget; private Configuration conf; private Path targetTestRoot; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java index 3c9713bf5fa1d..5dfe47c9753a7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/MiniZKFCCluster.java @@ -37,7 +37,7 @@ import org.apache.zookeeper.data.Stat; import org.apache.zookeeper.server.ZooKeeperServer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -371,4 +371,4 @@ protected List getAllOtherNodes() { return services; } } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java index 13f7eccd55aea..a027b4d682b9f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAAdmin.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.PrintStream; import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; @@ -30,7 +31,6 @@ import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -97,8 +97,8 @@ private Object runTool(String ... 
args) throws Exception { outBytes.reset(); LOG.info("Running: HAAdmin " + Joiner.on(" ").join(args)); int ret = tool.run(args); - errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8); - output = new String(outBytes.toByteArray(), Charsets.UTF_8); + errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8); + output = new String(outBytes.toByteArray(), StandardCharsets.UTF_8); LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output); return ret; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java index e201b0104660e..8681ab5671b7b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpnego.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.http; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; @@ -55,7 +55,7 @@ */ public class TestHttpServerWithSpnego { - static final Log LOG = LogFactory.getLog(TestHttpServerWithSpnego.class); + static final Logger LOG = LoggerFactory.getLogger(TestHttpServerWithSpnego.class); private static final String SECRET_STR = "secret"; private static final String HTTP_USER = "HTTP"; @@ -279,4 +279,4 @@ private HttpServer2.Builder getCommonBuilder() throws Exception { .addEndpoint(new URI("http://localhost:0")) .setFindPort(true); } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestIsActiveServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestIsActiveServlet.java index cfc7e359e94d4..22bea17a7c063 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestIsActiveServlet.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestIsActiveServlet.java @@ -27,6 +27,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import static org.junit.Assert.assertEquals; import static org.mockito.ArgumentMatchers.anyInt; @@ -90,6 +91,6 @@ protected boolean isActive() { private String doGet() throws IOException { servlet.doGet(req, resp); - return new String(respOut.toByteArray(), "UTF-8"); + return new String(respOut.toByteArray(), StandardCharsets.UTF_8); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java index 91c0f1b4429d5..f9a5a30966419 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestSecureIOUtils.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -57,7 +58,7 @@ public static void makeTestFile() throws Exception { for (File f : new File[] 
{ testFilePathIs, testFilePathRaf, testFilePathFadis }) { FileOutputStream fos = new FileOutputStream(f); - fos.write("hello".getBytes("UTF-8")); + fos.write("hello".getBytes(StandardCharsets.UTF_8)); fos.close(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java index bd0105ba2829b..74656512b4a4f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java @@ -24,8 +24,6 @@ import java.nio.charset.CharacterCodingException; import java.nio.charset.StandardCharsets; import java.util.Random; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; - import org.apache.hadoop.constants.ConfigConstants; import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import org.junit.Test; @@ -108,7 +106,7 @@ public void testCoding() throws Exception { ByteBuffer bb = Text.encode(before); byte[] utf8Text = bb.array(); - byte[] utf8Java = before.getBytes("UTF-8"); + byte[] utf8Java = before.getBytes(StandardCharsets.UTF_8); assertEquals(0, WritableComparator.compareBytes( utf8Text, 0, bb.limit(), utf8Java, 0, utf8Java.length)); @@ -390,7 +388,7 @@ public void testReadWriteOperations() { @Test public void testReadWithKnownLength() throws IOException { String line = "hello world"; - byte[] inputBytes = line.getBytes(Charsets.UTF_8); + byte[] inputBytes = line.getBytes(StandardCharsets.UTF_8); DataInputBuffer in = new DataInputBuffer(); Text text = new Text(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java index 2d60b5ecca184..6899d1cdcabf7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestUTF8.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.UTFDataFormatException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.Random; import org.apache.hadoop.test.GenericTestUtils; @@ -110,7 +111,7 @@ public void testNullEncoding() throws Exception { DataOutputBuffer dob = new DataOutputBuffer(); new UTF8(s).write(dob); - assertEquals(s, new String(dob.getData(), 2, dob.getLength()-2, "UTF-8")); + assertEquals(s, new String(dob.getData(), 2, dob.getLength()-2, StandardCharsets.UTF_8)); } /** @@ -125,7 +126,7 @@ public void testNonBasicMultilingualPlane() throws Exception { String catFace = "\uD83D\uDC31"; // This encodes to 4 bytes in UTF-8: - byte[] encoded = catFace.getBytes("UTF-8"); + byte[] encoded = catFace.getBytes(StandardCharsets.UTF_8); assertEquals(4, encoded.length); assertEquals("f09f90b1", StringUtils.byteToHexString(encoded)); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java index 631991a03cf9c..a12be5a5512f9 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderBenchmark.java @@ -17,7 +17,7 @@ */ 
package org.apache.hadoop.io.erasurecode.rawcoder; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.io.erasurecode.ErasureCoderOptions; import org.apache.hadoop.util.StopWatch; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java deleted file mode 100644 index d41a58782d0ed..0000000000000 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLog4Json.java +++ /dev/null @@ -1,268 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.log; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ContainerNode; -import org.junit.Test; -import static org.junit.Assert.*; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.util.Time; -import org.apache.log4j.Appender; -import org.apache.log4j.Category; -import org.apache.log4j.Level; -import org.apache.log4j.Logger; -import org.apache.log4j.WriterAppender; -import org.apache.log4j.spi.HierarchyEventListener; -import org.apache.log4j.spi.LoggerFactory; -import org.apache.log4j.spi.LoggerRepository; -import org.apache.log4j.spi.ThrowableInformation; - -import java.io.IOException; -import java.io.StringWriter; -import java.io.Writer; -import java.net.NoRouteToHostException; -import java.util.Enumeration; -import java.util.Vector; - -public class TestLog4Json { - - private static final Log LOG = LogFactory.getLog(TestLog4Json.class); - - @Test - public void testConstruction() throws Throwable { - Log4Json l4j = new Log4Json(); - String outcome = l4j.toJson(new StringWriter(), - "name", 0, "DEBUG", "thread1", - "hello, world", null).toString(); - println("testConstruction", outcome); - } - - @Test - public void testException() throws Throwable { - Exception e = - new NoRouteToHostException("that box caught fire 3 years ago"); - ThrowableInformation ti = new ThrowableInformation(e); - Log4Json l4j = new Log4Json(); - long timeStamp = Time.now(); - String outcome = l4j.toJson(new StringWriter(), - "testException", - timeStamp, - "INFO", - "quoted\"", - "new line\n and {}", - ti) - .toString(); - println("testException", outcome); - } - - @Test - public void testNestedException() throws Throwable { - Exception e = - new NoRouteToHostException("that box caught fire 3 years ago"); - Exception ioe = new IOException("Datacenter problems", e); - ThrowableInformation ti = new ThrowableInformation(ioe); - Log4Json l4j = new Log4Json(); - long timeStamp = Time.now(); - String outcome = l4j.toJson(new StringWriter(), - 
"testNestedException", - timeStamp, - "INFO", - "quoted\"", - "new line\n and {}", - ti) - .toString(); - println("testNestedException", outcome); - ContainerNode rootNode = Log4Json.parse(outcome); - assertEntryEquals(rootNode, Log4Json.LEVEL, "INFO"); - assertEntryEquals(rootNode, Log4Json.NAME, "testNestedException"); - assertEntryEquals(rootNode, Log4Json.TIME, timeStamp); - assertEntryEquals(rootNode, Log4Json.EXCEPTION_CLASS, - ioe.getClass().getName()); - JsonNode node = assertNodeContains(rootNode, Log4Json.STACK); - assertTrue("Not an array: " + node, node.isArray()); - node = assertNodeContains(rootNode, Log4Json.DATE); - assertTrue("Not a string: " + node, node.isTextual()); - //rather than try and make assertions about the format of the text - //message equalling another ISO date, this test asserts that the hypen - //and colon characters are in the string. - String dateText = node.textValue(); - assertTrue("No '-' in " + dateText, dateText.contains("-")); - assertTrue("No '-' in " + dateText, dateText.contains(":")); - - } - - - /** - * Create a log instance and and log to it - * @throws Throwable if it all goes wrong - */ - @Test - public void testLog() throws Throwable { - String message = "test message"; - Throwable throwable = null; - String json = logOut(message, throwable); - println("testLog", json); - } - - /** - * Create a log instance and and log to it - * @throws Throwable if it all goes wrong - */ - @Test - public void testLogExceptions() throws Throwable { - String message = "test message"; - Throwable inner = new IOException("Directory / not found"); - Throwable throwable = new IOException("startup failure", inner); - String json = logOut(message, throwable); - println("testLogExceptions", json); - } - - - void assertEntryEquals(ContainerNode rootNode, String key, String value) { - JsonNode node = assertNodeContains(rootNode, key); - assertEquals(value, node.textValue()); - } - - private JsonNode assertNodeContains(ContainerNode rootNode, String key) { - JsonNode node = rootNode.get(key); - if (node == null) { - fail("No entry of name \"" + key + "\" found in " + rootNode.toString()); - } - return node; - } - - void assertEntryEquals(ContainerNode rootNode, String key, long value) { - JsonNode node = assertNodeContains(rootNode, key); - assertEquals(value, node.numberValue()); - } - - /** - * Print out what's going on. The logging APIs aren't used and the text - * delimited for more details - * - * @param name name of operation - * @param text text to print - */ - private void println(String name, String text) { - System.out.println(name + ": #" + text + "#"); - } - - private String logOut(String message, Throwable throwable) { - StringWriter writer = new StringWriter(); - Logger logger = createLogger(writer); - logger.info(message, throwable); - //remove and close the appender - logger.removeAllAppenders(); - return writer.toString(); - } - - public Logger createLogger(Writer writer) { - TestLoggerRepository repo = new TestLoggerRepository(); - Logger logger = repo.getLogger("test"); - Log4Json layout = new Log4Json(); - WriterAppender appender = new WriterAppender(layout, writer); - logger.addAppender(appender); - return logger; - } - - /** - * This test logger avoids integrating with the main runtimes Logger hierarchy - * in ways the reader does not want to know. 
- */ - private static class TestLogger extends Logger { - private TestLogger(String name, LoggerRepository repo) { - super(name); - repository = repo; - setLevel(Level.INFO); - } - - } - - public static class TestLoggerRepository implements LoggerRepository { - @Override - public void addHierarchyEventListener(HierarchyEventListener listener) { - } - - @Override - public boolean isDisabled(int level) { - return false; - } - - @Override - public void setThreshold(Level level) { - } - - @Override - public void setThreshold(String val) { - } - - @Override - public void emitNoAppenderWarning(Category cat) { - } - - @Override - public Level getThreshold() { - return Level.ALL; - } - - @Override - public Logger getLogger(String name) { - return new TestLogger(name, this); - } - - @Override - public Logger getLogger(String name, LoggerFactory factory) { - return new TestLogger(name, this); - } - - @Override - public Logger getRootLogger() { - return new TestLogger("root", this); - } - - @Override - public Logger exists(String name) { - return null; - } - - @Override - public void shutdown() { - } - - @Override - public Enumeration getCurrentLoggers() { - return new Vector().elements(); - } - - @Override - public Enumeration getCurrentCategories() { - return new Vector().elements(); - } - - @Override - public void fireAddAppenderEvent(Category logger, Appender appender) { - } - - @Override - public void resetConfiguration() { - } - } -} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index 3af70e95548ba..78d99073756be 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -22,9 +22,6 @@ import java.net.URI; import java.util.concurrent.Callable; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -70,8 +67,7 @@ public class TestLogLevel extends KerberosSecurityTestcase { private final String logName = TestLogLevel.class.getName(); private String clientPrincipal; private String serverPrincipal; - private final Log testlog = LogFactory.getLog(logName); - private final Logger log = ((Log4JLogger)testlog).getLogger(); + private final Logger log = Logger.getLogger(logName); private final static String PRINCIPAL = "loglevel.principal"; private final static String KEYTAB = "loglevel.keytab"; private static final String PREFIX = "hadoop.http.authentication."; @@ -436,4 +432,4 @@ public void testLogLevelByHttpsWithSpnego() throws Exception { "Unexpected end of file from server", e.getCause()); } } -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java index 4cf4894ff8352..c1d5f8776a178 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/impl/TestStatsDMetrics.java @@ -25,7 +25,7 @@ import java.io.IOException; import 
java.net.DatagramPacket; import java.net.DatagramSocket; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -75,7 +75,7 @@ public void testPutMetrics() throws IOException, InterruptedException { sock.receive(p); String result =new String(p.getData(), 0, p.getLength(), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); assertTrue( "Received data did not match data sent", result.equals("host.process.jvm.Context.foo1:1.25|c") || @@ -109,7 +109,7 @@ public void testPutMetrics2() throws IOException { sink.putMetrics(record); sock.receive(p); String result = - new String(p.getData(), 0, p.getLength(), Charset.forName("UTF-8")); + new String(p.getData(), 0, p.getLength(), StandardCharsets.UTF_8); assertTrue("Received data did not match data sent", result.equals("process.jvm.Context.foo1:1|c") || diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java index 67889405c1068..420c16bef577e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/metrics2/sink/TestFileSink.java @@ -23,6 +23,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.util.regex.Pattern; import org.apache.hadoop.io.IOUtils; @@ -113,7 +114,7 @@ public void testFileSink() throws IOException { is = new FileInputStream(outFile); baos = new ByteArrayOutputStream((int)outFile.length()); IOUtils.copyBytes(is, baos, 1024, true); - outFileContent = new String(baos.toByteArray(), "UTF-8"); + outFileContent = new String(baos.toByteArray(), StandardCharsets.UTF_8); } finally { IOUtils.cleanupWithLogger(null, baos, is); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java index 86870e1257119..d4aa665ae9f1b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java @@ -21,11 +21,11 @@ import static org.junit.Assert.assertEquals; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -41,7 +41,7 @@ public class TestTableMapping { public void testResolve() throws IOException { File mapFile = File.createTempFile(getClass().getSimpleName() + ".testResolve", ".txt"); - Files.asCharSink(mapFile, Charsets.UTF_8).write( + Files.asCharSink(mapFile, StandardCharsets.UTF_8).write( hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n"); mapFile.deleteOnExit(); TableMapping mapping = new TableMapping(); @@ -64,7 +64,7 @@ public void testResolve() throws IOException { public void testTableCaching() throws IOException { File mapFile = File.createTempFile(getClass().getSimpleName() + ".testTableCaching", ".txt"); - Files.asCharSink(mapFile, Charsets.UTF_8).write( + Files.asCharSink(mapFile, StandardCharsets.UTF_8).write( hostName1 + " /rack1\n" + hostName2 + 
"\t/rack2\n"); mapFile.deleteOnExit(); TableMapping mapping = new TableMapping(); @@ -128,7 +128,7 @@ public void testFileDoesNotExist() { public void testClearingCachedMappings() throws IOException { File mapFile = File.createTempFile(getClass().getSimpleName() + ".testClearingCachedMappings", ".txt"); - Files.asCharSink(mapFile, Charsets.UTF_8).write( + Files.asCharSink(mapFile, StandardCharsets.UTF_8).write( hostName1 + " /rack1\n" + hostName2 + "\t/rack2\n"); mapFile.deleteOnExit(); @@ -147,7 +147,7 @@ public void testClearingCachedMappings() throws IOException { assertEquals("/rack1", result.get(0)); assertEquals("/rack2", result.get(1)); - Files.asCharSink(mapFile, Charsets.UTF_8).write(""); + Files.asCharSink(mapFile, StandardCharsets.UTF_8).write(""); mapping.reloadCachedMappings(); @@ -166,7 +166,7 @@ public void testClearingCachedMappings() throws IOException { public void testBadFile() throws IOException { File mapFile = File.createTempFile(getClass().getSimpleName() + ".testBadFile", ".txt"); - Files.asCharSink(mapFile, Charsets.UTF_8).write("bad contents"); + Files.asCharSink(mapFile, StandardCharsets.UTF_8).write("bad contents"); mapFile.deleteOnExit(); TableMapping mapping = new TableMapping(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java index f082c3b0f607a..8a500cc2b9725 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/ssl/TestReloadingX509KeyManager.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.security.ssl; -import org.apache.hadoop.thirdparty.com.google.common.base.Supplier; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.test.GenericTestUtils; import org.junit.BeforeClass; @@ -32,6 +31,7 @@ import java.security.cert.X509Certificate; import java.util.Timer; import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; import static org.apache.hadoop.security.ssl.KeyStoreTestUtil.*; import static org.junit.Assert.assertEquals; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java index 6dc8c59b25e40..fb1e15bd1bd50 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestZKDelegationTokenSecretManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.security.token.delegation; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -382,7 +383,7 @@ public List getDefaultAcl() { .connectString(connectString) .retryPolicy(retryPolicy) .aclProvider(digestAclProvider) - .authorization("digest", userPass.getBytes("UTF-8")) + .authorization("digest", userPass.getBytes(StandardCharsets.UTF_8)) .build(); curatorFramework.start(); ZKDelegationTokenSecretManager.setCurator(curatorFramework); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java index 44f28003bc33d..d9c7c05773cf7 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java @@ -47,13 +47,11 @@ import java.util.function.Supplier; import java.util.regex.Pattern; -import org.apache.commons.io.IOUtils; import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.commons.io.IOUtils; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.StringUtils; @@ -117,29 +115,11 @@ public abstract class GenericTestUtils { public static final String ERROR_INVALID_ARGUMENT = "Total wait time should be greater than check interval time"; - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - @SuppressWarnings("unchecked") - public static void disableLog(Log log) { - // We expect that commons-logging is a wrapper around Log4j. - disableLog((Log4JLogger) log); - } - @Deprecated public static Logger toLog4j(org.slf4j.Logger logger) { return LogManager.getLogger(logger.getName()); } - /** - * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead - */ - @Deprecated - public static void disableLog(Log4JLogger log) { - log.getLogger().setLevel(Level.OFF); - } - /** * @deprecated use {@link #disableLog(org.slf4j.Logger)} instead */ @@ -152,45 +132,6 @@ public static void disableLog(org.slf4j.Logger logger) { disableLog(toLog4j(logger)); } - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - @SuppressWarnings("unchecked") - public static void setLogLevel(Log log, Level level) { - // We expect that commons-logging is a wrapper around Log4j. - setLogLevel((Log4JLogger) log, level); - } - - /** - * A helper used in log4j2 migration to accept legacy - * org.apache.commons.logging apis. - *

- * And will be removed after migration. - * - * @param log a log - * @param level level to be set - */ - @Deprecated - public static void setLogLevel(Log log, org.slf4j.event.Level level) { - setLogLevel(log, Level.toLevel(level.toString())); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated - public static void setLogLevel(Log4JLogger log, Level level) { - log.getLogger().setLevel(level); - } - - /** - * @deprecated - * use {@link #setLogLevel(org.slf4j.Logger, org.slf4j.event.Level)} instead - */ - @Deprecated public static void setLogLevel(Logger logger, Level level) { logger.setLevel(level); } @@ -535,15 +476,17 @@ public static class LogCapturer { private WriterAppender appender; private Logger logger; - public static LogCapturer captureLogs(Log l) { - Logger logger = ((Log4JLogger)l).getLogger(); - return new LogCapturer(logger); - } - public static LogCapturer captureLogs(org.slf4j.Logger logger) { + if (logger.getName().equals("root")) { + return new LogCapturer(org.apache.log4j.Logger.getRootLogger()); + } return new LogCapturer(toLog4j(logger)); } + public static LogCapturer captureLogs(Logger logger) { + return new LogCapturer(logger); + } + private LogCapturer(Logger logger) { this.logger = logger; Appender defaultAppender = Logger.getRootLogger().getAppender("stdout"); @@ -1069,4 +1012,4 @@ public static String filenameOfIndex(final int i) { EXECUTOR_THREAD_COUNT * 2, 30, TimeUnit.SECONDS, "test-operations"); -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java index da6abc5095351..b968fecf4805c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/LambdaTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.test; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.junit.Assert; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java index eb8d938994735..da440b1d8cdc3 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/MetricsAsserts.java @@ -18,7 +18,7 @@ package org.apache.hadoop.test; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.junit.Assert; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java index 85d95738b5ef5..0cee4d2671334 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java @@ -13,7 +13,7 @@ */ package org.apache.hadoop.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.io.BufferedInputStream; import 
java.io.BufferedOutputStream; import java.io.File; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java index 529887b297553..716dfe0c36d56 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestClasspath.java @@ -24,6 +24,7 @@ import java.io.IOException; import java.io.PrintStream; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.jar.Attributes; import java.util.jar.JarFile; import java.util.jar.Manifest; @@ -46,7 +47,7 @@ public class TestClasspath { .class); private static final File TEST_DIR = GenericTestUtils.getTestDir( "TestClasspath"); - private static final Charset UTF8 = Charset.forName("UTF-8"); + private static final Charset UTF8 = StandardCharsets.UTF_8; static { ExitUtil.disableSystemExit(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java index 85d649cc0750a..e47c3e57ba76e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestGenericsUtil.java @@ -140,7 +140,7 @@ public void testGetClass() { @Test public void testIsLog4jLogger() throws Exception { - assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger(null)); + assertFalse("False if clazz is null", GenericsUtil.isLog4jLogger((Class) null)); assertTrue("The implementation is Log4j", GenericsUtil.isLog4jLogger(TestGenericsUtil.class)); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java index e58fb3bffde2b..993e67361920b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.util; -import org.apache.commons.logging.LogFactory; +import org.slf4j.LoggerFactory; import org.apache.hadoop.test.GenericTestUtils; import org.junit.Assert; import org.junit.Test; @@ -43,7 +43,7 @@ public class TestJarFinder { public void testJar() throws Exception { //picking a class that is for sure in a JAR in the classpath - String jar = JarFinder.getJar(LogFactory.class); + String jar = JarFinder.getJar(LoggerFactory.class); Assert.assertTrue(new File(jar).exists()); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java index 7fb90a15e3ad5..bf3e58793bb24 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestPureJavaCrc32.java @@ -21,6 +21,7 @@ import java.io.FileOutputStream; import java.io.PrintStream; import java.lang.reflect.Constructor; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Properties; @@ -49,7 +50,7 @@ public void testCorrectness() 
throws Exception { checkOnBytes(new byte[] {40, 60, 97, -70}, false); - checkOnBytes("hello world!".getBytes("UTF-8"), false); + checkOnBytes("hello world!".getBytes(StandardCharsets.UTF_8), false); for (int i = 0; i < 10000; i++) { byte randomBytes[] = new byte[new Random().nextInt(2048)]; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java index b61cebc0a62b7..9f9b3eca5061e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestSignalLogger.java @@ -28,13 +28,13 @@ public class TestSignalLogger { public static final Logger LOG = LoggerFactory.getLogger(TestSignalLogger.class); - + @Test(timeout=60000) public void testInstall() throws Exception { Assume.assumeTrue(SystemUtils.IS_OS_UNIX); - SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); + SignalLogger.INSTANCE.register(LOG); try { - SignalLogger.INSTANCE.register(LogAdapter.create(LOG)); + SignalLogger.INSTANCE.register(LOG); Assert.fail("expected IllegalStateException from double registration"); } catch (IllegalStateException e) { // fall through diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java index 93790eb1350a7..d12fff2732cd6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java @@ -22,6 +22,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; import org.apache.hadoop.test.GenericTestUtils; @@ -31,7 +32,6 @@ import org.apache.zookeeper.data.ACL; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.io.Files; public class TestZKUtil { @@ -131,7 +131,7 @@ public void testConfIndirection() throws IOException { assertEquals("x", ZKUtil.resolveConfIndirection("x")); TEST_FILE.getParentFile().mkdirs(); - Files.asCharSink(TEST_FILE, Charsets.UTF_8).write("hello world"); + Files.asCharSink(TEST_FILE, StandardCharsets.UTF_8).write("hello world"); assertEquals("hello world", ZKUtil.resolveConfIndirection( "@" + TEST_FILE.getAbsolutePath())); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java index fd15a0c2b1bf4..86b1e6b5b4770 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/curator/TestZKCuratorManager.java @@ -22,6 +22,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.List; @@ -111,7 +112,7 @@ public void testGetStringData() throws Exception { curator.create(node1); assertNull(curator.getStringData(node1)); - byte[] setData = "setData".getBytes("UTF-8"); + byte[] setData = "setData".getBytes(StandardCharsets.UTF_8); 
curator.setData(node1, setData, -1); assertEquals("setData", curator.getStringData(node1)); @@ -130,7 +131,7 @@ public void testTransaction() throws Exception { String fencingNodePath = "/fencing"; String node1 = "/node1"; String node2 = "/node2"; - byte[] testData = "testData".getBytes("UTF-8"); + byte[] testData = "testData".getBytes(StandardCharsets.UTF_8); assertFalse(curator.exists(fencingNodePath)); assertFalse(curator.exists(node1)); assertFalse(curator.exists(node2)); @@ -148,7 +149,7 @@ public void testTransaction() throws Exception { assertTrue(Arrays.equals(testData, curator.getData(node1))); assertTrue(Arrays.equals(testData, curator.getData(node2))); - byte[] setData = "setData".getBytes("UTF-8"); + byte[] setData = "setData".getBytes(StandardCharsets.UTF_8); txn = curator.createTransaction(zkAcl, fencingNodePath); txn.setData(node1, setData, -1); txn.delete(node2); diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java index be0f8d3fbc5d4..d719f5516b70c 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMS.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.crypto.key.kms.server; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.KMSUtil; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java index 596140dbc160c..14c2ae907b14d 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSAudit.java @@ -21,7 +21,7 @@ import static org.apache.hadoop.crypto.key.kms.server.KMSAuditLogger.OpStatus; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.kms.server.KMSACLs.Type; import org.apache.hadoop.crypto.key.kms.server.KeyAuthorizationKeyProvider.KeyOpType; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java index b9b8d9cee6673..e29ce4ba5f0f5 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSJSONWriter.java @@ -33,7 +33,7 @@ import java.io.Writer; import java.lang.annotation.Annotation; import java.lang.reflect.Type; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; @@ -64,8 +64,7 @@ public void writeTo(Object obj, Class aClass, Type type, Annotation[] annotations, MediaType mediaType, MultivaluedMap stringObjectMultivaluedMap, OutputStream outputStream) throws 
IOException, WebApplicationException { - Writer writer = new OutputStreamWriter(outputStream, Charset - .forName("UTF-8")); + Writer writer = new OutputStreamWriter(outputStream, StandardCharsets.UTF_8); JsonSerialization.writer().writeValue(writer, obj); } diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java index a92dd1045c01a..34fa0d42f67a2 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java @@ -26,7 +26,7 @@ import com.codahale.metrics.JmxReporter; import com.codahale.metrics.Meter; import com.codahale.metrics.MetricRegistry; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.CachingKeyProvider; diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java index fe3207b31c27a..b4427529a7b2f 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KeyAuthorizationKeyProvider.java @@ -32,7 +32,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AuthorizationException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; diff --git a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java index bc4bbc3df70bd..0fc135faea6fa 100644 --- a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java +++ b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/MiniKMS.java @@ -26,7 +26,7 @@ import java.io.Writer; import java.net.URL; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java index 97b8a444ac28a..313872e5f17fe 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java @@ -32,7 +32,7 @@ import org.apache.hadoop.util.LightWeightGSet.LinkedElement; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import 
org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java index 985629e0285cb..978ed02d102ad 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcReply.java @@ -20,7 +20,7 @@ import org.apache.hadoop.oncrpc.security.RpcAuthInfo; import org.apache.hadoop.oncrpc.security.Verifier; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Represents an RPC message of type RPC reply as defined in RFC 1831 diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java index 38d82967e689f..1bf860d49b45d 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java @@ -24,7 +24,7 @@ import io.netty.buffer.Unpooled; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Utility class for building XDR messages based on RFC 4506. diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java index f62dc6bd223b7..71935d2616d4b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java @@ -19,7 +19,7 @@ import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** Credential used by AUTH_NONE */ public class CredentialsNone extends Credentials { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java index 005fe838a3149..555b2e0701229 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java @@ -19,7 +19,7 @@ import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** Verifier used by AUTH_NONE. 
*/ public class VerifierNone extends Verifier { diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java index a1349f3e26f00..db7270bb92ed4 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/cli/RegistryCli.java @@ -27,7 +27,7 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.CommandLineParser; import org.apache.commons.cli.GnuParser; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java index 8a26b4b450def..23467ebd15c8c 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/DNSOperationsFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.api; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.registry.server.dns.RegistryDNS; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java index 786bec040b22d..43b54f5625e59 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryOperationsFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.api; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.ServiceStateException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java index 04aabfc635bed..dafd3e2d00b4a 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/JsonSerDeser.java @@ -28,6 +28,7 @@ import java.io.EOFException; import java.io.IOException; +import java.nio.charset.StandardCharsets; /** * Support for marshalling objects to and from JSON. 
@@ -47,7 +48,6 @@ @InterfaceStability.Evolving public class JsonSerDeser extends JsonSerialization { - private static final String UTF_8 = "UTF-8"; public static final String E_NO_DATA = "No data at path"; public static final String E_DATA_TOO_SHORT = "Data at path too short"; public static final String E_MISSING_MARKER_STRING = @@ -102,7 +102,7 @@ public T fromBytes(String path, byte[] bytes, String marker) if (StringUtils.isNotEmpty(marker) && len < marker.length()) { throw new NoRecordException(path, E_DATA_TOO_SHORT); } - String json = new String(bytes, 0, len, UTF_8); + String json = new String(bytes, 0, len, StandardCharsets.UTF_8); if (StringUtils.isNotEmpty(marker) && !json.contains(marker)) { throw new NoRecordException(path, E_MISSING_MARKER_STRING + marker); diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java index 09df00d083c3e..9fa4b8d84d85e 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryPathUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.binding; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.PathNotFoundException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java index 9a4369cdda385..1ec6a05868361 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryTypeUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.binding; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.exceptions.InvalidRecordException; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java index 241324800b948..cff70a613783a 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/binding/RegistryUtils.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.binding; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git 
a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java index cff6a7d8a9ada..d5e3bde0cbed3 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/FSRegistryOperationsService.java @@ -48,7 +48,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Filesystem-based implementation of RegistryOperations. This class relies diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java index 7bc21122c0c0e..a3095e3a3a2e9 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/CuratorService.java @@ -22,7 +22,7 @@ import org.apache.curator.framework.recipes.cache.CuratorCacheBridge; import org.apache.curator.framework.recipes.cache.CuratorCacheListener; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.curator.ensemble.EnsembleProvider; import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; import org.apache.curator.framework.CuratorFramework; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java index e46a016baa07d..49ea16bba7d62 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistryOperationsService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.client.impl.zk; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.api.BindFlags; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java index 045a1a99188d5..018cca2f5ddba 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.impl.zk; import org.apache.hadoop.security.authentication.util.JaasConfiguration; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Splitter; import org.apache.hadoop.util.Lists; import org.apache.commons.lang3.StringUtils; @@ -43,6 +43,7 @@ import javax.security.auth.login.AppConfigurationEntry; import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Collections; @@ -296,7 +297,7 @@ private void initSecurity() throws IOException { digestAuthUser = id; digestAuthPassword = pass; String authPair = id + ":" + pass; - digestAuthData = authPair.getBytes("UTF-8"); + digestAuthData = authPair.getBytes(StandardCharsets.UTF_8); if (LOG.isDebugEnabled()) { LOG.debug("Auth is Digest ACL: {}", aclToString(acl)); } diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java index 43503e8fc7a7a..e1979457e16be 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/ZKPathDumper.java @@ -19,7 +19,7 @@ package org.apache.hadoop.registry.client.impl.zk; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.GetChildrenBuilder; import org.apache.zookeeper.data.ACL; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java index b92b93df7be55..f81feadd640e5 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/Endpoint.java @@ -20,7 +20,7 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.registry.client.binding.JsonSerDeser; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java index 1a85436ed17ef..467b61e7d77ec 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/client/types/ServiceRecord.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonAnyGetter; import com.fasterxml.jackson.annotation.JsonAnySetter; import com.fasterxml.jackson.annotation.JsonInclude; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index f14c323971e44..e99c49f7dc6a8 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -80,6 +80,7 @@ import java.nio.channels.DatagramChannel; import java.nio.channels.ServerSocketChannel; import java.nio.channels.SocketChannel; +import java.nio.charset.StandardCharsets; import java.security.KeyFactory; import java.security.NoSuchAlgorithmException; import java.security.PrivateKey; @@ -628,7 +629,7 @@ private void enableDNSSECIfNecessary(Zone zone, Configuration conf, Name zoneName = zone.getOrigin(); DNSKEYRecord dnskeyRecord = dnsKeyRecs.get(zoneName); if (dnskeyRecord == null) { - byte[] key = Base64.decodeBase64(publicKey.getBytes("UTF-8")); + byte[] key = Base64.decodeBase64(publicKey.getBytes(StandardCharsets.UTF_8)); dnskeyRecord = new DNSKEYRecord(zoneName, DClass.IN, ttl, DNSKEYRecord.Flags.ZONE_KEY, diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java index 8d0a38cfd47f9..446d6ea2ff06e 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNSServer.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.registry.server.dns; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.DNSOperationsFactory; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java index 8d395e4c5c763..9b2d0d404309c 100644 --- a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/integration/SelectByYarnPersistence.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.server.integration; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.registry.client.types.RegistryPathStatus; import org.apache.hadoop.registry.client.types.ServiceRecord; diff --git a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java index 994a2565c309a..dc359e889c943 100644 --- 
a/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java +++ b/hadoop-common-project/hadoop-registry/src/main/java/org/apache/hadoop/registry/server/services/MicroZookeeperService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.registry.server.services; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.curator.ensemble.fixed.FixedEnsembleProvider; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml index aa09c610c6fe8..9bfa094dcc034 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml @@ -213,6 +213,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop.hdfs.protocol.proto
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <dependencies>
+          <dependency>
+            <groupId>de.skuzzle.enforcer</groupId>
+            <artifactId>restrict-imports-enforcer-rule</artifactId>
+            <version>${restrict-imports.enforcer.version}</version>
+          </dependency>
+        </dependencies>
+        <executions>
+          <execution>
+            <id>banned-illegal-imports</id>
+            <phase>process-sources</phase>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <restrictImports implementation="de.skuzzle.enforcer.restrictimports.rule.RestrictImports">
+                  <includeTestCode>true</includeTestCode>
+                  <reason>Use hadoop-common provided Preconditions rather than the one provided by Guava</reason>
+                  <bannedImports>
+                    <bannedImport>org.apache.hadoop.thirdparty.com.google.common.base.Preconditions</bannedImport>
+                    <bannedImport>com.google.common.base.Preconditions</bannedImport>
+                  </bannedImports>
+                </restrictImports>
+              </rules>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java index 7b4e03566c51d..2377baa4fec25 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/ClientContext.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 7a9ab0992159e..7eb9ae0a0c496 100755 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -185,6 +185,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DataChecksum.Type; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; import org.apache.hadoop.tracing.TraceScope; @@ -194,8 +195,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; /******************************************************** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 1208bdaef07f2..558867ce37dc4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java @@ -73,7 +73,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /**************************************************************** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java index f310709b6fad0..0297f192f9570 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CreateFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java index 2b3c67683c730..f867488320781 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes; import java.net.URISyntaxException; @@ -143,7 +143,10 @@ public static byte[][] bytes2byteArray(byte[] bytes) { */ public static byte[][] bytes2byteArray(byte[] bytes, int len, byte separator) { - Preconditions.checkPositionIndex(len, bytes.length); + if (len < 0 || len > bytes.length) { + throw new IndexOutOfBoundsException( + "Incorrect index [len, size] [" + len + ", " + bytes.length + "]"); + } if (len == 0) { return new byte[][]{null}; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index acbe8123e2ecb..42f2cb3d69cd4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -19,8 +19,9 @@ package org.apache.hadoop.hdfs; import org.apache.hadoop.ipc.RpcNoSuchMethodException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.classification.VisibleForTesting; +import 
org.apache.hadoop.util.Preconditions; import org.apache.commons.collections.list.TreeList; import org.apache.hadoop.fs.LeaseRecoverable; import org.apache.hadoop.fs.SafeMode; @@ -118,6 +119,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.ChunkedArrayList; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java index 1d8c8e632a38b..aa9577330cfae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java @@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java index abcc7e011ade5..a26a518a8395d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PeerCache.java @@ -24,7 +24,7 @@ import java.util.Map.Entry; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java index efadedb8f082f..12328ebb0f01b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/PositionStripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.util.StripedBlockUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java index bff13bfdc8957..730307b4434cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.util.StripedBlockUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java index 78e8b7ecde54a..ab9d138ff1d8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripeReader.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java index f3af0b580d804..ea9df7cab1c0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/XAttrHelper.java @@ -26,8 +26,10 @@ import org.apache.hadoop.fs.XAttr.NameSpace; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.StringUtils; + +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java index 5a615bbd62de4..af1e92e2fb449 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlock; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The Hdfs implementation of {@link FSDataInputStream}. diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java index 8af3417ca9fdf..cc7e7cd21b090 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataOutputStream.java @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSOutputStream; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The Hdfs implementation of {@link FSDataOutputStream}. 
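Most hunks above and below are the same mechanical swap: org.apache.hadoop.util.Preconditions replaces the Guava class so existing checkNotNull/checkArgument call sites keep their shape. A short usage sketch follows; it is illustrative only, assumes hadoop-common is on the classpath, and assumes the two-argument overloads behave like their Guava counterparts (NullPointerException / IllegalArgumentException carrying the supplied message). The helper itself is hypothetical, not code from the patch.

import org.apache.hadoop.util.Preconditions;

public class PreconditionsSketch {
  // Hypothetical helper: guards arguments the way the stripe readers and
  // stream classes in these hunks do after the import swap.
  static void checkReadArgs(byte[] buf, int off, int len) {
    Preconditions.checkNotNull(buf, "buf must not be null");
    Preconditions.checkArgument(off >= 0 && len >= 0 && off + len <= buf.length,
        "invalid range: off=" + off + ", len=" + len + ", buf.length=" + buf.length);
  }

  public static void main(String[] args) {
    checkReadArgs(new byte[8], 0, 8);   // passes silently
    try {
      checkReadArgs(new byte[8], 4, 8); // off + len > buf.length
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}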
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java index a7c49128ba524..b2cf5348cfa2c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderFactory.java @@ -75,7 +75,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java index f013be6c3a6c0..50b1d61282d23 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/BlockReaderLocal.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.client.impl; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.ReadOption; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java index e133f985da64a..c72ee3b93628d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/impl/DfsClientConf.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.client.impl; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java index 3e5a4431372ef..0661a40bbb304 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java @@ -25,7 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.DFSUtilClient; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java index b3354344d7237..20ecfd4e1ec83 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java @@ -26,7 +26,7 @@ import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.ipc.RemoteException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.tracing.TraceScope; import org.apache.hadoop.tracing.Tracer; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java index 02c4f9a1f21b1..dc84bbbeae197 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicy.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java index 48b581dfe6acb..79f10429a41d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ErasureCodingPolicyInfo.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java index d96c7892b76ea..f64ca17a23b7b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsPartialListing.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.protocol; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.ipc.RemoteException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java index 
f8865b4247451..d7ee5b0698649 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java @@ -22,7 +22,7 @@ import java.util.Comparator; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java index 3eff5b0e61b04..18ea736ac12f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ReencryptionStatus.java @@ -18,12 +18,12 @@ package org.apache.hadoop.hdfs.protocol; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto; import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus.State; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java index 74329bc1e8a12..3fc13dec298d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/SnapshotDiffReportListing.java @@ -20,7 +20,7 @@ import java.util.Collections; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.DFSUtilClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java index 10884f27f90a2..858dff6204104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ZoneReencryptionStatus.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.protocol; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java index cc958e35df116..ef1a3658305b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketHeader.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.PacketHeaderProto; import org.apache.hadoop.hdfs.util.ByteBufferOutputStream; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Shorts; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java index dc6d590ce630d..55ca2b4e4e4c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java @@ -28,7 +28,7 @@ import org.apache.hadoop.util.DirectBufferPool; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java index a5171885de151..c174dd53bdbaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/DataTransferSaslUtil.java @@ -30,6 +30,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.InetAddress; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.List; import java.util.Map; @@ -59,7 +60,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; @@ -147,7 +147,7 @@ public static Map createSaslPropertiesForEncryption( * @return key encoded as SASL password */ public static char[] encryptionKeyToPassword(byte[] encryptionKey) { - return new String(Base64.encodeBase64(encryptionKey, false), Charsets.UTF_8) + return new String(Base64.encodeBase64(encryptionKey, false), StandardCharsets.UTF_8) .toCharArray(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java index b2ad60e41ceff..5fb75121abdd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferClient.java @@ -30,6 +30,7 @@ import java.io.OutputStream; import java.net.InetAddress; import java.net.Socket; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; @@ -64,7 +65,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Lists; /** @@ -347,7 +347,7 @@ private static String getUserNameFromEncryptionKey( return encryptionKey.keyId + NAME_DELIMITER + encryptionKey.blockPoolId + NAME_DELIMITER + new String(Base64.encodeBase64(encryptionKey.nonce, false), - Charsets.UTF_8); + StandardCharsets.UTF_8); } /** @@ -450,7 +450,7 @@ private IOStreamPair getSaslStreams(InetAddress addr, private void updateToken(Token accessToken, SecretKey secretKey, Map saslProps) throws IOException { - byte[] newSecret = saslProps.get(Sasl.QOP).getBytes(Charsets.UTF_8); + byte[] newSecret = saslProps.get(Sasl.QOP).getBytes(StandardCharsets.UTF_8); BlockTokenIdentifier bkid = accessToken.decodeIdentifier(); bkid.setHandshakeMsg(newSecret); byte[] bkidBytes = bkid.getBytes(); @@ -471,7 +471,7 @@ private void updateToken(Token accessToken, */ private static String buildUserName(Token blockToken) { return new String(Base64.encodeBase64(blockToken.getIdentifier(), false), - Charsets.UTF_8); + StandardCharsets.UTF_8); } /** @@ -483,7 +483,7 @@ private static String buildUserName(Token blockToken) { */ private char[] buildClientPassword(Token blockToken) { return new String(Base64.encodeBase64(blockToken.getPassword(), false), - Charsets.UTF_8).toCharArray(); + StandardCharsets.UTF_8).toCharArray(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 7b02acb352dcc..b7e5907183458 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -28,7 +28,7 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java index 130e8c1c9c728..d1ad5a2079f5f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java @@ -22,7 +22,7 @@ import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java index 7e3954c562a58..5a5da7326a4e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java @@ -23,7 +23,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.SerializationFeature; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java index 7bf768935d9d9..ae445efaf566b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShm.java @@ -26,7 +26,7 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.net.unix.DomainSocketWatcher; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * DfsClientShm is a subclass of ShortCircuitShm which is used by the diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java index f165ab90a1d4a..2b9bf68e469c6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DfsClientShmManager.java @@ -44,7 +44,7 @@ import org.apache.hadoop.net.unix.DomainSocketWatcher; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java index 59c9098099a6e..2d7acc3f5bafb 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/DomainSocketFactory.java @@ -30,7 +30,7 @@ import org.apache.hadoop.net.unix.DomainSocket; import org.apache.hadoop.util.PerformanceAdvisory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java index eb4fd525651a1..df2a92c75c962 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java @@ -55,7 +55,7 @@ import org.apache.hadoop.util.Waitable; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java index fd58ef187525b..efc70b60ed54c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitReplica.java @@ -32,7 +32,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java index 1cb123bb58f3b..c6f7a50368152 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitShm.java @@ -39,7 +39,7 @@ import sun.misc.Unsafe; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java index 059280e494678..f076969c9b17b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/ByteArrayManager.java @@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.util.Time; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java index 91ab48fa9d471..c7724ce6db486 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileReader.java @@ -27,6 +27,7 @@ import java.io.InputStreamReader; import java.io.IOException; import java.io.Reader; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; @@ -84,7 +85,7 @@ private CombinedHostsFileReader() { if (hostFile.length() > 0) { try (Reader input = new InputStreamReader( - Files.newInputStream(hostFile.toPath()), "UTF-8")) { + Files.newInputStream(hostFile.toPath()), StandardCharsets.UTF_8)) { allDNs = objectMapper.readValue(input, DatanodeAdminProperties[].class); } catch (JsonMappingException jme) { // The old format doesn't have json top-level token to enclose @@ -103,7 +104,7 @@ private CombinedHostsFileReader() { List all = new ArrayList<>(); try (Reader input = new InputStreamReader(Files.newInputStream(Paths.get(hostsFilePath)), - "UTF-8")) { + StandardCharsets.UTF_8)) { Iterator iterator = objectReader.readValues(jsonFactory.createParser(input)); while (iterator.hasNext()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java index 7897dc1ebf6e2..de4c12d556cc7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/CombinedHostsFileWriter.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.Set; @@ -62,7 +63,7 @@ public static void writeFile(final String hostsFile, try (Writer output = new OutputStreamWriter(Files.newOutputStream(Paths.get(hostsFile)), - "UTF-8")) { + StandardCharsets.UTF_8)) { objectMapper.writeValue(output, allDNs); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java index 0b4f5fe977e38..25ee8d30c9290 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; 
+import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.DFSStripedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java index 004dc770fdd55..fe3d288d188bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java @@ -19,8 +19,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.FileChecksum; @@ -56,6 +55,7 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.util.ChunkedArrayList; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; import java.io.ByteArrayInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 9dfcee20cb520..5adc3f7e327ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -129,15 +129,16 @@ import org.apache.hadoop.security.token.DelegationTokenIssuer; import org.apache.hadoop.util.JsonSerialization; import org.apache.hadoop.util.KMSUtil; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; + +import static org.apache.hadoop.fs.impl.PathCapabilitiesSupport.validatePathCapabilityArgs; /** A FileSystem for HDFS over the web. 
*/ public class WebHdfsFileSystem extends FileSystem @@ -834,7 +835,7 @@ private T runWithRetry() throws IOException { newIoe.initCause(ioe.getCause()); newIoe.setStackTrace(ioe.getStackTrace()); ioe = newIoe; - } catch (NoSuchMethodException | SecurityException + } catch (NoSuchMethodException | SecurityException | InstantiationException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { } @@ -1733,7 +1734,7 @@ public DirectoryEntries listStatusBatch(Path f, byte[] token) throws } DirectoryListing listing = new FsPathResponseRunner( GetOpParam.Op.LISTSTATUS_BATCH, - f, new StartAfterParam(new String(prevKey, Charsets.UTF_8))) { + f, new StartAfterParam(new String(prevKey, StandardCharsets.UTF_8))) { @Override DirectoryListing decodeResponse(Map json) throws IOException { return JsonUtilClient.toDirectoryListing(json); diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java index 6ee8858df991e..5577bb6266486 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsContentLength.java @@ -22,6 +22,7 @@ import java.net.InetSocketAddress; import java.net.ServerSocket; import java.net.Socket; +import java.nio.charset.StandardCharsets; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -204,7 +205,7 @@ public String call() throws Exception { if (n <= 0) { break; } - sb.append(new String(buf, 0, n, "UTF-8")); + sb.append(new String(buf, 0, n, StandardCharsets.UTF_8)); } return sb.toString(); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index 9769b220c12c8..616ee88dcbab3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -342,6 +342,38 @@ ${basedir}/dev-support/findbugsExcludeFile.xml + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided VisibleForTesting rather than the one provided by Guava + + org.apache.hadoop.thirdparty.com.google.common.base.Preconditions + com.google.common.base.Preconditions + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 54fceff6dd082..ee9662e5334d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -23,7 +23,7 @@ import java.util.EnumSet; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonPathCapabilities; @@ -71,7 +71,7 @@ import org.json.simple.parser.JSONParser; import 
org.json.simple.parser.ParseException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; @@ -636,7 +636,7 @@ public FSDataOutputStream append(Path f, int bufferSize, /** * Truncate a file. - * + * * @param f the file to be truncated. * @param newLength The size the file is to be truncated to. * @@ -771,7 +771,7 @@ public DirectoryEntries listStatusBatch(Path f, byte[] token) throws Map params = new HashMap(); params.put(OP_PARAM, Operation.LISTSTATUS_BATCH.toString()); if (token != null) { - params.put(START_AFTER_PARAM, new String(token, Charsets.UTF_8)); + params.put(START_AFTER_PARAM, new String(token, StandardCharsets.UTF_8)); } HttpURLConnection conn = getConnection( Operation.LISTSTATUS_BATCH.getMethod(), @@ -786,7 +786,7 @@ public DirectoryEntries listStatusBatch(Path f, byte[] token) throws byte[] newToken = null; if (statuses.length > 0) { newToken = statuses[statuses.length - 1].getPath().getName().toString() - .getBytes(Charsets.UTF_8); + .getBytes(StandardCharsets.UTF_8); } // Parse the remainingEntries boolean into hasMore final long remainingEntries = (Long) listing.get(REMAINING_ENTRIES_JSON); @@ -1325,7 +1325,7 @@ public void setXAttr(Path f, String name, byte[] value, params.put(OP_PARAM, Operation.SETXATTR.toString()); params.put(XATTR_NAME_PARAM, name); if (value != null) { - params.put(XATTR_VALUE_PARAM, + params.put(XATTR_VALUE_PARAM, XAttrCodec.encodeValue(value, XAttrCodec.HEX)); } params.put(XATTR_SET_FLAG_PARAM, EnumSetParam.toString(flag)); @@ -1349,7 +1349,7 @@ public byte[] getXAttr(Path f, String name) throws IOException { } /** Convert xAttrs json to xAttrs map */ - private Map createXAttrMap(JSONArray jsonArray) + private Map createXAttrMap(JSONArray jsonArray) throws IOException { Map xAttrs = Maps.newHashMap(); for (Object obj : jsonArray) { @@ -1393,7 +1393,7 @@ public Map getXAttrs(Path f) throws IOException { @Override public Map getXAttrs(Path f, List names) throws IOException { - Preconditions.checkArgument(names != null && !names.isEmpty(), + Preconditions.checkArgument(names != null && !names.isEmpty(), "XAttr names cannot be null or empty."); Map params = new HashMap(); params.put(OP_PARAM, Operation.GETXATTRS.toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java index 16a95c31d2b1e..5f9f9b2147e4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.fs.http.server; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -90,6 +89,7 @@ import java.io.IOException; import java.io.InputStream; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.security.AccessControlException; import java.security.PrivilegedExceptionAction; import java.text.MessageFormat; @@ -404,7 +404,7 @@ public InputStream run() throws Exception { HttpFSParametersProvider.StartAfterParam.class); byte[] token = HttpFSUtils.EMPTY_BYTES; 
if (startAfter != null) { - token = startAfter.getBytes(Charsets.UTF_8); + token = startAfter.getBytes(StandardCharsets.UTF_8); } FSOperations.FSListStatusBatch command = new FSOperations .FSListStatusBatch(path, token); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 705a046ab73a9..903bc91d476f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -219,6 +219,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided VisibleForTesting rather than the one provided by Guava + + org.apache.hadoop.thirdparty.com.google.common.base.Preconditions + com.google.common.base.Preconditions + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java index 75c29d30314b7..dc1eb8746964e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/DFSClientCache.java @@ -29,7 +29,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.hdfs.DFSClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java index 3995fa5566bb0..5fb1f1a01200b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OffsetRange.java @@ -19,7 +19,7 @@ import java.util.Comparator; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * OffsetRange is the range of read/write request. 
A single point (e.g.,[5,5]) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index 0e07b1bfb62aa..8e4026926c83b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -58,7 +58,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java index feb681d27a811..70ae4b29e9f96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java @@ -31,7 +31,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 30a6a844e28a9..9b96a92cb4bf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -25,7 +25,7 @@ import java.net.InetSocketAddress; import java.net.SocketAddress; import java.nio.ByteBuffer; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.EnumSet; import io.netty.buffer.ByteBuf; @@ -681,15 +681,15 @@ READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler, } int rtmax = config.getInt(NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY, NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_DEFAULT); - if (rtmax < target.getBytes(Charset.forName("UTF-8")).length) { + if (rtmax < target.getBytes(StandardCharsets.UTF_8).length) { LOG.error("Link size: {} is larger than max transfer size: {}", - target.getBytes(Charset.forName("UTF-8")).length, rtmax); + target.getBytes(StandardCharsets.UTF_8).length, rtmax); return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, new byte[0]); } return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr, - target.getBytes(Charset.forName("UTF-8"))); + target.getBytes(StandardCharsets.UTF_8)); } catch (IOException e) { LOG.warn("Readlink error", e); @@ -1515,7 +1515,7 @@ private DirectoryListing listPaths(DFSClient dfsClient, String dirFileIdPath, } // This happens when startAfter was just deleted LOG.info("Cookie couldn't be found: {}, do listing from beginning", - new String(startAfter, Charset.forName("UTF-8"))); + new String(startAfter, StandardCharsets.UTF_8)); dlisting = dfsClient .listPaths(dirFileIdPath, HdfsFileStatus.EMPTY_NAME); } @@ -1628,7 +1628,7 @@ public 
READDIR3Response readdir(XDR xdr, SecurityHandler securityHandler, startAfter = HdfsFileStatus.EMPTY_NAME; } else { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); - startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8")); + startAfter = inodeIdPath.getBytes(StandardCharsets.UTF_8); } dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); @@ -1800,7 +1800,7 @@ READDIRPLUS3Response readdirplus(XDR xdr, SecurityHandler securityHandler, startAfter = HdfsFileStatus.EMPTY_NAME; } else { String inodeIdPath = Nfs3Utils.getFileIdPath(cookie); - startAfter = inodeIdPath.getBytes(Charset.forName("UTF-8")); + startAfter = inodeIdPath.getBytes(StandardCharsets.UTF_8); } dlisting = listPaths(dfsClient, dirFileIdPath, startAfter); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java index 882e9cda86aa0..ed1856c9a851c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteCtx.java @@ -30,7 +30,7 @@ import org.apache.hadoop.nfs.nfs3.Nfs3Constant.WriteStableHow; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * WriteCtx saves the context of one write request, such as request, channel, diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml index 99ec4e80bf8c2..215c6b3b30055 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/pom.xml @@ -283,6 +283,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided VisibleForTesting rather than the one provided by Guava + + org.apache.hadoop.thirdparty.com.google.common.base.Preconditions + com.google.common.base.Preconditions + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java index 88e20649506dc..591ac5b3c37ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/resolver/order/AvailableSpaceResolver.java @@ -39,7 +39,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Order the destinations based on available space. 
This resolver uses a diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java index 441f01345c4e4..531ad28c84afa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterAdminServer.java @@ -29,7 +29,7 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java index dc7ebbf0d3475..3c0319cb4bde4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterUserMappings.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.federation.router; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystem; @@ -56,6 +57,9 @@ import java.net.URL; import java.net.URLDecoder; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; import java.util.concurrent.TimeUnit; diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 563f48536fee3..ed1489a35b0aa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -442,6 +442,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.maven.plugins + maven-enforcer-plugin + + + de.skuzzle.enforcer + restrict-imports-enforcer-rule + ${restrict-imports.enforcer.version} + + + + + banned-illegal-imports + process-sources + + enforce + + + + + true + Use hadoop-common provided VisibleForTesting rather than the one provided by Guava + + org.apache.hadoop.thirdparty.com.google.common.base.Preconditions + com.google.common.base.Preconditions + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 690adb8866a6f..6d28bae5ce39b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -75,6 +75,8 @@ import org.apache.hadoop.hdfs.server.namenode.INodesInPath; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -107,8 +109,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import 
org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.protobuf.BlockingService; @@ -309,7 +310,11 @@ public static String byteArray2PathString(final byte[][] components, // specifically not using StringBuilder to more efficiently build // string w/o excessive byte[] copies and charset conversions. final int range = offset + length; - Preconditions.checkPositionIndexes(offset, range, components.length); + if (offset < 0 || range < offset || range > components.length) { + throw new IndexOutOfBoundsException( + "Incorrect index [offset, range, size] [" + + offset + ", " + range + ", " + components.length + "]"); + } if (length == 0) { return ""; } @@ -684,8 +689,9 @@ public static Map> getNNServiceRpcAddress } else { // Ensure that the internal service is indeed in the list of all available // nameservices. - Set availableNameServices = Sets.newHashSet(conf - .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES)); + Collection namespaces = conf + .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES); + Set availableNameServices = new HashSet<>(namespaces); for (String nsId : parentNameServices) { if (!availableNameServices.contains(nsId)) { throw new IOException("Unknown nameservice: " + nsId); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 58034ddad4705..e1c2fcec7f285 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -57,7 +57,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java index e2b28b6708555..cfff21f45b032 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.net; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java index 0d0d209d977f3..5f23a5b6b3e48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSTopologyNodeImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.net; import org.apache.hadoop.classification.VisibleForTesting; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor; import org.apache.hadoop.net.InnerNode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java index eadbf030b766f..e40e6702e7f86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.datanode.Replica; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java index aef009a03a1dc..d03ad3d4d7ce6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirective.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.protocol; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; - import java.util.Date; import org.apache.hadoop.classification.InterfaceAudience; @@ -28,7 +26,7 @@ import org.apache.hadoop.util.IntrusiveCollection; import org.apache.hadoop.util.IntrusiveCollection.Element; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Namenode class that tracks state related to a cached path. 
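The byteArray2PathString hunk above trades Guava's Preconditions.checkPositionIndexes for an explicit range check that throws IndexOutOfBoundsException itself. A minimal, self-contained sketch of that check; the class and method names here are illustrative, not part of the patch:

public final class RangeChecks {
  private RangeChecks() {
  }

  // Mirrors the hand-rolled replacement for Guava's checkPositionIndexes(offset, range, size):
  // the indexes are valid only when 0 <= offset <= range <= size, otherwise throw.
  static void checkPositionIndexes(int offset, int range, int size) {
    if (offset < 0 || range < offset || range > size) {
      throw new IndexOutOfBoundsException(
          "Incorrect index [offset, range, size] ["
              + offset + ", " + range + ", " + size + "]");
    }
  }

  public static void main(String[] args) {
    checkPositionIndexes(0, 2, 3); // in range, returns normally
    System.out.println("range check passed");
  }
}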
@@ -63,7 +61,7 @@ public CacheDirective(long id, String path, short replication, long expiryTime) { Preconditions.checkArgument(id > 0); this.id = id; - this.path = checkNotNull(path); + this.path = Preconditions.checkNotNull(path); Preconditions.checkArgument(replication > 0); this.replication = replication; this.expiryTime = expiryTime; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java index b1fdec6fe63dc..31aedfbf3ae3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LayoutFlags.java @@ -24,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet; import org.apache.hadoop.util.Sets; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java index 37ab0a7ee4166..bf032bef5cca5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferServer.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; @@ -61,7 +62,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Lists; /** @@ -326,7 +326,7 @@ private char[] buildServerPassword(String userName) throws IOException { byte[] tokenPassword = blockPoolTokenSecretManager.retrievePassword( identifier); return (new String(Base64.encodeBase64(tokenPassword, false), - Charsets.UTF_8)).toCharArray(); + StandardCharsets.UTF_8)).toCharArray(); } /** @@ -381,7 +381,7 @@ private IOStreamPair doSaslHandshake(Peer peer, OutputStream underlyingOut, if (secret != null || bpid != null) { // sanity check, if one is null, the other must also not be null assert(secret != null && bpid != null); - String qop = new String(secret, Charsets.UTF_8); + String qop = new String(secret, StandardCharsets.UTF_8); saslProps.put(Sasl.QOP, qop); } SaslParticipant sasl = SaslParticipant.createServerSaslParticipant( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 9244b9fef8571..89cc6c79a1214 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -61,7 +61,7 @@ import org.apache.hadoop.hdfs.server.protocol.StorageReport; import 
org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.RpcController; import org.apache.hadoop.thirdparty.protobuf.ServiceException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index c1771a68c07cd..624e574024c0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -36,7 +36,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 5205ef787dd1e..8cf394b3ff61f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -58,7 +58,7 @@ import org.apache.hadoop.util.StopWatch; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java index e2a169aeb3c5f..c3ad872f30d0d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumCall.java @@ -30,7 +30,7 @@ import org.apache.hadoop.util.Timer; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java index 1f60e3d468821..65b227ccea8d4 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumException.java @@ -23,7 +23,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Exception thrown when too many exceptions occur while gathering diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 856ffe521620a..e660e86addfe7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -60,7 +60,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.protobuf.TextFormat; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java index 4b2a518ac0dff..38006a499d97e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/SegmentRecoveryComparator.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.hadoop.thirdparty.com.google.common.primitives.Booleans; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 296d4af59a496..d8a1fe7bce1ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -25,6 +25,7 @@ import java.io.OutputStreamWriter; import java.net.URL; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.StandardCopyOption; import java.security.PrivilegedExceptionAction; @@ -72,8 +73,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import 
org.apache.hadoop.thirdparty.protobuf.TextFormat; @@ -92,9 +92,9 @@ public class Journal implements Closeable { private int curSegmentLayoutVersion = 0; private long nextTxId = HdfsServerConstants.INVALID_TXID; private long highestWrittenTxId = 0; - + private final String journalId; - + private final JNStorage storage; /** @@ -115,7 +115,7 @@ public class Journal implements Closeable { * request to resurface and confuse things. */ private long currentEpochIpcSerial = -1; - + /** * The epoch number of the last writer to actually write a transaction. * This is used to differentiate log segments after a crash at the very @@ -123,7 +123,7 @@ public class Journal implements Closeable { * test case. */ private PersistentLongFile lastWriterEpoch; - + /** * Lower-bound on the last committed transaction ID. This is not * depended upon for correctness, but acts as a sanity check @@ -131,11 +131,11 @@ public class Journal implements Closeable { * for clients reading in-progress logs. */ private BestEffortLongFile committedTxnId; - + public static final String LAST_PROMISED_FILENAME = "last-promised-epoch"; public static final String LAST_WRITER_EPOCH = "last-writer-epoch"; private static final String COMMITTED_TXID_FILENAME = "committed-txid"; - + private final FileJournalManager fjm; private JournaledEditsCache cache; @@ -164,13 +164,13 @@ public class Journal implements Closeable { this.journalId = journalId; refreshCachedData(); - + this.fjm = storage.getJournalManager(); this.cache = createCache(); this.metrics = JournalMetrics.create(this); - + EditLogFile latest = scanStorageForLatestEdits(); if (latest != null) { updateHighestWrittenTxId(latest.getLastTxId()); @@ -201,7 +201,7 @@ public boolean getTriedJournalSyncerStartedwithnsId() { */ private synchronized void refreshCachedData() { IOUtils.closeStream(committedTxnId); - + File currentDir = storage.getSingularStorageDir().getCurrentDir(); this.lastPromisedEpoch = new PersistentLongFile( new File(currentDir, LAST_PROMISED_FILENAME), 0); @@ -211,7 +211,7 @@ private synchronized void refreshCachedData() { new File(currentDir, COMMITTED_TXID_FILENAME), HdfsServerConstants.INVALID_TXID); } - + /** * Scan the local storage directory, and return the segment containing * the highest transaction. @@ -222,10 +222,10 @@ private synchronized EditLogFile scanStorageForLatestEdits() throws IOException if (!fjm.getStorageDirectory().getCurrentDir().exists()) { return null; } - + LOG.info("Scanning storage " + fjm); List files = fjm.getLogFiles(0); - + while (!files.isEmpty()) { EditLogFile latestLog = files.remove(files.size() - 1); latestLog.scanLog(Long.MAX_VALUE, false); @@ -240,7 +240,7 @@ private synchronized EditLogFile scanStorageForLatestEdits() throws IOException return latestLog; } } - + LOG.info("No files in " + fjm); return null; } @@ -268,11 +268,11 @@ public void close() throws IOException { IOUtils.closeStream(curSegment); storage.close(); } - + JNStorage getStorage() { return storage; } - + String getJournalId() { return journalId; } @@ -304,10 +304,10 @@ synchronized long getCurrentLagTxns() throws IOException { if (committed == 0) { return 0; } - + return Math.max(committed - highestWrittenTxId, 0L); } - + synchronized long getHighestWrittenTxId() { return highestWrittenTxId; } @@ -343,15 +343,15 @@ synchronized NewEpochResponseProto newEpoch( storage.checkConsistentNamespace(nsInfo); // Check that the new epoch being proposed is in fact newer than - // any other that we've promised. + // any other that we've promised. 
if (epoch <= getLastPromisedEpoch()) { throw new IOException("Proposed epoch " + epoch + " <= last promise " + getLastPromisedEpoch() + " ; journal id: " + journalId); } - + updateLastPromisedEpoch(epoch); abortCurSegment(); - + NewEpochResponseProto.Builder builder = NewEpochResponseProto.newBuilder(); @@ -360,7 +360,7 @@ synchronized NewEpochResponseProto newEpoch( if (latestFile != null) { builder.setLastSegmentTxId(latestFile.getFirstTxId()); } - + return builder.build(); } @@ -369,7 +369,7 @@ private void updateLastPromisedEpoch(long newEpoch) throws IOException { " to " + newEpoch + " for client " + Server.getRemoteIp() + " ; journal id: " + journalId); lastPromisedEpoch.set(newEpoch); - + // Since we have a new writer, reset the IPC serial - it will start // counting again from 0 for this writer. currentEpochIpcSerial = -1; @@ -379,7 +379,7 @@ private void abortCurSegment() throws IOException { if (curSegment == null) { return; } - + curSegment.abort(); curSegment = null; curSegmentTxId = HdfsServerConstants.INVALID_TXID; @@ -419,11 +419,11 @@ synchronized void journal(RequestInfo reqInfo, abortCurSegment(); throw e; } - + checkSync(nextTxId == firstTxnId, "Can't write txid " + firstTxnId + " expecting nextTxId=" + nextTxId + " ; journal id: " + journalId); - + long lastTxnId = firstTxnId + numTxns - 1; if (LOG.isTraceEnabled()) { LOG.trace("Writing txid " + firstTxnId + "-" + lastTxnId + @@ -438,7 +438,7 @@ synchronized void journal(RequestInfo reqInfo, // "catching up" with the rest. Hence we do not need to fsync. boolean isLagging = lastTxnId <= committedTxnId.get(); boolean shouldFsync = !isLagging; - + curSegment.writeRaw(records, 0, records.length); curSegment.setReadyToFlush(); StopWatch sw = new StopWatch(); @@ -462,11 +462,11 @@ synchronized void journal(RequestInfo reqInfo, // nodes. So, we are in "catch up" mode. This gets its own metric. metrics.batchesWrittenWhileLagging.incr(1); } - + metrics.batchesWritten.incr(1); metrics.bytesWritten.incr(records.length); metrics.txnsWritten.incr(numTxns); - + updateHighestWrittenTxId(lastTxnId); nextTxId = lastTxnId + 1; lastJournalTimestamp = Time.now(); @@ -475,7 +475,7 @@ synchronized void journal(RequestInfo reqInfo, public void heartbeat(RequestInfo reqInfo) throws IOException { checkRequest(reqInfo); } - + /** * Ensure that the given request is coming from the correct writer and in-order. * @param reqInfo the request info @@ -492,7 +492,7 @@ private synchronized void checkRequest(RequestInfo reqInfo) throws IOException { // the promise. updateLastPromisedEpoch(reqInfo.getEpoch()); } - + // Ensure that the IPCs are arriving in-order as expected. 
checkSync(reqInfo.getIpcSerialNumber() > currentEpochIpcSerial, "IPC serial %s from client %s was not higher than prior highest " + @@ -506,21 +506,21 @@ private synchronized void checkRequest(RequestInfo reqInfo) throws IOException { "Client trying to move committed txid backward from " + committedTxnId.get() + " to " + reqInfo.getCommittedTxId() + " ; journal id: " + journalId); - + committedTxnId.set(reqInfo.getCommittedTxId()); } } - + private synchronized void checkWriteRequest(RequestInfo reqInfo) throws IOException { checkRequest(reqInfo); - + if (reqInfo.getEpoch() != lastWriterEpoch.get()) { throw new IOException("IPC's epoch " + reqInfo.getEpoch() + " is not the current writer epoch " + lastWriterEpoch.get() + " ; journal id: " + journalId); } } - + public synchronized boolean isFormatted() { return storage.isFormatted(); } @@ -549,10 +549,10 @@ private void checkSync(boolean expression, String msg, * @throws AssertionError if the given expression is not true. * The message of the exception is formatted using the 'msg' and * 'formatArgs' parameters. - * + * * This should be used in preference to Java's built-in assert in * non-performance-critical paths, where a failure of this invariant - * might cause the protocol to lose data. + * might cause the protocol to lose data. */ private void alwaysAssert(boolean expression, String msg, Object... formatArgs) { @@ -560,7 +560,7 @@ private void alwaysAssert(boolean expression, String msg, throw new AssertionError(String.format(msg, formatArgs)); } } - + /** * Start a new segment at the given txid. The previous segment * must have already been finalized. @@ -570,9 +570,9 @@ public synchronized void startLogSegment(RequestInfo reqInfo, long txid, assert fjm != null; checkFormatted(); checkRequest(reqInfo); - + if (curSegment != null) { - LOG.warn("Client is requesting a new log segment " + txid + + LOG.warn("Client is requesting a new log segment " + txid + " though we are already writing " + curSegment + ". " + "Aborting the current segment in order to begin the new one." + " ; journal id: " + journalId); @@ -591,10 +591,10 @@ public synchronized void startLogSegment(RequestInfo reqInfo, long txid, throw new IllegalStateException("Already have a finalized segment " + existing + " beginning at " + txid + " ; journal id: " + journalId); } - + // If it's in-progress, it should only contain one transaction, // because the "startLogSegment" transaction is written alone at the - // start of each segment. + // start of each segment. existing.scanLog(Long.MAX_VALUE, false); if (existing.getLastTxId() != existing.getFirstTxId()) { throw new IllegalStateException("The log file " + @@ -602,7 +602,7 @@ public synchronized void startLogSegment(RequestInfo reqInfo, long txid, " ; journal id: " + journalId); } } - + long curLastWriterEpoch = lastWriterEpoch.get(); if (curLastWriterEpoch != reqInfo.getEpoch()) { LOG.info("Updating lastWriterEpoch from " + curLastWriterEpoch + @@ -616,13 +616,13 @@ public synchronized void startLogSegment(RequestInfo reqInfo, long txid, // Otherwise, no writer would have started writing. So, we can // remove the record of the older segment here. purgePaxosDecision(txid); - + curSegment = fjm.startLogSegment(txid, layoutVersion); curSegmentTxId = txid; curSegmentLayoutVersion = layoutVersion; nextTxId = txid; } - + /** * Finalize the log segment at the given transaction ID. 
*/ @@ -641,7 +641,7 @@ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, curSegmentTxId = HdfsServerConstants.INVALID_TXID; curSegmentLayoutVersion = 0; } - + checkSync(nextTxId == endTxId + 1, "Trying to finalize in-progress log segment %s to end at " + "txid %s but only written up to txid %s ; journal id: %s", @@ -650,7 +650,7 @@ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, // the log segment that it was just writing to. needsValidation = false; } - + FileJournalManager.EditLogFile elf = fjm.getLogFile(startTxId); if (elf == null) { throw new JournalOutOfSyncException("No log file to finalize at " + @@ -662,7 +662,7 @@ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, LOG.info("Validating log segment " + elf.getFile() + " about to be " + "finalized ; journal id: " + journalId); elf.scanLog(Long.MAX_VALUE, false); - + checkSync(elf.getLastTxId() == endTxId, "Trying to finalize in-progress log segment %s to end at " + "txid %s but log %s on disk only contains up to txid %s " + @@ -683,7 +683,7 @@ public synchronized void finalizeLogSegment(RequestInfo reqInfo, long startTxId, // accepted decision. The existence of the finalized log segment is enough. purgePaxosDecision(elf.getFirstTxId()); } - + /** * @see JournalManager#purgeLogsOlderThan(long) */ @@ -691,13 +691,13 @@ public synchronized void purgeLogsOlderThan(RequestInfo reqInfo, long minTxIdToKeep) throws IOException { checkFormatted(); checkRequest(reqInfo); - + storage.purgeDataOlderThan(minTxIdToKeep); } - + /** * Remove the previously-recorded 'accepted recovery' information - * for a given log segment, once it is no longer necessary. + * for a given log segment, once it is no longer necessary. * @param segmentTxId the transaction ID to purge * @throws IOException if the file could not be deleted */ @@ -719,20 +719,20 @@ public RemoteEditLogManifest getEditLogManifest(long sinceTxId, // No need to checkRequest() here - anyone may ask for the list // of segments. 
checkFormatted(); - + List logs = fjm.getRemoteEditLogs(sinceTxId, inProgressOk); - + if (inProgressOk) { RemoteEditLog log = null; for (Iterator iter = logs.iterator(); iter.hasNext();) { log = iter.next(); if (log.isInProgress()) { iter.remove(); break; } } if (log != null && log.isInProgress()) { logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId(), true)); } } @@ -815,9 +815,9 @@ public synchronized PrepareRecoveryResponseProto prepareRecovery( RequestInfo reqInfo, long segmentTxId) throws IOException { checkFormatted(); checkRequest(reqInfo); - + abortCurSegment(); - + PrepareRecoveryResponseProto.Builder builder = PrepareRecoveryResponseProto.newBuilder(); @@ -832,7 +832,7 @@ public synchronized PrepareRecoveryResponseProto prepareRecovery( assert acceptedState.getEndTxId() == segInfo.getEndTxId() : "prev accepted: " + TextFormat.shortDebugString(previouslyAccepted)+ "\n" + "on disk: " + TextFormat.shortDebugString(segInfo); - + builder.setAcceptedInEpoch(previouslyAccepted.getAcceptedInEpoch()) .setSegmentState(previouslyAccepted.getSegmentState()); } else { @@ -840,18 +840,18 @@ public synchronized PrepareRecoveryResponseProto prepareRecovery( builder.setSegmentState(segInfo); } } - + builder.setLastWriterEpoch(lastWriterEpoch.get()); if (committedTxnId.get() != HdfsServerConstants.INVALID_TXID) { builder.setLastCommittedTxId(committedTxnId.get()); } - + PrepareRecoveryResponseProto resp = builder.build(); LOG.info("Prepared recovery for segment " + segmentTxId + ": " + TextFormat.shortDebugString(resp) + " ; journal id: " + journalId); return resp; } - + /** * @see QJournalProtocol#acceptRecovery(RequestInfo, QJournalProtocolProtos.SegmentStateProto, URL) */ @@ -860,7 +860,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, throws IOException { checkFormatted(); checkRequest(reqInfo); - + abortCurSegment(); long segmentTxId = segment.getStartTxId(); @@ -871,13 +871,13 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, segment.getEndTxId() >= segmentTxId, "bad recovery state for segment %s: %s ; journal id: %s", segmentTxId, TextFormat.shortDebugString(segment), journalId); - + PersistedRecoveryPaxosData oldData = getPersistedPaxosData(segmentTxId); PersistedRecoveryPaxosData newData = PersistedRecoveryPaxosData.newBuilder() .setAcceptedInEpoch(reqInfo.getEpoch()) .setSegmentState(segment) .build(); - + // If we previously acted on acceptRecovery() from a higher-numbered writer, // this call is out of sync. We should never actually trigger this, since the // checkRequest() call above should filter non-increasing epoch numbers.
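The charset hunks earlier in the patch (RpcProgramNfs3, SaslDataTransferServer, the WebHDFS and HttpFS code, and persistPaxosData in this file) all follow one pattern: Guava's Charsets constants and the "UTF-8" string lookups give way to java.nio.charset.StandardCharsets. A small sketch of the pattern using only the JDK; the class name and sample strings are illustrative:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

public class Utf8Example {
  public static void main(String[] args) throws IOException {
    // Charset-typed overloads replace the "UTF-8" string lookups and cannot
    // throw UnsupportedEncodingException, unlike the String-named overloads.
    byte[] encoded = "last-promised-epoch".getBytes(StandardCharsets.UTF_8);
    String decoded = new String(encoded, StandardCharsets.UTF_8);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) {
      writer.write(decoded);
    }
    System.out.println(out.size() + " bytes written");
  }
}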
@@ -887,16 +887,16 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, "%s\nJournalId: %s\n", oldData, newData, journalId); } - + File syncedFile = null; - + SegmentStateProto currentSegment = getSegmentInfo(segmentTxId); if (currentSegment == null || currentSegment.getEndTxId() != segment.getEndTxId()) { if (currentSegment == null) { LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) + ": no current segment in place ; journal id: " + journalId); - + // Update the highest txid for lag metrics updateHighestWrittenTxId(Math.max(segment.getEndTxId(), highestWrittenTxId)); @@ -904,7 +904,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, LOG.info("Synchronizing log " + TextFormat.shortDebugString(segment) + ": old segment " + TextFormat.shortDebugString(currentSegment) + " is not the right length ; journal id: " + journalId); - + // Paranoid sanity check: if the new log is shorter than the log we // currently have, we should not end up discarding any transactions // which are already Committed. @@ -914,18 +914,18 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, "Cannot replace segment " + TextFormat.shortDebugString(currentSegment) + " with new segment " + - TextFormat.shortDebugString(segment) + + TextFormat.shortDebugString(segment) + ": would discard already-committed txn " + committedTxnId.get() + " ; journal id: " + journalId); } - + // Another paranoid check: we should not be asked to synchronize a log // on top of a finalized segment. alwaysAssert(currentSegment.getIsInProgress(), "Should never be asked to synchronize a different log on top of " + "an already-finalized segment ; journal id: " + journalId); - + // If we're shortening the log, update our highest txid // used for lag metrics. if (txnRange(currentSegment).contains(highestWrittenTxId)) { @@ -933,13 +933,13 @@ public synchronized void acceptRecovery(RequestInfo reqInfo, } } syncedFile = syncLog(reqInfo, segment, fromUrl); - + } else { LOG.info("Skipping download of log " + TextFormat.shortDebugString(segment) + ": already have up-to-date logs ; journal id: " + journalId); } - + // This is one of the few places in the protocol where we have a single // RPC that results in two distinct actions: // @@ -1019,7 +1019,7 @@ public Void run() throws IOException { }); return tmpFile; } - + /** * In the case the node crashes in between downloading a log segment @@ -1027,7 +1027,7 @@ public Void run() throws IOException { * will be left in its temporary location on disk. Given the paxos data, * we can check if this was indeed the case, and "roll forward" * the atomic operation. - * + * * See the inline comments in * {@link #acceptRecovery(RequestInfo, SegmentStateProto, URL)} for more * details. 
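The DFSUtil hunk above and the JournalNodeSyncer hunk below replace Guava's Sets.newHashSet(...) factory calls with plain JDK collections. A minimal sketch of the same substitution; the class name and sample address are illustrative only:

import java.net.InetSocketAddress;
import java.util.HashSet;
import java.util.Set;

public class ExcludedAddressesExample {
  public static void main(String[] args) {
    InetSocketAddress boundIpcAddress = new InetSocketAddress("127.0.0.1", 8485);

    // Instead of Guava: Set<InetSocketAddress> excluded = Sets.newHashSet(boundIpcAddress);
    Set<InetSocketAddress> excluded = new HashSet<>();
    excluded.add(boundIpcAddress);

    System.out.println(excluded.contains(boundIpcAddress)); // true
  }
}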
@@ -1043,9 +1043,9 @@ private void completeHalfDoneAcceptRecovery( long segmentId = paxosData.getSegmentState().getStartTxId(); long epoch = paxosData.getAcceptedInEpoch(); - + File tmp = storage.getSyncLogTemporaryFile(segmentId, epoch); - + if (tmp.exists()) { File dst = storage.getInProgressEditLog(segmentId); LOG.info("Rolling forward previously half-completed synchronization: " + @@ -1064,7 +1064,7 @@ private PersistedRecoveryPaxosData getPersistedPaxosData(long segmentTxId) // Default instance has no fields filled in (they're optional) return null; } - + InputStream in = Files.newInputStream(f.toPath()); try { PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData.parseDelimitedFrom(in); @@ -1092,12 +1092,12 @@ private void persistPaxosData(long segmentTxId, // Write human-readable data after the protobuf. This is only // to assist in debugging -- it's not parsed at all. try(OutputStreamWriter writer = - new OutputStreamWriter(fos, Charsets.UTF_8)) { + new OutputStreamWriter(fos, StandardCharsets.UTF_8)) { writer.write(String.valueOf(newData)); writer.write('\n'); writer.flush(); } - + fos.flush(); success = true; } finally { @@ -1128,11 +1128,11 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { + "; new CTime = " + storage.getCTime()); storage.getJournalManager().doUpgrade(storage); storage.getOrCreatePaxosDir(); - + // Copy over the contents of the epoch data files to the new dir. File currentDir = storage.getSingularStorageDir().getCurrentDir(); File previousDir = storage.getSingularStorageDir().getPreviousDir(); - + PersistentLongFile prevLastPromisedEpoch = new PersistentLongFile( new File(previousDir, LAST_PROMISED_FILENAME), 0); PersistentLongFile prevLastWriterEpoch = new PersistentLongFile( @@ -1159,7 +1159,7 @@ public synchronized void doUpgrade(StorageInfo sInfo) throws IOException { } public synchronized void doFinalize() throws IOException { - LOG.info("Finalizing upgrade for journal " + LOG.info("Finalizing upgrade for journal " + storage.getRoot() + "." + (storage.getLayoutVersion()==0 ? "" : "\n cur LV = " + storage.getLayoutVersion() @@ -1184,7 +1184,7 @@ public synchronized void doRollback() throws IOException { synchronized void discardSegments(long startTxId) throws IOException { storage.getJournalManager().discardSegments(startTxId); - // we delete all the segments after the startTxId. let's reset committedTxnId + // we delete all the segments after the startTxId. 
let's reset committedTxnId committedTxnId.set(startTxId - 1); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 78a21bc37294d..446791b2a484e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.qjournal.server; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java index ccfbf9f171dd0..b6e82d8e73897 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeSyncer.java @@ -41,6 +41,7 @@ import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Daemon; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -52,6 +53,7 @@ import java.net.URISyntaxException; import java.net.URL; import java.security.PrivilegedExceptionAction; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.List; @@ -278,7 +280,7 @@ private List getOtherJournalNodeAddrs() { } if (uriStr == null || uriStr.isEmpty()) { - HashSet sharedEditsUri = Sets.newHashSet(); + HashSet sharedEditsUri = new HashSet<>(); if (nameServiceId != null) { Collection nnIds = DFSUtilClient.getNameNodeIds( conf, nameServiceId); @@ -322,7 +324,8 @@ protected List getJournalAddrList(String uriStr) throws URI uri = new URI(uriStr); InetSocketAddress boundIpcAddress = jn.getBoundIpcAddress(); - Set excluded = Sets.newHashSet(boundIpcAddress); + Set excluded = new HashSet<>(); + excluded.add(boundIpcAddress); List addrList = Util.getLoggerAddresses(uri, excluded); // Exclude the current JournalNode instance (a local address and the same port). 
If the address diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java index be491fcf4df8e..ca8aa8ef0c26d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hdfs.security.token.block; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.SecureRandom; import java.util.Arrays; @@ -46,7 +46,7 @@ import org.apache.hadoop.util.Timer; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultiset; import org.apache.hadoop.thirdparty.com.google.common.collect.Multiset; @@ -293,7 +293,7 @@ public Token generateToken(String userId, if (shouldWrapQOP) { String qop = Server.getAuxiliaryPortEstablishedQOP(); if (qop != null) { - id.setHandshakeMsg(qop.getBytes(Charsets.UTF_8)); + id.setHandshakeMsg(qop.getBytes(StandardCharsets.UTF_8)); } } return new Token(id, this); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 1c216f19270c1..5b626dff85244 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -49,7 +49,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.apache.hadoop.security.token.delegation.DelegationKey; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.protobuf.ByteString; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index fb70c943715be..ddc2254040a81 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.balancer; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS; import java.io.IOException; @@ -70,19 +69,19 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /**

The balancer is a tool that balances disk space usage on an HDFS cluster * when some datanodes become full or when new empty nodes join the cluster. - * The tool is deployed as an application program that can be run by the + * The tool is deployed as an application program that can be run by the * cluster administrator on a live HDFS cluster while applications * adding and deleting files. - * + * *

SYNOPSIS *

  * To start:
  *      bin/start-balancer.sh [-threshold {@literal <threshold>}]
- *      Example: bin/ start-balancer.sh 
+ *      Example: bin/ start-balancer.sh
  *                     start the balancer with a default threshold of 10%
  *               bin/ start-balancer.sh -threshold 5
  *                     start the balancer with a threshold of 5%
@@ -93,62 +92,62 @@
  * To stop:
  *      bin/ stop-balancer.sh
  * 
- * + * *

DESCRIPTION - *

The threshold parameter is a fraction in the range of (1%, 100%) with a - * default value of 10%. The threshold sets a target for whether the cluster - * is balanced. A cluster is balanced if for each datanode, the utilization - * of the node (ratio of used space at the node to total capacity of the node) - * differs from the utilization of the (ratio of used space in the cluster - * to total capacity of the cluster) by no more than the threshold value. - * The smaller the threshold, the more balanced a cluster will become. - * It takes more time to run the balancer for small threshold values. - * Also for a very small threshold the cluster may not be able to reach the + *

The threshold parameter is a fraction in the range of (1%, 100%) with a + * default value of 10%. The threshold sets a target for whether the cluster + * is balanced. A cluster is balanced if for each datanode, the utilization + * of the node (ratio of used space at the node to total capacity of the node) + * differs from the utilization of the (ratio of used space in the cluster + * to total capacity of the cluster) by no more than the threshold value. + * The smaller the threshold, the more balanced a cluster will become. + * It takes more time to run the balancer for small threshold values. + * Also for a very small threshold the cluster may not be able to reach the * balanced state when applications write and delete files concurrently. - * - *

The tool moves blocks from highly utilized datanodes to poorly - * utilized datanodes iteratively. In each iteration a datanode moves or - * receives no more than the lesser of 10G bytes or the threshold fraction + * + *

The tool moves blocks from highly utilized datanodes to poorly + * utilized datanodes iteratively. In each iteration a datanode moves or + * receives no more than the lesser of 10G bytes or the threshold fraction * of its capacity. Each iteration runs no more than 20 minutes. * At the end of each iteration, the balancer obtains updated datanodes * information from the namenode. - * - *
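The balance criterion in the paragraphs above reduces to a per-datanode comparison against the cluster-wide utilization. A minimal sketch of that rule, as a hypothetical helper that is not part of this patch:

    // Sketch only: the threshold rule described in the Javadoc above.
    // Used/capacity figures are in bytes; threshold is a fraction,
    // e.g. 0.10 for the default 10%.
    class BalanceCriterionExample {
      static boolean isBalanced(long nodeUsed, long nodeCapacity,
                                long clusterUsed, long clusterCapacity,
                                double threshold) {
        double nodeUtilization = (double) nodeUsed / nodeCapacity;
        double clusterUtilization = (double) clusterUsed / clusterCapacity;
        return Math.abs(nodeUtilization - clusterUtilization) <= threshold;
      }
    }

The cluster as a whole counts as balanced when this holds for every datanode; a smaller threshold tightens the target but lengthens balancer runs.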

A system property that limits the balancer's use of bandwidth is + * + *

A system property that limits the balancer's use of bandwidth is * defined in the default configuration file: *

  * <property>
  *   <name>dfs.datanode.balance.bandwidthPerSec</name>
  *   <value>1048576</value>
  * <description>  Specifies the maximum bandwidth that each datanode
- * can utilize for the balancing purpose in term of the number of bytes 
+ * can utilize for the balancing purpose in terms of the number of bytes
  * per second.
  * </description>
  * </property>
  * 
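Reading the bandwidth limit shown in the property block above is an ordinary Configuration lookup. A minimal sketch, assuming the literal key and default value from the example; the class and method names are illustrative only:

    import org.apache.hadoop.conf.Configuration;

    class BalancerBandwidthExample {
      /** Per-datanode balancing bandwidth in bytes per second. */
      static long balancingBandwidth(Configuration conf) {
        // Key and default copied from the <property> example above.
        return conf.getLong("dfs.datanode.balance.bandwidthPerSec", 1048576L);
      }
    }

This only sketches the read side; the datanode throttling itself is outside the scope of these hunks.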
- * - *

This property determines the maximum speed at which a block will be - * moved from one datanode to another. The default value is 1MB/s. The higher - * the bandwidth, the faster a cluster can reach the balanced state, - * but with greater competition with application processes. If an - * administrator changes the value of this property in the configuration + * + *

This property determines the maximum speed at which a block will be + * moved from one datanode to another. The default value is 1MB/s. The higher + * the bandwidth, the faster a cluster can reach the balanced state, + * but with greater competition with application processes. If an + * administrator changes the value of this property in the configuration * file, the change is observed when HDFS is next restarted. - * + * *

MONITORING BALANCER PROGRESS - *

After the balancer is started, an output file name where the balancer - * progress will be recorded is printed on the screen. The administrator - * can monitor the running of the balancer by reading the output file. - * The output shows the balancer's status iteration by iteration. In each - * iteration it prints the starting time, the iteration number, the total - * number of bytes that have been moved in the previous iterations, - * the total number of bytes that are left to move in order for the cluster - * to be balanced, and the number of bytes that are being moved in this - * iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left + *

After the balancer is started, an output file name where the balancer + * progress will be recorded is printed on the screen. The administrator + * can monitor the running of the balancer by reading the output file. + * The output shows the balancer's status iteration by iteration. In each + * iteration it prints the starting time, the iteration number, the total + * number of bytes that have been moved in the previous iterations, + * the total number of bytes that are left to move in order for the cluster + * to be balanced, and the number of bytes that are being moved in this + * iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left * To Move" is decreasing. - * - *

Running multiple instances of the balancer in an HDFS cluster is + * + *

Running multiple instances of the balancer in an HDFS cluster is * prohibited by the tool. - * - *

The balancer automatically exits when any of the following five + * + *

The balancer automatically exits when any of the following five * conditions is satisfied: *

    *
  1. The cluster is balanced; @@ -157,9 +156,9 @@ *
  2. An IOException occurs while communicating with the namenode; *
  3. Another balancer is running. *
- * - *

Upon exit, a balancer returns an exit code and prints one of the - * following messages to the output file in corresponding to the above exit + * + *

Upon exit, a balancer returns an exit code and prints one of the + * following messages to the output file in corresponding to the above exit * reasons: *

    *
  1. The cluster is balanced. Exiting @@ -168,9 +167,9 @@ *
  2. Received an IO exception: failure reason. Exiting... *
  3. Another balancer is running. Exiting... *
- * - *

The administrator can interrupt the execution of the balancer at any - * time by running the command "stop-balancer.sh" on the machine where the + * + *

The administrator can interrupt the execution of the balancer at any + * time by running the command "stop-balancer.sh" on the machine where the * balancer is running. */ @@ -278,7 +277,7 @@ static int getFailedTimesSinceLastSuccessfulBalance() { /** * Construct a balancer. - * Initialize balancer. It sets the value of the threshold, and + * Initialize balancer. It sets the value of the threshold, and * builds the communication proxies to * namenode as a client and a secondary namenode and retry proxies * when connection fails. @@ -339,7 +338,7 @@ static int getFailedTimesSinceLastSuccessfulBalance() { DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); } - + private static long getCapacity(DatanodeStorageReport report, StorageType t) { long capacity = 0L; for(StorageReport r : report.getStorageReports()) { @@ -364,8 +363,8 @@ private long getRemaining(DatanodeStorageReport report, StorageType t) { /** * Given a datanode storage set, build a network topology and decide - * over-utilized storages, above average utilized storages, - * below average utilized storages, and underutilized storages. + * over-utilized storages, above average utilized storages, + * below average utilized storages, and underutilized storages. * The input datanode storage set is shuffled in order to randomize * to the storage matching later on. * @@ -378,7 +377,7 @@ private long init(List reports) { } policy.initAvgUtilization(); - // create network topology and classify utilization collections: + // create network topology and classify utilization collections: // over-utilized, above-average, below-average and under-utilized. long overLoadedBytes = 0L, underLoadedBytes = 0L; for(DatanodeStorageReport r : reports) { @@ -386,10 +385,10 @@ private long init(List reports) { final boolean isSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo()); for(StorageType t : StorageType.getMovableTypes()) { final Double utilization = policy.getUtilization(r, t); - if (utilization == null) { // datanode does not have such storage type + if (utilization == null) { // datanode does not have such storage type continue; } - + final double average = policy.getAvgUtilization(t); if (utilization >= average && !isSource) { LOG.info(dn + "[" + t + "] has utilization=" + utilization @@ -428,12 +427,12 @@ private long init(List reports) { } logUtilizationCollections(); - + Preconditions.checkState(dispatcher.getStorageGroupMap().size() == overUtilized.size() + underUtilized.size() + aboveAvgUtilized.size() + belowAvgUtilized.size(), "Mismatched number of storage groups"); - + // return number of bytes to be moved in order to make the cluster balanced return Math.max(overLoadedBytes, underLoadedBytes); } @@ -481,12 +480,12 @@ private long chooseStorageGroups() { if (dispatcher.getCluster().isNodeGroupAware()) { chooseStorageGroups(Matcher.SAME_NODE_GROUP); } - + // Then, match nodes on the same rack chooseStorageGroups(Matcher.SAME_RACK); // At last, match all remaining nodes chooseStorageGroups(Matcher.ANY_OTHER); - + return dispatcher.bytesToMove(); } @@ -497,8 +496,8 @@ private void chooseStorageGroups(final Matcher matcher) { */ LOG.info("chooseStorageGroups for " + matcher + ": overUtilized => underUtilized"); chooseStorageGroups(overUtilized, underUtilized, matcher); - - /* match each remaining overutilized datanode (source) to + + /* match each remaining overutilized datanode (source) to * below average utilized datanodes (targets). 
* Note only overutilized datanodes that haven't had that max bytes to move * satisfied in step 1 are selected @@ -506,7 +505,7 @@ private void chooseStorageGroups(final Matcher matcher) { LOG.info("chooseStorageGroups for " + matcher + ": overUtilized => belowAvgUtilized"); chooseStorageGroups(overUtilized, belowAvgUtilized, matcher); - /* match each remaining underutilized datanode (target) to + /* match each remaining underutilized datanode (target) to * above average utilized datanodes (source). * Note only underutilized datanodes that have not had that max bytes to * move satisfied in step 1 are selected. @@ -540,7 +539,7 @@ private boolean choose4One(StorageGroup g, Collection candidates, Matcher matcher) { final Iterator i = candidates.iterator(); final C chosen = chooseCandidate(g, i, matcher); - + if (chosen == null) { return false; } @@ -554,7 +553,7 @@ private boolean choose4One(StorageGroup g, } return true; } - + private void matchSourceWithTargetToMove(Source source, StorageGroup target) { long size = Math.min(source.availableSizeToMove(), target.availableSizeToMove()); final Task task = new Task(target, size); @@ -564,7 +563,7 @@ private void matchSourceWithTargetToMove(Source source, StorageGroup target) { LOG.info("Decided to move "+StringUtils.byteDesc(size)+" bytes from " + source.getDisplayName() + " to " + target.getDisplayName()); } - + /** Choose a candidate for the given datanode. */ private C chooseCandidate(G g, Iterator candidates, Matcher matcher) { @@ -693,8 +692,8 @@ Result runOneIteration() { dispatcher.moveTasksTotal()); } - /* For each pair of , start a thread that repeatedly - * decide a block to be moved and its proxy source, + /* For each pair of , start a thread that repeatedly + * decide a block to be moved and its proxy source, * then initiates the move until all bytes are moved or no more block * available to move. * Exit no byte has been moved for 5 consecutive iterations. @@ -722,7 +721,7 @@ Result runOneIteration() { * Balance all namenodes. * For each iteration, * for each namenode, - * execute a {@link Balancer} to work through all datanodes once. + * execute a {@link Balancer} to work through all datanodes once. */ static private int doBalance(Collection namenodes, Collection nsIds, final BalancerParameters p, Configuration conf) @@ -744,7 +743,7 @@ static private int doBalance(Collection namenodes, System.out.println("Time Stamp Iteration#" + " Bytes Already Moved Bytes Left To Move Bytes Being Moved" + " NameNode"); - + List connectors = Collections.emptyList(); try { connectors = NameNodeConnector.newNameNodeConnectors(namenodes, nsIds, @@ -888,7 +887,7 @@ private static String time2Str(long elapsedTime) { static class Cli extends Configured implements Tool { /** * Parse arguments and then run Balancer. - * + * * @param args command specific arguments. * @return exit code. 0 indicates success, non-zero indicates failure. 
*/ @@ -927,7 +926,7 @@ static BalancerParameters parse(String[] args) { try { for(int i = 0; i < args.length; i++) { if ("-threshold".equalsIgnoreCase(args[i])) { - checkArgument(++i < args.length, + Preconditions.checkArgument(++i < args.length, "Threshold value is missing: args = " + Arrays.toString(args)); try { double threshold = Double.parseDouble(args[i]); @@ -944,7 +943,7 @@ static BalancerParameters parse(String[] args) { throw e; } } else if ("-policy".equalsIgnoreCase(args[i])) { - checkArgument(++i < args.length, + Preconditions.checkArgument(++i < args.length, "Policy value is missing: args = " + Arrays.toString(args)); try { b.setBalancingPolicy(BalancingPolicy.parse(args[i])); @@ -965,7 +964,7 @@ static BalancerParameters parse(String[] args) { i = processHostList(args, i, "source", sourceNodes); b.setSourceNodes(sourceNodes); } else if ("-blockpools".equalsIgnoreCase(args[i])) { - checkArgument( + Preconditions.checkArgument( ++i < args.length, "blockpools value is missing: args = " + Arrays.toString(args)); @@ -974,7 +973,7 @@ static BalancerParameters parse(String[] args) { + blockpools.toString()); b.setBlockpools(blockpools); } else if ("-idleiterations".equalsIgnoreCase(args[i])) { - checkArgument(++i < args.length, + Preconditions.checkArgument(++i < args.length, "idleiterations value is missing: args = " + Arrays .toString(args)); int maxIdleIteration = Integer.parseInt(args[i]); @@ -994,7 +993,7 @@ static BalancerParameters parse(String[] args) { + Arrays.toString(args)); } } - checkArgument(excludedNodes == null || includedNodes == null, + Preconditions.checkArgument(excludedNodes == null || includedNodes == null, "-exclude and -include options cannot be specified together."); } catch(RuntimeException e) { printUsage(System.err); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java index 0b2a47f9afb58..31e152a84ed1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Dispatcher.java @@ -84,7 +84,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** Dispatching block replica moves between datanodes. 
*/ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java index f7ebc464dfc45..1ba7029af912a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.RateLimiter; import org.apache.hadoop.ha.HAServiceProtocol; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java index f4bd87542b969..9e349423daa77 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceBlockPlacementPolicy.java @@ -29,7 +29,7 @@ import java.util.Random; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceRackFaultTolerantBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceRackFaultTolerantBlockPlacementPolicy.java index 365990a7ce942..85de2fee3e2f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceRackFaultTolerantBlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/AvailableSpaceRackFaultTolerantBlockPlacementPolicy.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java index df14107c322dd..a79e4c594ac2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockType; import org.apache.hadoop.hdfs.protocol.HdfsConstants; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java index b8047a8d08015..81a559e0f04c8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java @@ -22,7 +22,7 @@ import java.util.LinkedList; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java index d68b72d6c6277..651f39f2b69a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguous.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java index 53368c98a4820..4b8d092935a00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.BlockType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index 3c2af1d340909..26948be1141d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -126,7 +126,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java index ad4333159238a..23ed61e0cb117 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerSafeMode.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java index 9f717217da538..85468a57bde47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java @@ -25,7 +25,7 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index 089f9a3b12942..39a40f52ae5f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -26,7 +26,7 @@ import java.util.*; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.AddBlockFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java index 2dc4a2b0e6f04..552db53c4e564 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReportLeaseManager.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; 
import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java index 58090a8a5988c..1ca7de7ed5683 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java index 3ce5ef07acdca..bcbd94cd3cc7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockToMarkCorrupt.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.hdfs.server.blockmanagement.CorruptReplicasMap.Reason; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.protocol.Block; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java index eab58124cb11c..4cc404f55c5d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/CacheReplicationMonitor.java @@ -55,7 +55,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Scans the namesystem, scheduling blocks to be cached as appropriate. 
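The one-line import changes in the hunks above all move from the shaded Guava class to org.apache.hadoop.util.Preconditions, which keeps the same (boolean, message) call shape, so existing checkArgument and checkState call sites compile unchanged. A minimal sketch of the resulting usage; the class and arguments are illustrative only:

    import org.apache.hadoop.util.Preconditions;

    class PreconditionsUsageExample {
      static void validate(int maxTrackedNodes, int actualGroups, int expectedGroups) {
        // Same call shapes as the Guava Preconditions this patch removes.
        Preconditions.checkArgument(maxTrackedNodes >= 0,
            "Cannot set a negative value for the tracked-node limit");
        Preconditions.checkState(actualGroups == expectedGroups,
            "Mismatched number of storage groups");
      }
    }

Both forms already appear unchanged in the Balancer and DatanodeAdminManager hunks of this patch, which is why only the import line moves.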
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java index f43f8cf10d83d..06ed2118b8b7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminDefaultMonitor.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.server.namenode.INode; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java index 1fc0bf4abf909..550e7f2bb880e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeAdminManager.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.blockmanagement; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkArgument; import static org.apache.hadoop.util.Time.monotonicNow; import java.util.Queue; @@ -30,6 +29,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; +import static org.apache.hadoop.util.Preconditions.checkArgument; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -130,8 +130,8 @@ void activate(Configuration conf) { DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES, DFSConfigKeys .DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES_DEFAULT); - checkArgument(maxConcurrentTrackedNodes >= 0, "Cannot set a negative " + - "value for " + checkArgument(maxConcurrentTrackedNodes >= 0, + "Cannot set a negative value for " + DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_MAX_CONCURRENT_TRACKED_NODES); Class cls = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 5a4b2182d7dab..fc56ad2770485 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -24,7 +24,7 @@ import org.apache.hadoop.util.Sets; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses; import org.apache.hadoop.fs.StorageType; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java index af0c92df5e272..d12e5fbae1362 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostSet.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.hadoop.thirdparty.com.google.common.collect.UnmodifiableIterator; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java index 03b180121fcf7..69acec90598d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java @@ -32,7 +32,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion; import org.apache.hadoop.util.StringUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java index 051e2d2c5259b..26e95fc3e7935 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/MetricsLoggerTask.java @@ -31,7 +31,7 @@ import javax.management.MalformedObjectNameException; import javax.management.ObjectName; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -58,11 +58,11 @@ public class MetricsLoggerTask implements Runnable { } } - private Log metricsLog; + private Logger metricsLog; private String nodeName; private short maxLogLineLength; - public MetricsLoggerTask(Log metricsLog, String nodeName, + public MetricsLoggerTask(Logger metricsLog, String nodeName, short maxLogLineLength) { this.metricsLog = metricsLog; this.nodeName = nodeName; @@ -118,7 +118,7 @@ private String trimLine(String valueStr) { .substring(0, maxLogLineLength) + "..."); } - private static boolean hasAppenders(Log logger) { + private static boolean hasAppenders(Logger logger) { if (!(logger instanceof Log4JLogger)) { // Don't bother trying to determine the presence of appenders. return true; @@ -150,7 +150,7 @@ private static Set getFilteredAttributes(MBeanInfo mBeanInfo) { * Make the metrics logger async and add all pre-existing appenders to the * async appender. 
*/ - public static void makeMetricsLoggerAsync(Log metricsLog) { + public static void makeMetricsLoggerAsync(Logger metricsLog) { if (!(metricsLog instanceof Log4JLogger)) { LOG.warn("Metrics logging will not be async since " + "the logger is not log4j"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java index 83a82566f6c3e..9603958cd0b76 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java @@ -25,6 +25,7 @@ import java.lang.management.ManagementFactory; import java.nio.channels.FileLock; import java.nio.channels.OverlappingFileLockException; +import java.nio.charset.StandardCharsets; import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.attribute.PosixFilePermission; @@ -53,8 +54,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -944,7 +944,7 @@ FileLock tryLock() throws IOException { LOG.error("Unable to acquire file lock on path {}", lockF); throw new OverlappingFileLockException(); } - file.write(jvmName.getBytes(Charsets.UTF_8)); + file.write(jvmName.getBytes(StandardCharsets.UTF_8)); LOG.info("Lock on {} acquired by nodename {}", lockF, jvmName); } catch(OverlappingFileLockException oe) { // Cannot read from the locked file on Windows. 
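The Storage.java hunk above swaps Guava's Charsets.UTF_8 for the JDK constant; both name the same UTF-8 Charset, so the change is a drop-in. A minimal sketch with illustrative names:

    import java.nio.charset.StandardCharsets;

    class CharsetMigrationExample {
      static byte[] lockFileContents(String jvmName) {
        // Previously jvmName.getBytes(Charsets.UTF_8), a shaded Guava constant.
        return jvmName.getBytes(StandardCharsets.UTF_8);
      }
    }

The same substitution appears earlier in this patch for the paxos debug writer and the block token handshake message.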
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java index c834b6c5db027..c30f00d68cfc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java @@ -36,7 +36,7 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,6 +54,11 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.util.Preconditions; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + @InterfaceAudience.Private public final class Util { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index 07dd18128a8d9..576624c4143c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; @@ -40,6 +40,7 @@ import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; @@ -143,11 +144,11 @@ void writeUnlock() { void refreshNNList(String serviceId, List nnIds, ArrayList addrs, ArrayList lifelineAddrs) throws IOException { - Set oldAddrs = Sets.newHashSet(); + Set oldAddrs = new HashSet<>(); for (BPServiceActor actor : bpServices) { oldAddrs.add(actor.getNNSocketAddress()); } - Set newAddrs = Sets.newHashSet(addrs); + Set newAddrs = new HashSet<>(addrs); // Process added NNs Set addedNNs = Sets.difference(newAddrs, oldAddrs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java index 265267da887d2..eb2bfeb78698f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockChecksumHelper.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.DFSUtilClient; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java index 
a31ef1f449414..0bf763f7e9cf1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java @@ -30,10 +30,13 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Sets; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; +import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.slf4j.Logger; /** @@ -195,9 +198,9 @@ private void doRefreshNamenodes( // Step 2. Any nameservices we currently have but are no longer present // need to be removed. - toRemove = Sets.newHashSet(Sets.difference( - bpByNameserviceId.keySet(), addrMap.keySet())); - + toRemove = Sets.difference( + bpByNameserviceId.keySet(), addrMap.keySet()); + assert toRefresh.size() + toAdd.size() == addrMap.size() : "toAdd: " + Joiner.on(",").useForNull("").join(toAdd) + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java index 9f6b077f6ffbb..f6ee4bb9f09c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java @@ -48,9 +48,11 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; + /** * Manages storage for the set of BlockPoolSlices which share a particular * block pool id, on this DataNode. 
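The BPOfferService and BlockPoolManager hunks above replace Guava's Sets.newHashSet(...) with plain JDK constructors while keeping set algebra on org.apache.hadoop.util.Sets. A minimal sketch of the combined pattern; the method and parameter names are illustrative only:

    import java.net.InetSocketAddress;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.util.Sets;

    class SetMigrationExample {
      static Set<InetSocketAddress> addedAddrs(List<InetSocketAddress> refreshed,
                                               Set<InetSocketAddress> known) {
        // Was Sets.newHashSet(refreshed); the JDK constructor is equivalent here.
        Set<InetSocketAddress> newAddrs = new HashSet<>(refreshed);
        // Set difference stays on the Hadoop util class.
        return Sets.difference(newAddrs, known);
      }
    }

Note that BlockPoolManager now assigns the Sets.difference result directly instead of copying it into a fresh HashSet first.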
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index e0d9c3f033440..21562dae8cec9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -35,7 +35,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.zip.Checksum; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FSOutputSummer; import org.apache.hadoop.fs.StorageType; @@ -73,7 +73,7 @@ **/ class BlockReceiver implements Closeable { public static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; + static final Logger ClientTraceLog = DataNode.ClientTraceLog; @VisibleForTesting static long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024; @@ -81,7 +81,7 @@ class BlockReceiver implements Closeable { private DataInputStream in = null; // from where data are read private DataChecksum clientChecksum; // checksum used by client private DataChecksum diskChecksum; // checksum we write to disk - + /** * In the case that the client is writing with a different * checksum polynomial than the block is stored with on disk, @@ -91,9 +91,9 @@ class BlockReceiver implements Closeable { private DataOutputStream checksumOut = null; // to crc file at local disk private final int bytesPerChecksum; private final int checksumSize; - + private final PacketReceiver packetReceiver = new PacketReceiver(false); - + protected final String inAddr; protected final String myAddr; private String mirrorAddr; @@ -115,11 +115,11 @@ class BlockReceiver implements Closeable { /** The client name. 
It is empty if a datanode is the client */ private final String clientname; - private final boolean isClient; + private final boolean isClient; private final boolean isDatanode; /** the block to receive */ - private final ExtendedBlock block; + private final ExtendedBlock block; /** the replica to write */ private ReplicaInPipeline replicaInfo; /** pipeline stage */ @@ -142,7 +142,7 @@ class BlockReceiver implements Closeable { private boolean isReplaceBlock = false; private DataOutputStream replyOut = null; private long maxWriteToDiskMs = 0; - + private boolean pinning; private final AtomicLong lastSentTime = new AtomicLong(0L); private long maxSendIdleTime; @@ -150,8 +150,8 @@ class BlockReceiver implements Closeable { BlockReceiver(final ExtendedBlock block, final StorageType storageType, final DataInputStream in, final String inAddr, final String myAddr, - final BlockConstructionStage stage, - final long newGs, final long minBytesRcvd, final long maxBytesRcvd, + final BlockConstructionStage stage, + final long newGs, final long minBytesRcvd, final long maxBytesRcvd, final String clientname, final DatanodeInfo srcDataNode, final DataNode datanode, DataChecksum requestedChecksum, CachingStrategy cachingStrategy, @@ -244,7 +244,7 @@ class BlockReceiver implements Closeable { replicaHandler = datanode.data.createTemporary(storageType, storageId, block, isTransfer); break; - default: throw new IOException("Unsupported stage " + stage + + default: throw new IOException("Unsupported stage " + stage + " while receiving block " + block + " from " + inAddr); } } @@ -255,8 +255,8 @@ class BlockReceiver implements Closeable { this.syncBehindWrites = datanode.getDnConf().syncBehindWrites; this.syncBehindWritesInBackground = datanode.getDnConf(). syncBehindWritesInBackground; - - final boolean isCreate = isDatanode || isTransfer + + final boolean isCreate = isDatanode || isTransfer || stage == BlockConstructionStage.PIPELINE_SETUP_CREATE; streams = replicaInfo.createStreams(isCreate, requestedChecksum); assert streams != null : "null streams!"; @@ -274,7 +274,7 @@ class BlockReceiver implements Closeable { // write data chunk header if creating a new replica if (isCreate) { BlockMetadataHeader.writeHeader(checksumOut, diskChecksum); - } + } } catch (ReplicaAlreadyExistsException bae) { throw bae; } catch (ReplicaNotFoundException bne) { @@ -285,7 +285,7 @@ class BlockReceiver implements Closeable { } IOUtils.closeStream(this); cleanupBlock(); - + // check if there is a disk error IOException cause = DatanodeUtil.getCauseIfDiskError(ioe); DataNode.LOG @@ -295,7 +295,7 @@ class BlockReceiver implements Closeable { ioe = cause; // Volume error check moved to FileIoProvider } - + throw ioe; } } @@ -332,7 +332,7 @@ public void close() throws IOException { IOException ioe = null; if (syncOnClose && (streams.getDataOut() != null || checksumOut != null)) { - datanode.metrics.incrFsyncCount(); + datanode.metrics.incrFsyncCount(); } long flushTotalNanos = 0; boolean measuredFlushTime = false; @@ -480,9 +480,9 @@ private void handleMirrorOutError(IOException ioe) throws IOException { mirrorError = true; } } - + /** - * Verify multiple CRC chunks. + * Verify multiple CRC chunks. 
*/ private void verifyChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) throws IOException { @@ -503,7 +503,7 @@ private void verifyChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) srcDataNode + " to namenode"); datanode.reportRemoteBadBlock(srcDataNode, block); } catch (IOException e) { - LOG.warn("Failed to report bad " + block + + LOG.warn("Failed to report bad " + block + " from datanode " + srcDataNode + " to namenode"); } } @@ -511,12 +511,12 @@ private void verifyChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) + block + " from " + inAddr); } } - - + + /** * Translate CRC chunks from the client's checksum implementation * to the disk checksum implementation. - * + * * This does not verify the original checksums, under the assumption * that they have already been validated. */ @@ -524,13 +524,13 @@ private void translateChunks(ByteBuffer dataBuf, ByteBuffer checksumBuf) { diskChecksum.calculateChunkedSums(dataBuf, checksumBuf); } - /** + /** * Check whether checksum needs to be verified. - * Skip verifying checksum iff this is not the last one in the + * Skip verifying checksum iff this is not the last one in the * pipeline and clientName is non-null. i.e. Checksum is verified - * on all the datanodes when the data is being written by a - * datanode rather than a client. Whe client is writing the data, - * protocol includes acks and only the last datanode needs to verify + * on all the datanodes when the data is being written by a + * datanode rather than a client. Whe client is writing the data, + * protocol includes acks and only the last datanode needs to verify * checksum. * @return true if checksum verification is needed, otherwise false. */ @@ -538,7 +538,7 @@ private boolean shouldVerifyChecksum() { return (mirrorOut == null || isDatanode || needsChecksumTranslation); } - /** + /** * Receives and processes a packet. It can contain many chunks. * returns the number of data bytes that the packet has. */ @@ -554,15 +554,15 @@ private int receivePacket() throws IOException { // Sanity check the header if (header.getOffsetInBlock() > replicaInfo.getNumBytes()) { - throw new IOException("Received an out-of-sequence packet for " + block + + throw new IOException("Received an out-of-sequence packet for " + block + "from " + inAddr + " at offset " + header.getOffsetInBlock() + ". 
Expecting packet starting at " + replicaInfo.getNumBytes()); } if (header.getDataLen() < 0) { - throw new IOException("Got wrong length during writeBlock(" + block + - ") from " + inAddr + " at offset " + + throw new IOException("Got wrong length during writeBlock(" + block + + ") from " + inAddr + " at offset " + header.getOffsetInBlock() + ": " + - header.getDataLen()); + header.getDataLen()); } long offsetInBlock = header.getOffsetInBlock(); @@ -585,7 +585,7 @@ private int receivePacket() throws IOException { if (replicaInfo.getNumBytes() < offsetInBlock) { replicaInfo.setNumBytes(offsetInBlock); } - + // put in queue for pending acks, unless sync was requested if (responder != null && !syncBlock && !shouldVerifyChecksum()) { ((PacketResponder) responder.getRunnable()).enqueue(seqno, @@ -623,10 +623,10 @@ private int receivePacket() throws IOException { handleMirrorOutError(e); } } - + ByteBuffer dataBuf = packetReceiver.getDataSlice(); ByteBuffer checksumBuf = packetReceiver.getChecksumSlice(); - + if (lastPacketInBlock || len == 0) { if(LOG.isDebugEnabled()) { LOG.debug("Receiving an empty packet or the end of the block " + block); @@ -661,7 +661,7 @@ private int receivePacket() throws IOException { } throw new IOException("Terminating due to a checksum error." + ioe); } - + if (needsChecksumTranslation) { // overwrite the checksums in the packet buffer with the // appropriate polynomial for the disk storage. @@ -674,7 +674,7 @@ private int receivePacket() throws IOException { checksumBuf = ByteBuffer.allocate(checksumLen); diskChecksum.calculateChunkedSums(dataBuf, checksumBuf); } - + // by this point, the data in the buffer uses the disk checksum final boolean shouldNotWriteChecksum = checksumReceivedLen == 0 @@ -725,7 +725,7 @@ private int receivePacket() throws IOException { Checksum partialCrc = null; if (doCrcRecalc) { if (LOG.isDebugEnabled()) { - LOG.debug("receivePacket for " + block + LOG.debug("receivePacket for " + block + ": previous write did not end at the chunk boundary." + " onDiskLen=" + onDiskLen); } @@ -737,12 +737,12 @@ private int receivePacket() throws IOException { // The data buffer position where write will begin. If the packet // data and on-disk data have no overlap, this will not be at the // beginning of the buffer. - int startByteToDisk = (int)(onDiskLen-firstByteInBlock) + int startByteToDisk = (int)(onDiskLen-firstByteInBlock) + dataBuf.arrayOffset() + dataBuf.position(); // Actual number of data bytes to write. int numBytesToDisk = (int)(offsetInBlock-onDiskLen); - + // Write data to disk. 
long begin = Time.monotonicNow(); streams.writeDataToDisk(dataBuf.array(), @@ -826,7 +826,7 @@ private int receivePacket() throws IOException { /// flush entire packet, sync if requested flushOrSync(syncBlock); - + replicaInfo.setLastChecksumAndDataLen(offsetInBlock, lastCrc); datanode.metrics.incrBytesWritten(len); @@ -864,7 +864,7 @@ private int receivePacket() throws IOException { if (throttler != null) { // throttle I/O throttler.throttle(len); } - + return lastPacketInBlock?-1:len; } @@ -910,7 +910,7 @@ private void manageWriterOsCache(long offsetInBlock) { // <========= sync ===========> // +-----------------------O--------------------------X // start last curPos - // of file + // of file // if (syncBehindWrites) { if (syncBehindWritesInBackground) { @@ -925,15 +925,15 @@ private void manageWriterOsCache(long offsetInBlock) { } } // - // For POSIX_FADV_DONTNEED, we want to drop from the beginning + // For POSIX_FADV_DONTNEED, we want to drop from the beginning // of the file to a position prior to the current position. // - // <=== drop =====> + // <=== drop =====> // <---W---> // +--------------+--------O--------------------------X // start dropPos last curPos - // of file - // + // of file + // long dropPos = lastCacheManagementOffset - CACHE_DROP_LAG_BYTES; if (dropPos > 0 && dropCacheBehindWrites) { streams.dropCacheBehindWrites(block.getBlockName(), 0, dropPos, @@ -952,7 +952,7 @@ private void manageWriterOsCache(long offsetInBlock) { LOG.warn("Error managing cache for writer of block " + block, t); } } - + public void sendOOB() throws IOException, InterruptedException { if (isDatanode) { return; @@ -982,7 +982,7 @@ void receiveBlock( try { if (isClient && !isTransfer) { - responder = new Daemon(datanode.threadGroup, + responder = new Daemon(datanode.threadGroup, new PacketResponder(replyOut, mirrIn, downstreams)); responder.start(); // start thread to processes responses } @@ -1047,15 +1047,15 @@ void receiveBlock( out.write(Long.toString(Time.now() + restartBudget)); out.flush(); } catch (IOException ioe) { - // The worst case is not recovering this RBW replica. + // The worst case is not recovering this RBW replica. // Client will fall back to regular pipeline recovery. } finally { IOUtils.closeStream(streams.getDataOut()); } - try { + try { // Even if the connection is closed after the ack packet is - // flushed, the client can react to the connection closure - // first. Insert a delay to lower the chance of client + // flushed, the client can react to the connection closure + // first. Insert a delay to lower the chance of client // missing the OOB ack. Thread.sleep(1000); } catch (InterruptedException ie) { @@ -1127,7 +1127,7 @@ private String getVolumeBaseUri() { return "unavailable"; } - /** Cleanup a partial block + /** Cleanup a partial block * if this write is for a replication request (and not from a client) */ private void cleanupBlock() throws IOException { @@ -1241,7 +1241,7 @@ class PacketResponder implements Runnable, Closeable { /** The type of this responder */ private final PacketResponderType type; /** for log and error messages */ - private final String myString; + private final String myString; private boolean sending = false; @Override @@ -1272,7 +1272,7 @@ private boolean isRunning() { // interrupted by the receiver thread. return running && (datanode.shouldRun || datanode.isRestarting()); } - + /** * enqueue the seqno that is still be to acked by the downstream datanode. 
* @param seqno sequence number of the packet @@ -1304,7 +1304,7 @@ void enqueue(final long seqno, final boolean lastPacketInBlock, void sendOOBResponse(final Status ackStatus) throws IOException, InterruptedException { if (!running) { - LOG.info("Cannot send OOB response " + ackStatus + + LOG.info("Cannot send OOB response " + ackStatus + ". Responder not running."); return; } @@ -1335,7 +1335,7 @@ void sendOOBResponse(final Status ackStatus) throws IOException, } } } - + /** Wait for a packet with given {@code seqno} to be enqueued to ackQueue */ Packet waitForAckHead(long seqno) throws InterruptedException { synchronized (ackQueue) { @@ -1522,7 +1522,7 @@ public void run() { datanode.metrics.decrDataNodePacketResponderCount(); LOG.info(myString + " terminating"); } - + /** * Finalize the block and close the block file * @param startTime time when BlockReceiver started receiving the block @@ -1540,7 +1540,7 @@ private void finalizeBlock(long startTime) throws IOException { if (pinning) { datanode.data.setPinning(block); } - + datanode.closeBlock(block, null, replicaInfo.getStorageUuid(), replicaInfo.isOnTransientStorage()); if (ClientTraceLog.isInfoEnabled() && isClient) { @@ -1555,7 +1555,7 @@ private void finalizeBlock(long startTime) throws IOException { + " from " + inAddr); } } - + /** * The wrapper for the unprotected version. This is only called by * the responder's run() method. @@ -1592,7 +1592,7 @@ private void sendAckUpstream(PipelineAck ack, long seqno, } } catch (InterruptedException ie) { // The responder was interrupted. Make it go down without - // interrupting the receiver(writer) thread. + // interrupting the receiver(writer) thread. running = false; } } @@ -1671,7 +1671,7 @@ private void sendAckUpstreamUnprotected(PipelineAck ack, long seqno, + "response has been sent upstream."); } } - + /** * Remove a packet from the head of the ack queue * diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java index d575b592bd443..e4861f9774870 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java @@ -19,7 +19,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.DatanodeID; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java index 53da12f774293..69e3db11bdf73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockScanner.java @@ -35,7 +35,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.datanode.VolumeScanner.ScanResultHandler; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index 94cdf449b1066..405741315aa1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -32,7 +32,7 @@ import java.util.Arrays; import java.util.concurrent.TimeUnit; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -57,25 +57,25 @@ import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_SEQUENTIAL; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; /** * Reads a block from the disk and sends it to a recipient. - * + * * Data sent from the BlockeSender in the following format: *
Data format:

  *    +--------------------------------------------------+
  *    | ChecksumHeader | Sequence of data PACKETS...     |
- *    +--------------------------------------------------+ 
- * 
+ *    +--------------------------------------------------+
+ *
  * ChecksumHeader format:
  *    +--------------------------------------------------+
  *    | 1 byte CHECKSUM_TYPE | 4 byte BYTES_PER_CHECKSUM |
- *    +--------------------------------------------------+ 
- * 
+ *    +--------------------------------------------------+
+ *
  * An empty packet is sent to mark the end of block and read completion.
- * 
+ *
  * PACKET Contains a packet header, checksum and data. Amount of data
  * carried is set by BUFFER_SIZE.
  *
@@ -86,24 +86,24 @@
  *   +-----------------------------------------------------+
  *   | actual data ......                                  |
  *   +-----------------------------------------------------+
- * 
+ *
  *   Data is made of Chunks. Each chunk is of length <= BYTES_PER_CHECKSUM.
  *   A checksum is calculated for each chunk.
- *  
+ *
  *   x = (length of data + BYTE_PER_CHECKSUM - 1)/BYTES_PER_CHECKSUM *
  *       CHECKSUM_SIZE
- *  
- *   CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32) 
+ *
+ *   CHECKSUM_SIZE depends on CHECKSUM_TYPE (usually, 4 for CRC32)
  *  
- * - * The client reads data until it receives a packet with - * "LastPacketInBlock" set to true or with a zero length. If there is + * + * The client reads data until it receives a packet with + * "LastPacketInBlock" set to true or with a zero length. If there is * no checksum error, it replies to DataNode with OP_STATUS_CHECKSUM_OK. */ class BlockSender implements java.io.Closeable { static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; - private static final boolean is32Bit = + static final Logger ClientTraceLog = DataNode.ClientTraceLog; + private static final boolean is32Bit = System.getProperty("sun.arch.data.model").equals("32"); /** * Minimum buffer used while sending data to clients. Used only if @@ -118,7 +118,7 @@ class BlockSender implements java.io.Closeable { } private static final int TRANSFERTO_BUFFER_SIZE = Math.max( IO_FILE_BUFFER_SIZE, MIN_BUFFER_WITH_TRANSFERTO); - + /** the block to read from */ private final ExtendedBlock block; @@ -162,17 +162,17 @@ class BlockSender implements java.io.Closeable { private ReadaheadRequest curReadahead; private final boolean alwaysReadahead; - + private final boolean dropCacheBehindLargeReads; - + private final boolean dropCacheBehindAllReads; - + private long lastCacheDropOffset; private final FileIoProvider fileIoProvider; - + @VisibleForTesting static long CACHE_DROP_INTERVAL_BYTES = 1024 * 1024; // 1MB - + /** * See {{@link BlockSender#isLongRead()} */ @@ -188,7 +188,7 @@ class BlockSender implements java.io.Closeable { private static final String EIO_ERROR = "Input/output error"; /** * Constructor - * + * * @param block Block that is being read * @param startOffset starting offset to read from * @param length length of data to read @@ -242,7 +242,7 @@ class BlockSender implements java.io.Closeable { this.readaheadLength = cachingStrategy.getReadahead().longValue(); } this.datanode = datanode; - + if (verifyChecksum) { // To simplify implementation, callers may not specify verification // without sending. @@ -297,10 +297,10 @@ class BlockSender implements java.io.Closeable { // Obtain a reference before reading data volumeRef = datanode.data.getVolume(block).obtainReference(); - /* + /* * (corruptChecksumOK, meta_file_exist): operation - * True, True: will verify checksum - * True, False: No verify, e.g., need to read data from a corrupted file + * True, True: will verify checksum + * True, False: No verify, e.g., need to read data from a corrupted file * False, True: will verify checksum * False, False: throws IOException file not found */ @@ -371,19 +371,19 @@ class BlockSender implements java.io.Closeable { /* * If chunkSize is very large, then the metadata file is mostly * corrupted. For now just truncate bytesPerchecksum to blockLength. - */ + */ int size = csum.getBytesPerChecksum(); if (size > 10*1024*1024 && size > replicaVisibleLength) { csum = DataChecksum.newDataChecksum(csum.getChecksumType(), Math.max((int)replicaVisibleLength, 10*1024*1024)); - size = csum.getBytesPerChecksum(); + size = csum.getBytesPerChecksum(); } chunkSize = size; checksum = csum; checksumSize = checksum.getChecksumSize(); length = length < 0 ? replicaVisibleLength : length; - // end is either last byte on disk or the length for which we have a + // end is either last byte on disk or the length for which we have a // checksum long end = chunkChecksum != null ? 
chunkChecksum.getDataLength() : replica.getBytesOnDisk(); @@ -395,7 +395,7 @@ class BlockSender implements java.io.Closeable { ":sendBlock() : " + msg); throw new IOException(msg); } - + // Ensure read offset is position at the beginning of chunk offset = startOffset - (startOffset % chunkSize); if (length >= 0) { @@ -408,7 +408,7 @@ class BlockSender implements java.io.Closeable { // will use on-disk checksum here since the end is a stable chunk end = tmpLen; } else if (chunkChecksum != null) { - // last chunk is changing. flag that we need to use in-memory checksum + // last chunk is changing. flag that we need to use in-memory checksum this.lastChunkChecksum = chunkChecksum; } } @@ -497,7 +497,7 @@ public void close() throws IOException { ris = null; } } - + private static Replica getReplica(ExtendedBlock block, DataNode datanode) throws ReplicaNotFoundException { Replica replica = datanode.data.getReplica(block.getBlockPoolId(), @@ -510,8 +510,8 @@ private static Replica getReplica(ExtendedBlock block, DataNode datanode) /** * Converts an IOExcpetion (not subclasses) to SocketException. - * This is typically done to indicate to upper layers that the error - * was a socket error rather than often more serious exceptions like + * This is typically done to indicate to upper layers that the error + * was a socket error rather than often more serious exceptions like * disk errors. */ private static IOException ioeToSocketException(IOException ioe) { @@ -520,7 +520,7 @@ private static IOException ioeToSocketException(IOException ioe) { IOException se = new SocketException("Original Exception : " + ioe); se.initCause(ioe); /* Change the stacktrace so that original trace is not truncated - * when printed.*/ + * when printed.*/ se.setStackTrace(ioe.getStackTrace()); return se; } @@ -529,16 +529,16 @@ private static IOException ioeToSocketException(IOException ioe) { } /** - * @param datalen Length of data + * @param datalen Length of data * @return number of chunks for data of given size */ private int numberOfChunks(long datalen) { return (int) ((datalen + chunkSize - 1)/chunkSize); } - + /** * Sends a packet with up to maxChunks chunks of data. - * + * * @param pkt buffer used for writing packet data * @param maxChunks maximum number of chunks to send * @param out stream to send data to @@ -549,7 +549,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, boolean transferTo, DataTransferThrottler throttler) throws IOException { int dataLen = (int) Math.min(endOffset - offset, (chunkSize * (long) maxChunks)); - + int numChunks = numberOfChunks(dataLen); // Number of chunks be sent in the packet int checksumDataLen = numChunks * checksumSize; int packetLen = dataLen + checksumDataLen + 4; @@ -564,16 +564,16 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, // H = header and length prefixes // C = checksums // D? = data, if transferTo is false. 
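To make the packet sizing in sendPacket/numberOfChunks above concrete, here is a tiny worked example of the ceiling-division arithmetic. The chunk size, checksum width and data length are assumed values for illustration, not HDFS defaults.

```java
public class PacketSizingSketch {
  public static void main(String[] args) {
    long chunkSize = 512;        // bytes covered by one checksum (assumed)
    int checksumSize = 4;        // CRC32 checksum width
    long dataLen = 10_000;       // bytes going into this packet (assumed)

    // Ceiling division, as in numberOfChunks(): a partial trailing chunk still counts.
    int numChunks = (int) ((dataLen + chunkSize - 1) / chunkSize);
    int checksumDataLen = numChunks * checksumSize;
    int packetLen = (int) dataLen + checksumDataLen + 4;   // +4 for the length prefix

    System.out.println(numChunks + " chunks, " + checksumDataLen
        + " checksum bytes, packetLen=" + packetLen);       // 20 chunks, 80, 10084
  }
}
```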
- + int headerLen = writePacketHeader(pkt, dataLen, packetLen); - + // Per above, the header doesn't start at the beginning of the // buffer int headerOff = pkt.position() - headerLen; - + int checksumOff = pkt.position(); byte[] buf = pkt.array(); - + if (checksumSize > 0 && ris.getChecksumIn() != null) { readChecksum(buf, checksumOff, checksumDataLen); @@ -586,7 +586,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, } } } - + int dataOff = checksumOff + checksumDataLen; if (!transferTo) { // normal transfer try { @@ -602,7 +602,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, verifyChecksum(buf, dataOff, dataLen, numChunks, checksumOff); } } - + try { if (transferTo) { SocketOutputStream sockOut = (SocketOutputStream)out; @@ -629,7 +629,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, * writing to client timed out. This happens if the client reads * part of a block and then decides not to read the rest (but leaves * the socket open). - * + * * Reporting of this case is done in DataXceiver#run */ LOG.warn("Sending packets timed out.", e); @@ -674,7 +674,7 @@ private int sendPacket(ByteBuffer pkt, int maxChunks, OutputStream out, return dataLen; } - + /** * Read checksum into given buffer * @param buf buffer to read the checksum into @@ -708,7 +708,7 @@ private void readChecksum(byte[] buf, final int checksumOffset, /** * Compute checksum for chunks and verify the checksum that is read from * the metadata file is correct. - * + * * @param buf buffer that has checksum and data * @param dataOffset position where data is written in the buf * @param datalen length of data @@ -741,21 +741,21 @@ public void verifyChecksum(final byte[] buf, final int dataOffset, cOff += checksumSize; } } - + /** * sendBlock() is used to read block and its metadata and stream the data to - * either a client or to another datanode. - * + * either a client or to another datanode. + * * @param out stream to which the block is written to - * @param baseStream optional. if non-null, out is assumed to + * @param baseStream optional. if non-null, out is assumed to * be a wrapper over this stream. This enables optimizations for - * sending the data, e.g. - * {@link SocketOutputStream#transferToFully(FileChannel, + * sending the data, e.g. + * {@link SocketOutputStream#transferToFully(FileChannel, * long, int)}. * @param throttler for sending data. * @return total bytes read, including checksum data. */ - long sendBlock(DataOutputStream out, OutputStream baseStream, + long sendBlock(DataOutputStream out, OutputStream baseStream, DataTransferThrottler throttler) throws IOException { final TraceScope scope = FsTracer.get(null) .newScope("sendBlock_" + block.getBlockId()); @@ -774,7 +774,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, initialOffset = offset; long totalRead = 0; OutputStream streamForSendChunks = out; - + lastCacheDropOffset = initialOffset; if (isLongRead() && ris.getDataInFd() != null) { @@ -782,7 +782,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, ris.dropCacheBehindReads(block.getBlockName(), 0, 0, POSIX_FADV_SEQUENTIAL); } - + // Trigger readahead of beginning of file if configured. 
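The buffer layout comment above (header and length prefixes, then checksums, then optional data) relies on the header being right-justified inside a fixed reserved region, which is what writePacketHeader does further down. A minimal ByteBuffer sketch of that layout follows; PKT_MAX_HEADER_LEN and the header bytes here are stand-ins, not the real PacketHeader serialization.

```java
import java.nio.ByteBuffer;

public class PacketBufferSketch {
  // Hypothetical maximum header size; the real value comes from PacketHeader.
  static final int PKT_MAX_HEADER_LEN = 33;

  public static void main(String[] args) {
    int dataLen = 1024, checksumDataLen = 8;
    byte[] header = new byte[] {1, 2, 3, 4, 5};      // stand-in for a serialized header

    ByteBuffer pkt =
        ByteBuffer.allocate(PKT_MAX_HEADER_LEN + checksumDataLen + dataLen);

    // Right-justify the header inside the reserved region so the packet can be
    // sent starting at headerOff without copying, whatever the header's size.
    pkt.position(PKT_MAX_HEADER_LEN - header.length);
    pkt.put(header);
    int headerOff = pkt.position() - header.length;

    int checksumOff = pkt.position();                 // checksums follow the header
    pkt.put(new byte[checksumDataLen]);
    int dataOff = checksumOff + checksumDataLen;      // then the data (unless transferTo)
    pkt.put(new byte[dataLen]);

    System.out.println("headerOff=" + headerOff
        + " checksumOff=" + checksumOff + " dataOff=" + dataOff);
  }
}
```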
manageOsCache(); @@ -799,7 +799,7 @@ private long doSendBlock(DataOutputStream out, OutputStream baseStream, blockInPosition = fileChannel.position(); streamForSendChunks = baseStream; maxChunksPerPacket = numberOfChunks(TRANSFERTO_BUFFER_SIZE); - + // Smaller packet size to only hold checksum when doing transferTo pktBufSize += checksumSize * maxChunksPerPacket; } else { @@ -884,7 +884,7 @@ private void manageOsCache() throws IOException { * * Note that if the client explicitly asked for dropBehind, we will do it * even on short reads. - * + * * This is also used to determine when to invoke * posix_fadvise(POSIX_FADV_SEQUENTIAL). */ @@ -901,13 +901,13 @@ private int writePacketHeader(ByteBuffer pkt, int dataLen, int packetLen) { // both syncBlock and syncPacket are false PacketHeader header = new PacketHeader(packetLen, offset, seqno, (dataLen == 0), dataLen, false); - + int size = header.getSerializedSize(); pkt.position(PacketHeader.PKT_MAX_HEADER_LEN - size); header.putInBuffer(pkt); return size; } - + boolean didSendEntireByteRange() { return sentEntireByteRange; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index c7dd26cab2e44..0bba74c017bd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -134,8 +134,8 @@ import javax.management.ObjectName; import javax.net.SocketFactory; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -258,7 +258,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; @@ -320,8 +320,8 @@ public class DataNode extends ReconfigurableBase ", blockid: %s" + // block id ", duration(ns): %s"; // duration time - static final Log ClientTraceLog = - LogFactory.getLog(DataNode.class.getName() + ".clienttrace"); + static final Logger ClientTraceLog = + LoggerFactory.getLogger(DataNode.class.getName() + ".clienttrace"); private static final String USAGE = "Usage: hdfs datanode [-regular | -rollback | -rollingupgrade rollback" + @@ -360,7 +360,7 @@ public class DataNode extends ReconfigurableBase FS_GETSPACEUSED_JITTER_KEY, FS_GETSPACEUSED_CLASSNAME)); - public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog"); + public static final Logger METRICS_LOG = LoggerFactory.getLogger("DataNodeMetricsLog"); private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace."; private final FileIoProvider fileIoProvider; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 
d5ef9cc31ac9d..7a20003d44d0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -18,10 +18,10 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.ByteString; import javax.crypto.SecretKey; -import org.apache.commons.logging.Log; +import org.slf4j.Logger; import org.apache.hadoop.fs.FsTracer; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSUtil; @@ -106,8 +106,8 @@ */ class DataXceiver extends Receiver implements Runnable { public static final Logger LOG = DataNode.LOG; - static final Log ClientTraceLog = DataNode.ClientTraceLog; - + static final Logger ClientTraceLog = DataNode.ClientTraceLog; + private Peer peer; private final String remoteAddress; // address of remote side private final String remoteAddressWithoutPort; // only the address, no port @@ -129,12 +129,12 @@ class DataXceiver extends Receiver implements Runnable { * on the socket. */ private String previousOpClientName; - + public static DataXceiver create(Peer peer, DataNode dn, DataXceiverServer dataXceiverServer) throws IOException { return new DataXceiver(peer, dn, dataXceiverServer); } - + private DataXceiver(Peer peer, DataNode datanode, DataXceiverServer dataXceiverServer) throws IOException { super(FsTracer.get(null)); @@ -177,7 +177,7 @@ private void updateCurrentThreadName(String status) { /** Return the datanode object. */ DataNode getDataNode() {return datanode;} - + private OutputStream getOutputStream() { return socketOut; } @@ -216,7 +216,7 @@ private synchronized void setCurrentBlockReceiver(BlockReceiver br) { private synchronized BlockReceiver getCurrentBlockReceiver() { return blockReceiver; } - + /** * Read/write data from/to the DataXceiverServer. */ @@ -254,9 +254,9 @@ public void run() { } return; } - + super.initialize(new DataInputStream(input)); - + // We process requests in a loop, and stay around for a short timeout. // This optimistic behaviour allows the other end to reuse connections. // Setting keepalive timeout to 0 disable this behavior. @@ -512,7 +512,7 @@ public void requestShortCircuitShm(String clientName) throws IOException { // socket is managed by the DomainSocketWatcher, not the DataXceiver. 
releaseSocket(); } catch (UnsupportedOperationException e) { - sendShmErrorResponse(ERROR_UNSUPPORTED, + sendShmErrorResponse(ERROR_UNSUPPORTED, "This datanode has not been configured to support " + "short-circuit shared memory segments."); return; @@ -583,7 +583,7 @@ public void readBlock(final ExtendedBlock block, // send the block BlockSender blockSender = null; - DatanodeRegistration dnR = + DatanodeRegistration dnR = datanode.getDNRegistrationForBP(block.getBlockPoolId()); final String clientTraceFmt = clientName.length() > 0 && ClientTraceLog.isInfoEnabled() @@ -599,12 +599,12 @@ public void readBlock(final ExtendedBlock block, true, false, sendChecksum, datanode, clientTraceFmt, cachingStrategy); } catch(IOException e) { - String msg = "opReadBlock " + block + " received exception " + e; + String msg = "opReadBlock " + block + " received exception " + e; LOG.info(msg); sendResponse(ERROR, msg); throw e; } - + // send op status writeSuccessWithChecksumInfo(blockSender, new DataOutputStream(getOutputStream())); @@ -668,7 +668,7 @@ public void readBlock(final ExtendedBlock block, @Override public void writeBlock(final ExtendedBlock block, - final StorageType storageType, + final StorageType storageType, final Token blockToken, final String clientname, final DatanodeInfo[] targets, @@ -695,7 +695,7 @@ public void writeBlock(final ExtendedBlock block, allowLazyPersist = allowLazyPersist && (dnConf.getAllowNonLocalLazyPersist() || peer.isLocal()); long size = 0; - // reply to upstream datanode or client + // reply to upstream datanode or client final DataOutputStream replyOut = getBufferedOutputStream(); int nst = targetStorageTypes.length; @@ -760,7 +760,7 @@ public void writeBlock(final ExtendedBlock block, final boolean isOnTransientStorage; try { final Replica replica; - if (isDatanode || + if (isDatanode || stage != BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) { // open a block receiver setCurrentBlockReceiver(getBlockReceiver(block, storageType, in, @@ -911,7 +911,7 @@ public void writeBlock(final ExtendedBlock block, blockReceiver.receiveBlock(mirrorOut, mirrorIn, replyOut, mirrorAddr, dataXceiverServer.getWriteThrottler(), targets, false); - // send close-ack for transfer-RBW/Finalized + // send close-ack for transfer-RBW/Finalized if (isTransfer) { LOG.trace("TRANSFER: send close-ack"); writeResponse(SUCCESS, null, replyOut); @@ -919,12 +919,12 @@ public void writeBlock(final ExtendedBlock block, } // update its generation stamp - if (isClient && + if (isClient && stage == BlockConstructionStage.PIPELINE_CLOSE_RECOVERY) { block.setGenerationStamp(latestGenerationStamp); block.setNumBytes(minBytesRcvd); } - + // if this write is for a replication request or recovering // a failed close for client, then confirm block. For other client-writes, // the block is finalized in the PacketResponder. 
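Several files in this patch (DataNode, BlockSender, DataXceiver) switch ClientTraceLog and METRICS_LOG from commons-logging's Log/LogFactory to slf4j's Logger/LoggerFactory. The sketch below shows the resulting usage pattern with a named logger and {} placeholders; the logger name suffix and the logged values are made up for the example.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ClientTraceLoggingSketch {
  // Named logger, replacing LogFactory.getLog(Class.getName() + ".clienttrace").
  static final Logger CLIENT_TRACE_LOG =
      LoggerFactory.getLogger(ClientTraceLoggingSketch.class.getName() + ".clienttrace");

  public static void main(String[] args) {
    String block = "blk_12345_1001";   // made-up block name
    if (CLIENT_TRACE_LOG.isInfoEnabled()) {
      // slf4j parameterized logging avoids string concatenation on the hot path.
      CLIENT_TRACE_LOG.info("src: {}, dest: {}, blockid: {}",
          "127.0.0.1:50010", "127.0.0.1:9866", block);
    }
  }
}
```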
@@ -1093,7 +1093,7 @@ public void copyBlock(final ExtendedBlock block, sendResponse(Status.ERROR_BLOCK_PINNED, msg); return; } - + if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start String msg = "Not able to copy block " + block.getBlockId() + " " + "to " + peer.getRemoteAddressString() + " because threads " + @@ -1108,7 +1108,7 @@ public void copyBlock(final ExtendedBlock block, try { // check if the block exists or not - blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, + blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, null, CachingStrategy.newDropBehind()); OutputStream baseStream = getOutputStream(); @@ -1125,7 +1125,7 @@ public void copyBlock(final ExtendedBlock block, datanode.metrics.incrBlocksRead(); datanode.metrics.incrTotalReadTime(duration); DFSUtil.addTransferRateMetric(datanode.metrics, read, duration); - + LOG.info("Copied {} to {}", block, peer.getRemoteAddressString()); } catch (IOException ioe) { isOpSuccess = false; @@ -1151,13 +1151,13 @@ public void copyBlock(final ExtendedBlock block, IOUtils.closeStream(blockSender); } - //update metrics + //update metrics datanode.metrics.addCopyBlockOp(elapsed()); } @Override public void replaceBlock(final ExtendedBlock block, - final StorageType storageType, + final StorageType storageType, final Token blockToken, final String delHint, final DatanodeInfo proxySource, @@ -1214,19 +1214,19 @@ public void replaceBlock(final ExtendedBlock block, unbufProxyOut, unbufProxyIn, keyFactory, blockToken, proxySource); unbufProxyOut = saslStreams.out; unbufProxyIn = saslStreams.in; - + proxyOut = new DataOutputStream(new BufferedOutputStream(unbufProxyOut, smallBufferSize)); proxyReply = new DataInputStream(new BufferedInputStream(unbufProxyIn, ioFileBufferSize)); - + /* send request to the proxy */ IoeDuringCopyBlockOperation = true; new Sender(proxyOut).copyBlock(block, blockToken); IoeDuringCopyBlockOperation = false; - + // receive the response from the proxy - + BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom( PBHelperClient.vintPrefixed(proxyReply)); @@ -1244,16 +1244,16 @@ public void replaceBlock(final ExtendedBlock block, proxySock.getLocalSocketAddress().toString(), null, 0, 0, 0, "", null, datanode, remoteChecksum, CachingStrategy.newDropBehind(), false, false, storageId)); - + // receive a block - blockReceiver.receiveBlock(null, null, replyOut, null, + blockReceiver.receiveBlock(null, null, replyOut, null, dataXceiverServer.balanceThrottler, null, true); - + // notify name node final Replica r = blockReceiver.getReplica(); datanode.notifyNamenodeReceivedBlock( block, delHint, r.getStorageUuid(), r.isOnTransientStorage()); - + LOG.info("Moved {} from {}, delHint={}", block, peer.getRemoteAddressString(), delHint); } @@ -1262,7 +1262,7 @@ public void replaceBlock(final ExtendedBlock block, if (ioe instanceof BlockPinningException) { opStatus = Status.ERROR_BLOCK_PINNED; } - errMsg = "opReplaceBlock " + block + " received exception " + ioe; + errMsg = "opReplaceBlock " + block + " received exception " + ioe; LOG.info(errMsg); if (!IoeDuringCopyBlockOperation) { // Don't double count IO errors @@ -1277,10 +1277,10 @@ public void replaceBlock(final ExtendedBlock block, } catch (IOException ignored) { } } - + // now release the thread resource dataXceiverServer.balanceThrottler.release(); - + // send response back try { sendResponse(opStatus, errMsg); @@ -1339,7 +1339,7 @@ private long elapsed() { /** * Utility function for sending a response. 
- * + * * @param status status message to write * @param message message to send to the client or other DN */ @@ -1358,7 +1358,7 @@ private static void writeResponse(Status status, String message, OutputStream ou response.build().writeDelimitedTo(out); out.flush(); } - + private void writeSuccessWithChecksumInfo(BlockSender blockSender, DataOutputStream out) throws IOException { @@ -1366,7 +1366,7 @@ private void writeSuccessWithChecksumInfo(BlockSender blockSender, .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum())) .setChunkOffset(blockSender.getOffset()) .build(); - + BlockOpResponseProto response = BlockOpResponseProto.newBuilder() .setStatus(SUCCESS) .setReadOpChecksumInfo(ckInfo) @@ -1374,7 +1374,7 @@ private void writeSuccessWithChecksumInfo(BlockSender blockSender, response.writeDelimitedTo(out); out.flush(); } - + private void incrDatanodeNetworkErrors() { datanode.incrDatanodeNetworkErrors(remoteAddressWithoutPort); } @@ -1445,7 +1445,7 @@ private void checkAccess(OutputStream out, final boolean reply, BlockOpResponseProto.Builder resp = BlockOpResponseProto.newBuilder() .setStatus(ERROR_ACCESS_TOKEN); if (mode == BlockTokenIdentifier.AccessMode.WRITE) { - DatanodeRegistration dnR = + DatanodeRegistration dnR = datanode.getDNRegistrationForBP(blk.getBlockPoolId()); // NB: Unconditionally using the xfer addr w/o hostname resp.setFirstBadLink(dnR.getXferAddr()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java index e50441f3426a3..04e606ddf7703 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java @@ -36,7 +36,7 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java index 0d4ba433444fc..a3bb2abe4bb2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -43,7 +43,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashMap; import java.util.List; @@ -399,7 +399,7 @@ private NodePlan verifyPlanHash(String planID, String plan) if ((planID == null) || (planID.length() != sha1Length) || - !DigestUtils.shaHex(plan.getBytes(Charset.forName("UTF-8"))) + !DigestUtils.sha1Hex(plan.getBytes(StandardCharsets.UTF_8)) 
.equalsIgnoreCase(planID)) { LOG.error("Disk Balancer - Invalid plan hash."); throw new DiskBalancerException("Invalid or mis-matched hash.", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java index f58dd97abd9a4..8152d3ac43400 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hdfs.shortcircuit.DfsClientShmManager; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java index efb2ef7e4d198..d8f1e23ec379b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/VolumeScanner.java @@ -31,7 +31,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.Cache; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java index cdbba6ef14209..4855c5de85fde 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/AbstractFuture.java @@ -22,13 +22,14 @@ import org.apache.hadoop.thirdparty.com.google.common.annotations.Beta; import org.apache.hadoop.thirdparty.com.google.common.annotations.GwtCompatible; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Uninterruptibles; +import org.apache.hadoop.util.Preconditions; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import static java.util.concurrent.atomic.AtomicReferenceFieldUpdater .newUpdater; @@ -289,7 +290,7 @@ public synchronized Throwable fillInStackTrace() { final Throwable exception; Failure(Throwable exception) { - this.exception = 
checkNotNull(exception); + this.exception = Preconditions.checkNotNull(exception); } } @@ -678,8 +679,8 @@ protected final boolean wasInterrupted() { */ @Override public void addListener(Runnable listener, Executor executor) { - checkNotNull(listener, "Runnable was null."); - checkNotNull(executor, "Executor was null."); + Preconditions.checkNotNull(listener, "Runnable was null."); + Preconditions.checkNotNull(executor, "Executor was null."); Listener oldHead = listeners; if (oldHead != Listener.TOMBSTONE) { Listener newNode = new Listener(listener, executor); @@ -736,7 +737,7 @@ protected boolean set(@Nullable V value) { * @return true if the attempt was accepted, completing the {@code Future} */ protected boolean setException(Throwable throwable) { - Object valueToSet = new Failure(checkNotNull(throwable)); + Object valueToSet = new Failure(Preconditions.checkNotNull(throwable)); if (ATOMIC_HELPER.casValue(this, null, valueToSet)) { complete(this); return true; @@ -770,7 +771,7 @@ protected boolean setException(Throwable throwable) { */ @Beta protected boolean setFuture(ListenableFuture future) { - checkNotNull(future); + Preconditions.checkNotNull(future); Object localValue = value; if (localValue == null) { if (future.isDone()) { @@ -1096,7 +1097,7 @@ public sun.misc.Unsafe run() throws Exception { } public static void throwIfUnchecked(Throwable throwable) { - checkNotNull(throwable); + Preconditions.checkNotNull(throwable); if (throwable instanceof RuntimeException) { throw (RuntimeException) throwable; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index 92b801d56b795..d2be072e41831 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.datanode.checker; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Sets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java index d014e499f912e..6bb2c7a84163a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/TimeoutFuture.java @@ -20,7 +20,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.checker; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture; import org.apache.hadoop.hdfs.server.datanode.checker.AbstractFuture; import org.slf4j.Logger; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java index c2aa77f253a1f..74c4cf1bd5f34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/ErasureCodingWorker.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java index a302f5e868965..20d5c6f44fb41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedReader.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java index b570c666a3c4d..00be1279c8179 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/erasurecode/StripedWriter.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.erasurecode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java index facace28604a6..3edf61765a65b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java @@ -23,7 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_CACHE_REVOCATION_POLLING_MS_DEFAULT; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import java.io.FileInputStream; @@ -393,7 +393,7 @@ long roundUpPageSize(long count) { * Background worker that mmaps, mlocks, and checksums a block */ private class CachingTask implements Runnable { - private final ExtendedBlockId key; + private final ExtendedBlockId key; private final String blockFileName; private final long length; private final long genstamp; @@ -493,7 +493,7 @@ public void run() { } private class UncachingTask implements Runnable { - private final ExtendedBlockId key; + private final ExtendedBlockId key; private final long revocationTimeMs; UncachingTask(ExtendedBlockId key, long revocationDelayMs) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 90ca8527c7721..8bb6cc53222c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -120,9 +120,8 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Timer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.Sets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -424,11 +423,7 @@ public AutoCloseableLock acquireDatasetReadLock() { */ private static List getInitialVolumeFailureInfos( Collection dataLocations, DataStorage storage) { - Set failedLocationSet = Sets.newHashSetWithExpectedSize( - dataLocations.size()); - for (StorageLocation sl: dataLocations) { - failedLocationSet.add(sl); - } + Set failedLocationSet = new HashSet<>(dataLocations); for (Iterator it = storage.dirIterator(); it.hasNext(); ) { Storage.StorageDirectory sd = it.next(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java index 621c2735a267c..fae77123bf8d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetUtil.java @@ -35,7 +35,7 @@ import java.util.Arrays; import com.fasterxml.jackson.databind.util.ByteBufferBackedInputStream; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.protocol.Block; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 3a2532ba993cf..0cef7475d99f5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -88,7 +88,7 @@ import com.fasterxml.jackson.databind.ObjectWriter; import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java index 96d88345e6b9d..a18b0b1a58694 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/MappableBlockLoader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java index ec024cda9ab02..eee4fafac847e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/NativePmemMappableBlockLoader.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java index 23e5473a2147f..2ce1cc7deb0a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/PmemVolumeManager.java @@ -35,6 +35,7 @@ import java.io.RandomAccessFile; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -333,7 +334,7 @@ static File verifyIfValidPmemVolume(File pmemDir) String uuidStr = UUID.randomUUID().toString(); String testFilePath = realPmemDir.getPath() + "/.verify.pmem." 
+ uuidStr; - byte[] contents = uuidStr.getBytes("UTF-8"); + byte[] contents = uuidStr.getBytes(StandardCharsets.UTF_8); RandomAccessFile testFile = null; MappedByteBuffer out = null; try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java index f7b12ff179941..1103468d3c8b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/RamDiskReplicaTracker.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java index 02ec25c13c874..2b64b51a7ec2f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ExceptionHandler.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.server.datanode.web.webhdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import com.sun.jersey.api.ParamException; import com.sun.jersey.api.container.ContainerException; import io.netty.buffer.Unpooled; @@ -32,6 +31,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_LENGTH; import static io.netty.handler.codec.http.HttpHeaders.Names.CONTENT_TYPE; @@ -83,7 +83,7 @@ static DefaultFullHttpResponse exceptionCaught(Throwable cause) { s = INTERNAL_SERVER_ERROR; } - final byte[] js = JsonUtil.toJsonString(e).getBytes(Charsets.UTF_8); + final byte[] js = JsonUtil.toJsonString(e).getBytes(StandardCharsets.UTF_8); DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, s, Unpooled.wrappedBuffer(js)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java index d0c71f6be1606..3a8a4f5f41420 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/WebHdfsHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode.web.webhdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java index f478dff4af9e9..aa8de482a3c7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.codec.digest.DigestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java index c1aaa319e2cbb..c73f0f8f246a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.commons.cli.CommandLine; @@ -53,6 +53,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; + import java.io.Closeable; import java.io.IOException; import java.io.PrintStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java index 5b5dc2ad5b437..ec7cc9168cfcf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.commons.codec.digest.DigestUtils; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java index e36628edf0eb2..ec180538a9843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/HelpCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.cli.CommandLine; import 
org.apache.commons.cli.HelpFormatter; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java index e9f9f33e71535..3732de86aa0fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; import org.apache.commons.cli.CommandLine; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java index 520e80f3974c7..6a89e248f5967 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.HelpFormatter; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java index 02188618f0243..ef27ea80de751 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java @@ -33,9 +33,11 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet; import org.apache.hadoop.hdfs.tools.DiskBalancerCLI; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; + + /** * Executes the report command. 
* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java index b7bb3f02dce9d..f9bcd5e018065 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.connectors; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java index 268c055a354ac..1cc82253f9885 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/JsonNodeConnector.java @@ -19,7 +19,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java index 0e405ff7bd3c5..c801f36ea5205 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.FileUtils; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java index fce858aaca01b..fe9edf6678ac5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerDataNode.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.HashMap; import java.util.Map; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java index bcce012ff84b9..f59f4fc9e3fe3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonProperty; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java index 0ed56afb39a5a..59b908671791a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.planner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hdfs.server.diskbalancer.datamodel diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java index 72df5abe6bcaa..39a7c57bca2cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/NodePlan.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectReader; import com.fasterxml.jackson.databind.ObjectWriter; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.io.IOException; import java.util.LinkedList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java index 2dfe50742dac2..9c67d3da43ab9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/AclFeature.java @@ -24,7 +24,6 @@ import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.hdfs.util.ReferenceCountMap.ReferenceCounter; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** @@ -56,8 +55,9 @@ int getEntriesSize() { * @throws IndexOutOfBoundsException if pos out of bound */ int getEntryAt(int pos) { - 
Preconditions.checkPositionIndex(pos, entries.length, - "Invalid position for AclEntry"); + if (pos < 0 || pos > entries.length) { + throw new IndexOutOfBoundsException("Invalid position for AclEntry"); + } return entries[pos]; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index c5ef0c62ddd6e..504df6068ef3c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -28,10 +28,10 @@ import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.Storage.StorageState; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Preconditions; /** * Extension of FSImage for the backup node. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java index dda4789b74e49..8016a1ec11a30 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CachePool.java @@ -32,7 +32,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.IntrusiveCollection; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A CachePool describes a set of cache resources being managed by the NameNode. 
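Side note on the AclFeature#getEntryAt change a few hunks above: Guava's Preconditions.checkPositionIndex(pos, size) treats 0 <= pos <= size as valid, so the equivalent hand-rolled guard rejects only pos < 0 or pos > size. A minimal, self-contained sketch of that equivalence follows; the class and method names are illustrative only and are not part of this patch.

    // Illustrative sketch (hypothetical class, not part of the patch): shows that the
    // explicit range check introduced in AclFeature#getEntryAt keeps the semantics of
    // Guava's Preconditions.checkPositionIndex(pos, size), which accepts 0 <= pos <= size.
    public final class PositionIndexCheckExample {

      // Mirrors the replacement: reject only positions outside [0, size];
      // pos == size is still a valid position index, exactly as in the Guava call.
      static int checkPositionIndex(int pos, int size, String message) {
        if (pos < 0 || pos > size) {
          throw new IndexOutOfBoundsException(message + ": " + pos);
        }
        return pos;
      }

      public static void main(String[] args) {
        int size = 3; // e.g. entries.length in AclFeature

        // Valid positions: 0 through size inclusive.
        checkPositionIndex(0, size, "Invalid position for AclEntry");
        checkPositionIndex(size, size, "Invalid position for AclEntry");

        // Out-of-range position: throws IndexOutOfBoundsException, as the Guava call would.
        try {
          checkPositionIndex(size + 1, size, "Invalid position for AclEntry");
        } catch (IndexOutOfBoundsException expected) {
          System.out.println("rejected as expected: " + expected.getMessage());
        }
      }
    }

The same reasoning applies to the FSDirAclOp hunk further below, where the index returned by Collections.binarySearch is validated against featureEntries.size() before it is used.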
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java index 7a5963a6c57cd..2819e8dd35496 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.permission.FsAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java index d17fd06bc882c..a77075fa4f2e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java @@ -21,7 +21,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java index bc026d1721f6e..1d4686a73ebd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java @@ -46,7 +46,7 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Throwables; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java index ccc233efcbafe..9577a528923ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditsDoubleBuffer.java @@ -32,7 +32,7 @@ import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A double-buffer for edits. 
New edits are written into the first buffer diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java index c89f4a16eacf8..81b4cb9812ef1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java @@ -30,7 +30,7 @@ import java.util.TreeMap; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; @@ -55,6 +55,13 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java index 44e2f1e7ab33f..c7a102f005369 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingPolicyManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -541,4 +541,4 @@ private ErasureCodingPolicyInfo createPolicyInfo(ErasureCodingPolicy p, policyInfo.setState(s); return policyInfo; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java index 6ced588e96882..b17c459cc993a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAclOp.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; - import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.permission.AclEntry; import org.apache.hadoop.fs.permission.AclEntryScope; @@ -228,10 +226,12 @@ private static void unprotectedRemoveAcl(FSDirectory fsd, 
INodesInPath iip) int groupEntryIndex = Collections.binarySearch( featureEntries, groupEntryKey, AclTransformation.ACL_ENTRY_COMPARATOR); - Preconditions.checkPositionIndex(groupEntryIndex, featureEntries.size(), - "Invalid group entry index after binary-searching inode: " + - inode.getFullPathName() + "(" + inode.getId() + ") " - + "with featureEntries:" + featureEntries); + if (groupEntryIndex < 0 || groupEntryIndex > featureEntries.size()) { + throw new IndexOutOfBoundsException( + "Invalid group entry index after binary-searching inode: " + + inode.getFullPathName() + "(" + inode.getId() + ") " + + "with featureEntries:" + featureEntries); + } FsAction groupPerm = featureEntries.get(groupEntryIndex).getPermission(); FsPermission newPerm = new FsPermission(perm.getUserAction(), groupPerm, perm.getOtherAction(), perm.getStickyBit()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java index 7e90d4bafa8bb..ba00e8ae936a0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAppendOp.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion.Feature; import org.apache.hadoop.ipc.RetriableException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Helper class to perform append operation. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java index 04ae358c67afb..ea5ac38aa8659 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirConcatOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.permission.FsAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java index 1c5bf34bb4e4b..82892fb852902 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java @@ -52,8 +52,10 @@ import org.apache.hadoop.hdfs.server.namenode.ReencryptionUpdater.FileEdekInfo; import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; +import org.apache.hadoop.util.Time; + +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.util.Time; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 5b137713b383f..c549eac3f00de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.HadoopIllegalArgumentException; @@ -34,6 +34,9 @@ import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.io.erasurecode.CodecRegistry; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.util.Preconditions; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java index da324fb46738a..862880d95b2d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirMkdirOp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.fs.permission.FsCreateModes; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.ParentNotDirectoryException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java index ee0bf8a5fb165..f7b10c478ab60 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.InvalidPathException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index dfacc491eae53..6c881bafec30f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.ContentSummary; import 
org.apache.hadoop.fs.DirectoryListingStartAfterNotFoundException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java index 0d9c6aeeb9c45..7b7f4a0f9c070 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.hdfs.AddBlockFlag; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index 357c649994069..f5cb0340d4298 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; @@ -35,6 +35,10 @@ import org.apache.hadoop.hdfs.protocolPB.PBHelperClient; import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; import java.io.FileNotFoundException; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index e2d688d7e8667..9e6b54005fa85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.HadoopIllegalArgumentException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index ed64b4d89ee35..29b11e3232b94 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -110,8 +110,11 
@@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java index 14ee6acc7efb5..61b1af70fc666 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogAsync.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; class FSEditLogAsync extends FSEditLog implements Runnable { static final Logger LOG = LoggerFactory.getLogger(FSEditLog.class); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index 26552f62ffd00..5158058a05605 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -118,7 +118,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static org.apache.hadoop.log.LogThrottlingHelper.LogAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index bb1440a07f3b3..75dc2914dd8f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java @@ -139,7 +139,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.Lists; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index f871bc915dc19..7b67d7b225cf0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -76,12 +76,12 @@ import org.apache.hadoop.util.Time; import 
org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; /** * FSImage handles checkpointing and logging of the namespace edits. - * + * */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -102,7 +102,7 @@ public class FSImage implements Closeable { private AtomicBoolean exitAfterSave = new AtomicBoolean(false); protected NNStorage storage; - + /** * The last transaction ID that was either loaded from an image * or loaded by loading edits files. @@ -174,7 +174,7 @@ protected FSImage(Configuration conf, archivalManager = new NNStorageRetentionManager(conf, storage, editLog); FSImageFormatProtobuf.initParallelLoad(conf); } - + void format(FSNamesystem fsn, String clusterId, boolean force) throws IOException { long fileCount = fsn.getFilesTotal(); @@ -185,18 +185,18 @@ void format(FSNamesystem fsn, String clusterId, boolean force) NamespaceInfo ns = NNStorage.newNamespaceInfo(); LOG.info("Allocated new BlockPoolId: " + ns.getBlockPoolID()); ns.clusterID = clusterId; - + storage.format(ns); editLog.formatNonFileJournals(ns, force); saveFSImageInAllDirs(fsn, 0); } - + /** * Check whether the storage directories and non-file journals exist. * If running in interactive mode, will prompt the user for each * directory to allow them to format anyway. Otherwise, returns * false, unless 'force' is specified. - * + * * @param force if true, format regardless of whether dirs exist * @param interactive prompt the user when a dir exists * @return true if formatting should proceed @@ -207,38 +207,38 @@ boolean confirmFormat(boolean force, boolean interactive) throws IOException { for (StorageDirectory sd : storage.dirIterable(null)) { confirms.add(sd); } - + confirms.addAll(editLog.getFormatConfirmables()); return Storage.confirmFormat(confirms, force, interactive); } - + /** * Analyze storage directories. - * Recover from previous transitions if required. + * Recover from previous transitions if required. * Perform fs state transition if necessary depending on the namespace info. - * Read storage info. - * + * Read storage info. + * * @throws IOException * @return true if the image needs to be saved or false otherwise */ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, MetaRecoveryContext recovery) throws IOException { - assert startOpt != StartupOption.FORMAT : + assert startOpt != StartupOption.FORMAT : "NameNode formatting should be performed before reading the image"; - + Collection imageDirs = storage.getImageDirectories(); Collection editsDirs = editLog.getEditURIs(); // none of the data dirs exist - if((imageDirs.size() == 0 || editsDirs.size() == 0) - && startOpt != StartupOption.IMPORT) + if((imageDirs.size() == 0 || editsDirs.size() == 0) + && startOpt != StartupOption.IMPORT) throw new IOException( "All specified directories are not accessible or do not exist."); - - // 1. For each data directory calculate its state and + + // 1. For each data directory calculate its state and // check whether all is consistent before transitioning. 
- Map dataDirStates = + Map dataDirStates = new HashMap(); boolean isFormatted = recoverStorageDirs(startOpt, storage, dataDirStates); @@ -247,10 +247,10 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, Joiner.on("\n ").withKeyValueSeparator(": ") .join(dataDirStates)); } - - if (!isFormatted && startOpt != StartupOption.ROLLBACK + + if (!isFormatted && startOpt != StartupOption.ROLLBACK && startOpt != StartupOption.IMPORT) { - throw new IOException("NameNode is not formatted."); + throw new IOException("NameNode is not formatted."); } @@ -271,7 +271,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, && layoutVersion < Storage.LAST_PRE_UPGRADE_LAYOUT_VERSION && layoutVersion != HdfsServerConstants.NAMENODE_LAYOUT_VERSION) { throw new IOException( - "\nFile system image contains an old layout version " + "\nFile system image contains an old layout version " + storage.getLayoutVersion() + ".\nAn upgrade to version " + HdfsServerConstants.NAMENODE_LAYOUT_VERSION + " is required.\n" + "Please restart NameNode with the \"" @@ -281,7 +281,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, + StartupOption.UPGRADE.getName() + "\" option to start" + " a new upgrade."); } - + storage.processStartupOptionsForUpgrade(startOpt, layoutVersion); // 2. Format unformatted dirs. @@ -290,7 +290,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, StorageState curState = dataDirStates.get(sd); switch(curState) { case NON_EXISTENT: - throw new IOException(StorageState.NON_EXISTENT + + throw new IOException(StorageState.NON_EXISTENT + " state cannot be here"); case NOT_FORMATTED: // Create a dir structure, but not the VERSION file. The presence of @@ -335,7 +335,7 @@ boolean recoverTransitionRead(StartupOption startOpt, FSNamesystem target, default: // just load the image } - + return loadFSImage(target, startOpt, recovery); } @@ -373,7 +373,7 @@ public static boolean recoverStorageDirs(StartupOption startOpt, // This loop needs to be over all storage dirs, even shared dirs, to make // sure that we properly examine their state, but we make sure we don't // mutate the shared dir below in the actual loop. - for (Iterator it = + for (Iterator it = storage.dirIterator(); it.hasNext();) { StorageDirectory sd = it.next(); StorageState curState; @@ -398,7 +398,7 @@ public static boolean recoverStorageDirs(StartupOption startOpt, default: // recovery is possible sd.doRecover(curState); } - if (curState != StorageState.NOT_FORMATTED + if (curState != StorageState.NOT_FORMATTED && startOpt != StartupOption.ROLLBACK) { // read and verify consistency with other directories storage.readProperties(sd, startOpt); @@ -406,7 +406,7 @@ public static boolean recoverStorageDirs(StartupOption startOpt, } if (startOpt == StartupOption.IMPORT && isFormatted) // import of a checkpoint is allowed only into empty image directories - throw new IOException("Cannot import image from a checkpoint. " + throw new IOException("Cannot import image from a checkpoint. " + " NameNode already contains an image in " + sd.getRoot()); } catch (IOException ioe) { sd.unlock(); @@ -419,7 +419,7 @@ public static boolean recoverStorageDirs(StartupOption startOpt, /** Check if upgrade is in progress. 
*/ public static void checkUpgrade(NNStorage storage) throws IOException { - // Upgrade or rolling upgrade is allowed only if there are + // Upgrade or rolling upgrade is allowed only if there are // no previous fs states in any of the local directories for (Iterator it = storage.dirIterator(false); it.hasNext();) { StorageDirectory sd = it.next(); @@ -458,12 +458,12 @@ void doUpgrade(FSNamesystem target) throws IOException { // Do upgrade for each directory this.loadFSImage(target, StartupOption.UPGRADE, null); target.checkRollingUpgrade("upgrade namenode"); - + long oldCTime = storage.getCTime(); storage.cTime = now(); // generate new cTime for the state int oldLV = storage.getLayoutVersion(); storage.layoutVersion = HdfsServerConstants.NAMENODE_LAYOUT_VERSION; - + List errorSDs = Collections.synchronizedList(new ArrayList()); assert !editLog.isSegmentOpen() : "Edits log must not be open."; @@ -506,7 +506,7 @@ void doUpgrade(FSNamesystem target) throws IOException { } } storage.reportErrorsOnDirectories(errorSDs); - + isUpgradeFinalized = false; if (!storage.getRemovedStorageDirs().isEmpty()) { // during upgrade, it's a fatal error to fail any storage directory @@ -517,7 +517,7 @@ void doUpgrade(FSNamesystem target) throws IOException { } void doRollback(FSNamesystem fsns) throws IOException { - // Rollback is allowed only if there is + // Rollback is allowed only if there is // a previous fs states in at least one of the storage directories. // Directories that don't have previous state do not rollback boolean canRollback = false; @@ -533,7 +533,7 @@ void doRollback(FSNamesystem fsns) throws IOException { LOG.info("Can perform rollback for " + sd); canRollback = true; } - + if (fsns.isHaEnabled()) { // If HA is enabled, check if the shared log can be rolled back as well. editLog.initJournalsForWrite(); @@ -544,11 +544,11 @@ void doRollback(FSNamesystem fsns) throws IOException { canRollback = true; } } - + if (!canRollback) throw new IOException("Cannot rollback. None of the storage " + "directories contain previous fs state."); - + // Now that we know all directories are going to be consistent // Do rollback for each directory containing previous state for (Iterator it = storage.dirIterator(false); it.hasNext();) { @@ -562,7 +562,7 @@ void doRollback(FSNamesystem fsns) throws IOException { // If HA is enabled, try to roll back the shared log as well. editLog.doRollback(); } - + isUpgradeFinalized = true; } finally { prevState.close(); @@ -584,14 +584,14 @@ void doImportCheckpoint(FSNamesystem target) throws IOException { throw new IOException("Cannot import image from a checkpoint. " + "\"dfs.namenode.checkpoint.dir\" is not set." ); } - + if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) { throw new IOException("Cannot import image from a checkpoint. " + "\"dfs.namenode.checkpoint.dir\" is not set." ); } FSImage realImage = target.getFSImage(); - FSImage ckptImage = new FSImage(conf, + FSImage ckptImage = new FSImage(conf, checkpointDirs, checkpointEditsDirs); // load from the checkpoint dirs try { @@ -610,10 +610,10 @@ void doImportCheckpoint(FSNamesystem target) throws IOException { saveNamespace(target); updateStorageVersion(); } - + void finalizeUpgrade(boolean finalizeEditLog) throws IOException { LOG.info("Finalizing upgrade for local dirs. " + - (storage.getLayoutVersion() == 0 ? "" : + (storage.getLayoutVersion() == 0 ? 
"" : "\n cur LV = " + storage.getLayoutVersion() + "; cur CTime = " + storage.getCTime())); for (Iterator it = storage.dirIterator(false); it.hasNext();) { @@ -642,7 +642,7 @@ void openEditLogForWrite(int layoutVersion) throws IOException { editLog.openForWrite(layoutVersion); storage.writeTransactionIdFileToStorage(editLog.getCurSegmentTxId()); } - + /** * Toss the current image and namesystem, reloading from the specified * file. @@ -656,12 +656,12 @@ void reloadFromImageFile(File file, FSNamesystem target) throws IOException { /** * Choose latest image from one of the directories, * load it and merge with the edits. - * - * Saving and loading fsimage should never trigger symlink resolution. - * The paths that are persisted do not have *intermediate* symlinks - * because intermediate symlinks are resolved at the time files, - * directories, and symlinks are created. All paths accessed while - * loading or saving fsimage should therefore only see symlinks as + * + * Saving and loading fsimage should never trigger symlink resolution. + * The paths that are persisted do not have *intermediate* symlinks + * because intermediate symlinks are resolved at the time files, + * directories, and symlinks are created. All paths accessed while + * loading or saving fsimage should therefore only see symlinks as * the final path component, and the functions called below do not * resolve symlinks that are the final path component. * @@ -706,7 +706,7 @@ LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) { // In the meanwhile, for HA upgrade, we will still write editlog thus need // this toAtLeastTxId to be set to the max-seen txid // For rollback in rolling upgrade, we need to set the toAtLeastTxId to - // the txid right before the upgrade marker. + // the txid right before the upgrade marker. long toAtLeastTxId = editLog.isOpenForWrite() ? inspector .getMaxSeenTxId() : 0; if (rollingRollback) { @@ -726,14 +726,14 @@ LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) { for (EditLogInputStream elis : editStreams) { elis.setMaxOpSize(maxOpSize); } - + for (EditLogInputStream l : editStreams) { LOG.debug("Planning to load edit log stream: " + l); } if (!editStreams.iterator().hasNext()) { LOG.info("No edit log streams selected."); } - + FSImageFile imageFile = null; for (int i = 0; i < imageFiles.size(); i++) { try { @@ -756,7 +756,7 @@ LayoutVersion.Feature.TXID_BASED_LAYOUT, getLayoutVersion())) { "above for more info."); } prog.endPhase(Phase.LOADING_FSIMAGE); - + if (!rollingRollback) { prog.beginPhase(Phase.LOADING_EDITS); long txnsAdvanced = loadEdits(editStreams, target, Long.MAX_VALUE, @@ -879,7 +879,7 @@ private boolean needsResaveBasedOnStaleCheckpoint( return (checkpointAge > checkpointPeriod * 1000) || (numEditsLoaded > checkpointTxnCount); } - + /** * Load the specified list of edit files into the image. 
*/ @@ -893,12 +893,12 @@ public long loadEdits(Iterable editStreams, StartupOption startOpt, MetaRecoveryContext recovery) throws IOException { LOG.debug("About to load edits:\n " + Joiner.on("\n ").join(editStreams)); - + long prevLastAppliedTxId = lastAppliedTxId; long remainingReadTxns = maxTxnsToRead; - try { + try { FSEditLogLoader loader = new FSEditLogLoader(target, lastAppliedTxId); - + // Load latest edits for (EditLogInputStream editIn : editStreams) { LogAction logAction = loadEditLogHelper.record(); @@ -949,7 +949,7 @@ private void loadFSImage(File imageFile, FSNamesystem target, loadFSImage(imageFile, expectedMD5, target, recovery, requireSameLayoutVersion); } - + /** * Load in the filesystem image from file. It's a big list of * filenames and blocks. @@ -988,7 +988,7 @@ void saveFSImage(SaveNamespaceContext context, StorageDirectory sd, long txid = context.getTxId(); File newFile = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); File dstFile = NNStorage.getStorageFile(sd, dstType, txid); - + FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context, conf); FSImageCompression compression = FSImageCompression.createCompression(conf); @@ -1078,14 +1078,14 @@ public void run() { } } } - + @Override public String toString() { return "FSImageSaver for " + sd.getRoot() + " of type " + sd.getStorageDirType(); } } - + private void waitForThreads(List threads) { for (Thread thread : threads) { while (thread.isAlive()) { @@ -1094,11 +1094,11 @@ private void waitForThreads(List threads) { } catch (InterruptedException iex) { LOG.error("Caught interrupted exception while waiting for thread " + thread.getName() + " to finish. Retrying join"); - } + } } } } - + /** * Update version of all storage directories. */ @@ -1137,7 +1137,7 @@ public synchronized boolean saveNamespace(long timeWindow, long txGap, public void saveNamespace(FSNamesystem source) throws IOException { saveNamespace(0, 0, source); } - + /** * Save the contents of the FS image to a new image file in each of the * current storage directories. @@ -1149,7 +1149,7 @@ public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf, storage.attemptRestoreRemovedStorage(); boolean editLogWasOpen = editLog.isSegmentOpen(); - + if (editLogWasOpen) { editLog.endCurrentLogSegment(true); } @@ -1223,7 +1223,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, } SaveNamespaceContext ctx = new SaveNamespaceContext( source, txid, canceler); - + try { List saveThreads = new ArrayList(); // save images into current @@ -1238,7 +1238,7 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, waitForThreads(saveThreads); saveThreads.clear(); storage.reportErrorsOnDirectories(ctx.getErrorSDs()); - + if (storage.getNumStorageDirs(NameNodeDirType.IMAGE) == 0) { throw new IOException( "Failed to save in any storage directories while saving namespace."); @@ -1248,9 +1248,9 @@ private synchronized void saveFSImageInAllDirs(FSNamesystem source, ctx.checkCancelled(); // throws assert false : "should have thrown above!"; } - + renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false); - + // Since we now have a new checkpoint, we can clean up some // old edit logs and checkpoints. // Do not purge anything if we just wrote a corrupted FsImage. 
@@ -1337,7 +1337,7 @@ private void deleteCancelledCheckpoint(long txid) throws IOException { File ckpt = NNStorage.getStorageFile(sd, NameNodeFile.IMAGE_NEW, txid); if (ckpt.exists() && !ckpt.delete()) { LOG.warn("Unable to delete cancelled checkpoint in " + sd); - al.add(sd); + al.add(sd); } } storage.reportErrorsOnDirectories(al); @@ -1349,12 +1349,12 @@ private void renameImageFileInDir(StorageDirectory sd, NameNodeFile fromNnf, final File toFile = NNStorage.getStorageFile(sd, toNnf, txid); // renameTo fails on Windows if the destination file already exists. if(LOG.isDebugEnabled()) { - LOG.debug("renaming " + fromFile.getAbsolutePath() + LOG.debug("renaming " + fromFile.getAbsolutePath() + " to " + toFile.getAbsolutePath()); } if (!fromFile.renameTo(toFile)) { if (!toFile.delete() || !fromFile.renameTo(toFile)) { - throw new IOException("renaming " + fromFile.getAbsolutePath() + " to " + + throw new IOException("renaming " + fromFile.getAbsolutePath() + " to " + toFile.getAbsolutePath() + " FAILED"); } } @@ -1377,16 +1377,16 @@ CheckpointSignature rollEditLog(int layoutVersion) throws IOException { /** * Start checkpoint. *
<p>
- * If backup storage contains image that is newer than or incompatible with + * If backup storage contains image that is newer than or incompatible with * what the active name-node has, then the backup node should shutdown.<br>
- * If the backup image is older than the active one then it should + * If the backup image is older than the active one then it should * be discarded and downloaded from the active node.<br>
* If the images are the same then the backup image will be used as current. - * + * * @param bnReg the backup node registration. * @param nnReg this (active) name-node registration. * @return {@link NamenodeCommand} if backup node should shutdown or - * {@link CheckpointCommand} prescribing what backup node should + * {@link CheckpointCommand} prescribing what backup node should * do with its image. * @throws IOException */ @@ -1429,7 +1429,7 @@ else if(bnReg.getLayoutVersion() < storage.getLayoutVersion() * End checkpoint. *
<p>
* Validate the current storage info with the given signature. - * + * * @param sig to validate the current storage info against * @throws IOException if the checkpoint fields are inconsistent */ @@ -1448,7 +1448,7 @@ public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf, long txid, MD5Hash digest) throws IOException { // Write and rename MD5 file List badSds = Lists.newArrayList(); - + for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) { File imageFile = NNStorage.getImageFile(sd, nnf, txid); try { @@ -1458,9 +1458,9 @@ public synchronized void saveDigestAndRenameCheckpointImage(NameNodeFile nnf, } } storage.reportErrorsOnDirectories(badSds); - + CheckpointFaultInjector.getInstance().afterMD5Rename(); - + // Rename image from tmp file renameCheckpoint(txid, NameNodeFile.IMAGE_NEW, nnf, false); // So long as this is the newest image available, @@ -1488,7 +1488,7 @@ synchronized public void close() throws IOException { * * @param conf the Configuration * @param defaultValue a default value for the attribute, if null - * @return a Collection of URIs representing the values in + * @return a Collection of URIs representing the values in * dfs.namenode.checkpoint.dir configuration property */ static Collection getCheckpointDirs(Configuration conf, @@ -1518,15 +1518,15 @@ public NNStorage getStorage() { public int getLayoutVersion() { return storage.getLayoutVersion(); } - + public int getNamespaceID() { return storage.getNamespaceID(); } - + public String getClusterID() { return storage.getClusterID(); } - + public String getBlockPoolID() { return storage.getBlockPoolID(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java index 2ce2c645e1511..7e679296e25c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java @@ -76,7 +76,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Contains inner classes for reading or writing the on-disk format for diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java index 26df995e552e1..1f21871ac7b02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatPBINode.java @@ -72,7 +72,7 @@ import org.apache.hadoop.hdfs.util.EnumCounters; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.apache.hadoop.thirdparty.protobuf.ByteString; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java index 3d75cebf729d3..404f2c73ad3a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java @@ -53,7 +53,7 @@ import org.xml.sax.ContentHandler; import org.xml.sax.SAXException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Static utility functions for serializing various pieces of data in the correct diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index fd5661855f26d..cf2162ac60f6d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -98,6 +98,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SNAPSHOT_DIFF_LISTING_LIMIT_DEFAULT; import static org.apache.hadoop.hdfs.DFSUtil.isParentEntry; +import java.lang.reflect.Constructor; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.text.CaseUtils; @@ -176,8 +178,8 @@ import javax.management.ObjectName; import javax.management.StandardMBean; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; @@ -336,14 +338,12 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.VersionInfo; -import org.apache.log4j.Logger; import org.apache.log4j.Appender; import org.apache.log4j.AsyncAppender; import org.eclipse.jetty.util.ajax.JSON; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; @@ -477,7 +477,7 @@ private boolean isClientPortInfoAbsent(CallerContext ctx){ * perm=<permissions (optional)> * */ - public static final Log auditLog = LogFactory.getLog( + public static final Logger auditLog = LoggerFactory.getLogger( FSNamesystem.class.getName() + ".audit"); private final int maxCorruptFileBlocksReturn; @@ -842,11 +842,7 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { throws IOException { provider = DFSUtil.createKeyProviderCryptoExtension(conf); LOG.info("KeyProvider: " + provider); - if (conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_ASYNC_KEY, - DFS_NAMENODE_AUDIT_LOG_ASYNC_DEFAULT)) { - LOG.info("Enabling async auditlog"); - enableAsyncAuditLog(conf); - } + checkForAsyncLogEnabledByOldConfigs(conf); auditLogWithRemotePort = conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_WITH_REMOTE_PORT_KEY, 
DFS_NAMENODE_AUDIT_LOG_WITH_REMOTE_PORT_DEFAULT); @@ -1058,6 +1054,26 @@ static FSNamesystem loadFromDisk(Configuration conf) throws IOException { } } + private T createLock(Class theClass, Configuration conf, + MutableRatesWithAggregation mutableRatesMetrics) { + try { + Constructor meth = theClass.getDeclaredConstructor( + Configuration.class, MutableRatesWithAggregation.class); + meth.setAccessible(true); + return meth.newInstance(conf, mutableRatesMetrics); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static void checkForAsyncLogEnabledByOldConfigs(Configuration conf) { + // dfs.namenode.audit.log.async is no longer in use. Use log4j properties instead. + if (conf.getBoolean("dfs.namenode.audit.log.async", false)) { + LOG.warn("Use log4j properties to enable async log for audit logs. " + + "dfs.namenode.audit.log.async is no longer in use."); + } + } + @VisibleForTesting public List getAuditLoggers() { return auditLoggers; @@ -1915,7 +1931,7 @@ void metaSave(String filename) throws IOException { File file = new File(System.getProperty("hadoop.log.dir"), filename); PrintWriter out = new PrintWriter(new BufferedWriter( new OutputStreamWriter(Files.newOutputStream(file.toPath()), - Charsets.UTF_8))); + StandardCharsets.UTF_8))); metaSave(out); out.flush(); out.close(); @@ -4110,7 +4126,7 @@ DirectoryListing getListing(String src, byte[] startAfter, public byte[] getSrcPathsHash(String[] srcs) { synchronized (digest) { for (String src : srcs) { - digest.update(src.getBytes(Charsets.UTF_8)); + digest.update(src.getBytes(StandardCharsets.UTF_8)); } byte[] result = digest.digest(); digest.reset(); @@ -8580,34 +8596,6 @@ public void logAuditMessage(String message) { } } - private static void enableAsyncAuditLog(Configuration conf) { - if (!(auditLog instanceof Log4JLogger)) { - LOG.warn("Log4j is required to enable async auditlog"); - return; - } - Logger logger = ((Log4JLogger)auditLog).getLogger(); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - // failsafe against trying to async it more than once - if (!appenders.isEmpty() && !(appenders.get(0) instanceof AsyncAppender)) { - AsyncAppender asyncAppender = new AsyncAppender(); - asyncAppender.setBlocking(conf.getBoolean( - DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_KEY, - DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BLOCKING_DEFAULT - )); - asyncAppender.setBufferSize(conf.getInt( - DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_KEY, - DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_ASYNC_BUFFER_SIZE_DEFAULT - )); - // change logger to have an async appender containing all the - // previously configured appenders - for (Appender appender : appenders) { - logger.removeAppender(appender); - asyncAppender.addAppender(appender); - } - logger.addAppender(asyncAppender); - } - } /** * Return total number of Sync Operations on FSEditLog. 
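
Editorial note (not part of the patch): the new createLock helper in this FSNamesystem hunk builds the namesystem lock through a reflective two-argument constructor lookup. A minimal standalone sketch of the same pattern, using hypothetical Lockish/SimpleLock types rather than the real FSNamesystemLock/Configuration/MutableRatesWithAggregation classes:

// Sketch of the reflective factory pattern used by createLock above:
// look up a two-argument constructor, make it accessible, instantiate.
import java.lang.reflect.Constructor;

interface Lockish { String describe(); }

class SimpleLock implements Lockish {
  private final String name;
  private final int readers;
  SimpleLock(String name, Integer readers) {   // non-public ctor is fine
    this.name = name;
    this.readers = readers;
  }
  public String describe() { return name + " (max readers " + readers + ")"; }
}

public class ReflectiveFactoryDemo {
  static <T extends Lockish> T create(Class<T> theClass, String name, int readers) {
    try {
      Constructor<T> ctor =
          theClass.getDeclaredConstructor(String.class, Integer.class);
      ctor.setAccessible(true);                 // allow package-private ctors
      return ctor.newInstance(name, readers);
    } catch (Exception e) {
      throw new RuntimeException("Cannot instantiate " + theClass, e);
    }
  }

  public static void main(String[] args) {
    Lockish lock = create(SimpleLock.class, "fsLock", 64);
    System.out.println(lock.describe());
  }
}
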
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java index 5d136cb7504c4..5f61f924a7438 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Stack; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.ipc.CallerContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java index a90dc27a54fa8..c1d26f40c0826 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java @@ -34,7 +34,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * FSTreeTraverser traverse directory recursively and process files diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index 82fa2d3b6429d..ca448502e2620 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -47,7 +47,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 77425a13684e8..305ef2ba6dd5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 49638a2d27208..dc54c156c32ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.security.AccessControlException; import static org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java index 5e5c4b4b81fb7..05b78ccc6f53d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.util.EnumCounters; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The attributes of an inode. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index c484a2a2a173d..ac3323a82ad0c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -58,7 +58,7 @@ import static org.apache.hadoop.io.erasurecode.ErasureCodeConstants.REPLICATION_POLICY_ID; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** I-node for closed file. 
*/ @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java index f35949fdcdbed..01709832e9a3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java @@ -26,7 +26,7 @@ import org.apache.hadoop.util.GSet; import org.apache.hadoop.util.LightWeightGSet; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Storing all the {@link INode}s and maintaining the mapping between INode ID diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index ce37f0afa2dbc..d2b8253a86aff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.security.AccessControlException; /** @@ -38,23 +38,23 @@ * This class and its subclasses are used to support multiple access paths. * A file/directory may have multiple access paths when it is stored in some * snapshots and it is renamed/moved to other locations. - * + * * For example, * (1) Suppose we have /abc/foo, say the inode of foo is inode(id=1000,name=foo) * (2) create snapshot s0 for /abc * (3) mv /abc/foo /xyz/bar, i.e. inode(id=1000,name=...) is renamed from "foo" * to "bar" and its parent becomes /xyz. - * + * * Then, /xyz/bar and /abc/.snapshot/s0/foo are two different access paths to * the same inode, inode(id=1000,name=bar). * * With references, we have the following * - /abc has a child ref(id=1001,name=foo). - * - /xyz has a child ref(id=1002) + * - /xyz has a child ref(id=1002) * - Both ref(id=1001,name=foo) and ref(id=1002) point to another reference, * ref(id=1003,count=2). * - Finally, ref(id=1003,count=2) points to inode(id=1000,name=bar). - * + * * Note 1: For a reference without name, e.g. ref(id=1002), it uses the name * of the referred inode. * Note 2: getParent() always returns the parent in the current state, e.g. @@ -81,7 +81,7 @@ private static int removeReference(INodeReference ref) { if (!(referred instanceof WithCount)) { return -1; } - + WithCount wc = (WithCount) referred; wc.removeReference(ref); return wc.getReferenceCount(); @@ -90,7 +90,7 @@ private static int removeReference(INodeReference ref) { /** * When destroying a reference node (WithName or DstReference), we call this * method to identify the snapshot which is the latest snapshot before the - * reference node's creation. + * reference node's creation. 
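
Editorial note (not part of the patch): the INodeReference hunks here are almost entirely trailing-whitespace cleanup, so nothing functional changes. As a reading aid for the class javadoc above, here is a toy model (made-up classes, not the real INodeReference/WithCount/WithName types) of the reference chain it describes, where /xyz/bar and /abc/.snapshot/s0/foo share one counted reference to inode(id=1000):

// Toy model of the reference chain: two named references share one
// counted reference to a single renamed inode.
import java.util.ArrayList;
import java.util.List;

class ToyInode {
  final long id;
  String name;
  ToyInode(long id, String name) { this.id = id; this.name = name; }
}

class WithCountRef {                    // plays the role of ref(id=1003,count=2)
  final ToyInode referred;
  final List<NamedRef> referrers = new ArrayList<>();
  WithCountRef(ToyInode referred) { this.referred = referred; }
  int getReferenceCount() { return referrers.size(); }
}

class NamedRef {                        // plays the role of ref(id=1001) / ref(id=1002)
  final String name;                    // null -> use the referred inode's name
  final WithCountRef target;
  NamedRef(String name, WithCountRef target) {
    this.name = name;
    this.target = target;
    target.referrers.add(this);
  }
  String localName() { return name != null ? name : target.referred.name; }
}

public class ReferenceChainDemo {
  public static void main(String[] args) {
    ToyInode bar = new ToyInode(1000, "bar");              // renamed from "foo"
    WithCountRef counted = new WithCountRef(bar);
    NamedRef snapshotPath = new NamedRef("foo", counted);  // /abc/.snapshot/s0/foo
    NamedRef currentPath = new NamedRef(null, counted);    // /xyz/bar
    System.out.println(snapshotPath.localName());          // foo
    System.out.println(currentPath.localName());           // bar
    System.out.println(counted.getReferenceCount());       // 2
  }
}
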
*/ static int getPriorSnapshot(INodeReference ref) { WithCount wc = (WithCount) ref.getReferredINode(); @@ -114,9 +114,9 @@ static int getPriorSnapshot(INodeReference ref) { } return Snapshot.NO_SNAPSHOT_ID; } - + private INode referred; - + public INodeReference(INode parent, INode referred) { super(parent); this.referred = referred; @@ -130,7 +130,7 @@ public final INode getReferredINode() { public final boolean isReference() { return true; } - + @Override public final INodeReference asReference() { return this; @@ -140,27 +140,27 @@ public final INodeReference asReference() { public final boolean isFile() { return referred.isFile(); } - + @Override public final INodeFile asFile() { return referred.asFile(); } - + @Override public final boolean isDirectory() { return referred.isDirectory(); } - + @Override public final INodeDirectory asDirectory() { return referred.asDirectory(); } - + @Override public final boolean isSymlink() { return referred.isSymlink(); } - + @Override public final INodeSymlink asSymlink() { return referred.asSymlink(); @@ -180,32 +180,32 @@ public void setLocalName(byte[] name) { public final long getId() { return referred.getId(); } - + @Override public final PermissionStatus getPermissionStatus(int snapshotId) { return referred.getPermissionStatus(snapshotId); } - + @Override public final String getUserName(int snapshotId) { return referred.getUserName(snapshotId); } - + @Override final void setUser(String user) { referred.setUser(user); } - + @Override public final String getGroupName(int snapshotId) { return referred.getGroupName(snapshotId); } - + @Override final void setGroup(String group) { referred.setGroup(group); } - + @Override public final FsPermission getFsPermission(int snapshotId) { return referred.getFsPermission(snapshotId); @@ -225,17 +225,17 @@ final void addAclFeature(AclFeature aclFeature) { final void removeAclFeature() { referred.removeAclFeature(); } - + @Override final XAttrFeature getXAttrFeature(int snapshotId) { return referred.getXAttrFeature(snapshotId); } - + @Override final void addXAttrFeature(XAttrFeature xAttrFeature) { referred.addXAttrFeature(xAttrFeature); } - + @Override final void removeXAttrFeature() { referred.removeXAttrFeature(); @@ -245,7 +245,7 @@ final void removeXAttrFeature() { public final short getFsPermissionShort() { return referred.getFsPermissionShort(); } - + @Override void setPermission(FsPermission permission) { referred.setPermission(permission); @@ -260,22 +260,22 @@ public long getPermissionLong() { public final long getModificationTime(int snapshotId) { return referred.getModificationTime(snapshotId); } - + @Override public final INode updateModificationTime(long mtime, int latestSnapshotId) { return referred.updateModificationTime(mtime, latestSnapshotId); } - + @Override public final void setModificationTime(long modificationTime) { referred.setModificationTime(modificationTime); } - + @Override public final long getAccessTime(int snapshotId) { return referred.getAccessTime(snapshotId); } - + @Override public final void setAccessTime(long accessTime) { referred.setAccessTime(accessTime); @@ -349,7 +349,7 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, out.print(", count=" + ((WithCount)this).getReferenceCount()); } out.println(); - + final StringBuilder b = new StringBuilder(); for(int i = 0; i < prefix.length(); i++) { b.append(' '); @@ -357,11 +357,11 @@ public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, b.append("->"); 
getReferredINode().dumpTreeRecursively(out, b, snapshot); } - + public int getDstSnapshotId() { return Snapshot.CURRENT_STATE_ID; } - + /** An anonymous reference with reference count. */ public static class WithCount extends INodeReference { @@ -378,13 +378,13 @@ public int compare(WithName left, WithName right) { return left.lastSnapshotId - right.lastSnapshotId; } }; - + public WithCount(INodeReference parent, INode referred) { super(parent, referred); Preconditions.checkArgument(!referred.isReference()); referred.setParentReference(this); } - + public int getReferenceCount() { int count = withNameList.size(); if (getParentReference() != null) { @@ -421,10 +421,10 @@ public void removeReference(INodeReference ref) { /** Return the last WithName reference if there is any, null otherwise. */ public WithName getLastWithName() { - return withNameList.size() > 0 ? + return withNameList.size() > 0 ? withNameList.get(withNameList.size() - 1) : null; } - + WithName getPriorWithName(WithName post) { int i = Collections.binarySearch(withNameList, post, WITHNAME_COMPARATOR); if (i > 0) { @@ -444,7 +444,7 @@ public INodeReference getParentRef(int snapshotId) { int end = withNameList.size() - 1; while (start < end) { int mid = start + (end - start) / 2; - int sid = withNameList.get(mid).lastSnapshotId; + int sid = withNameList.get(mid).lastSnapshotId; if (sid == snapshotId) { return withNameList.get(mid); } else if (sid < snapshotId) { @@ -461,20 +461,20 @@ public INodeReference getParentRef(int snapshotId) { } } } - + /** A reference with a fixed name. */ public static class WithName extends INodeReference { private final byte[] name; /** - * The id of the last snapshot in the src tree when this WithName node was - * generated. When calculating the quota usage of the referred node, only - * the files/dirs existing when this snapshot was taken will be counted for + * The id of the last snapshot in the src tree when this WithName node was + * generated. When calculating the quota usage of the referred node, only + * the files/dirs existing when this snapshot was taken will be counted for * this WithName node and propagated along its ancestor path. */ private final int lastSnapshotId; - + public WithName(INodeDirectory parent, WithCount referred, byte[] name, int lastSnapshotId) { super(parent, referred); @@ -493,11 +493,11 @@ public final void setLocalName(byte[] name) { throw new UnsupportedOperationException("Cannot set name: " + getClass() + " is immutable."); } - + public int getLastSnapshotId() { return lastSnapshotId; } - + @Override public final ContentSummaryComputationContext computeContentSummary( int snapshotId, ContentSummaryComputationContext summary) @@ -523,14 +523,14 @@ public final QuotaCounts computeQuotaUsage(BlockStoragePolicySuite bsps, final INode referred = this.getReferredINode().asReference() .getReferredINode(); // We will continue the quota usage computation using the same snapshot id - // as time line (if the given snapshot id is valid). Also, we cannot use - // cache for the referred node since its cached quota may have already + // as time line (if the given snapshot id is valid). Also, we cannot use + // cache for the referred node since its cached quota may have already // been updated by changes in the current tree. - int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? + int id = lastSnapshotId != Snapshot.CURRENT_STATE_ID ? 
lastSnapshotId : this.lastSnapshotId; return referred.computeQuotaUsage(bsps, blockStoragePolicyId, false, id); } - + @Override public void cleanSubtree(ReclaimContext reclaimContext, final int snapshot, int prior) { @@ -542,7 +542,7 @@ public void cleanSubtree(ReclaimContext reclaimContext, final int snapshot, if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); } - + if (prior != Snapshot.NO_SNAPSHOT_ID && Snapshot.ID_INTEGER_COMPARATOR.compare(snapshot, prior) <= 0) { return; @@ -558,16 +558,16 @@ public void cleanSubtree(ReclaimContext reclaimContext, final int snapshot, // we need to update the quota usage along the parent path from ref reclaimContext.quotaDelta().addUpdatePath(ref, current); } - + if (snapshot < lastSnapshotId) { // for a WithName node, when we compute its quota usage, we only count // in all the nodes existing at the time of the corresponding rename op. - // Thus if we are deleting a snapshot before/at the snapshot associated + // Thus if we are deleting a snapshot before/at the snapshot associated // with lastSnapshotId, we do not need to update the quota upwards. reclaimContext.quotaDelta().setCounts(old); } } - + @Override public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { int snapshot = getSelfSnapshot(); @@ -580,9 +580,9 @@ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { if (snapshot != Snapshot.NO_SNAPSHOT_ID) { if (prior != Snapshot.NO_SNAPSHOT_ID && snapshot <= prior) { - // the snapshot to be deleted has been deleted while traversing - // the src tree of the previous rename operation. This usually - // happens when rename's src and dst are under the same + // the snapshot to be deleted has been deleted while traversing + // the src tree of the previous rename operation. This usually + // happens when rename's src and dst are under the same // snapshottable directory. E.g., the following operation sequence: // 1. create snapshot s1 on /test // 2. rename /test/foo/bar to /test/foo2/bar @@ -602,7 +602,7 @@ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { } } } - + private int getSelfSnapshot() { INode referred = getReferredINode().asReference().getReferredINode(); int snapshot = Snapshot.NO_SNAPSHOT_ID; @@ -618,7 +618,7 @@ private int getSelfSnapshot() { return snapshot; } } - + public static class DstReference extends INodeReference { /** * Record the latest snapshot of the dst subtree before the rename. For @@ -626,24 +626,24 @@ public static class DstReference extends INodeReference { * snapshot is after this dstSnapshot, changes will be recorded to the * latest snapshot. Otherwise changes will be recorded to the snapshot * belonging to the src of the rename. - * + * * {@link Snapshot#NO_SNAPSHOT_ID} means no dstSnapshot (e.g., src of the * first-time rename). 
*/ private final int dstSnapshotId; - + @Override public final int getDstSnapshotId() { return dstSnapshotId; } - + public DstReference(INodeDirectory parent, WithCount referred, final int dstSnapshotId) { super(parent, referred); this.dstSnapshotId = dstSnapshotId; referred.addReference(this); } - + @Override public void cleanSubtree(ReclaimContext reclaimContext, int snapshot, int prior) { @@ -651,7 +651,7 @@ public void cleanSubtree(ReclaimContext reclaimContext, int snapshot, && prior == Snapshot.NO_SNAPSHOT_ID) { destroyAndCollectBlocks(reclaimContext); } else { - // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to + // if prior is NO_SNAPSHOT_ID, we need to check snapshot belonging to // the previous WithName instance if (prior == Snapshot.NO_SNAPSHOT_ID) { prior = getPriorSnapshot(this); @@ -667,15 +667,15 @@ public void cleanSubtree(ReclaimContext reclaimContext, int snapshot, getReferredINode().cleanSubtree(reclaimContext, snapshot, prior); } } - + /** * {@inheritDoc} *
- * To destroy a DstReference node, we first remove its link with the + * To destroy a DstReference node, we first remove its link with the * referred node. If the reference number of the referred node is <= 0, * we destroy the subtree of the referred node. Otherwise, we clean the - * referred node's subtree and delete everything created after the last - * rename operation, i.e., everything outside of the scope of the prior + * referred node's subtree and delete everything created after the last + * rename operation, i.e., everything outside of the scope of the prior * WithName nodes. * @param reclaimContext */ @@ -691,15 +691,15 @@ public void destroyAndCollectBlocks(ReclaimContext reclaimContext) { if (removeReference(this) <= 0) { getReferredINode().destroyAndCollectBlocks(newCtx); } else { - // we will clean everything, including files, directories, and + // we will clean everything, including files, directories, and // snapshots, that were created after this prior snapshot int prior = getPriorSnapshot(this); - // prior must be non-null, otherwise we do not have any previous + // prior must be non-null, otherwise we do not have any previous // WithName nodes, and the reference number will be 0. Preconditions.checkState(prior != Snapshot.NO_SNAPSHOT_ID); // identify the snapshot created after prior int snapshot = getSelfSnapshot(prior); - + INode referred = getReferredINode().asReference().getReferredINode(); if (referred.isFile()) { // if referred is a file, it must be a file with snapshot since we did diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java index cd3f842d2641b..b5e7707af09b9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeWithAdditionalFields.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.util.LongBitFormat; import org.apache.hadoop.util.LightWeightGSet.LinkedElement; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * {@link INode} with additional fields including id, name, permission, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index c2cdd48d4952a..832d84a50f28f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.CURRENT_STATE_ID; import static org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.ID_INTEGER_COMPARATOR; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java index f85f434e799e5..442c1aba95b1c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageServlet.java @@ -65,7 +65,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * This class is used in Namesystem's jetty to retrieve/upload a file diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index f88e4d28d32c7..795ac037c1cd0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -31,6 +31,7 @@ import java.util.Map; import java.util.PriorityQueue; import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.CopyOnWriteArrayList; import org.slf4j.Logger; @@ -42,9 +43,10 @@ import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.Sets; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; /** * Manages a collection of Journals. None of the methods are synchronized, it is @@ -676,7 +678,7 @@ public synchronized RemoteEditLogManifest getEditLogManifest(long fromTxId) { // storage directory with ancient logs. Clear out any logs we've // accumulated so far, and then skip to the next segment of logs // after the gap. 
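
Editorial note (not part of the patch): the JournalSet change in the next hunk swaps the Guava-backed Sets.newTreeSet helper for a plain java.util.TreeSet when skipping over a gap in the edit-log segments. A small standalone sketch of that tailSet pattern, with made-up txids and file names:

// Skip past a gap in edit-log segments: find the first segment whose
// start txid is >= the txid we are currently looking for.
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;

public class GapSkipDemo {
  public static void main(String[] args) {
    TreeMap<Long, String> logsByStartTxId = new TreeMap<>();
    logsByStartTxId.put(1L, "edits_0000001-0000100");
    logsByStartTxId.put(301L, "edits_0000301-0000400");   // gap after txid 100

    long curStartTxId = 101L;                  // next txid we expected to find
    SortedSet<Long> startTxIds = new TreeSet<>(logsByStartTxId.keySet());
    startTxIds = startTxIds.tailSet(curStartTxId);
    if (startTxIds.isEmpty()) {
      System.out.println("no segments after txid " + curStartTxId);
    } else {
      curStartTxId = startTxIds.first();       // resume at 301, after the gap
      System.out.println("skipping gap, next segment starts at " + curStartTxId);
    }
  }
}
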
- SortedSet startTxIds = Sets.newTreeSet(logsByStartTxId.keySet()); + SortedSet startTxIds = new TreeSet<>(logsByStartTxId.keySet()); startTxIds = startTxIds.tailSet(curStartTxId); if (startTxIds.isEmpty()) { break; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java index 6c176bdfd58cc..de0785fbaaad8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java @@ -51,8 +51,12 @@ import org.apache.hadoop.util.Daemon; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; + +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java index a9f16eb893453..b28918f176790 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java @@ -58,7 +58,7 @@ import org.eclipse.jetty.util.ajax.JSON; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java index 596db1a21425c..97d64c8c9a929 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile; import org.apache.hadoop.hdfs.util.MD5FileUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ComparisonChain; import org.apache.hadoop.util.Lists; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java index 8086b60637dae..4e9ebec1d8529 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNUpgradeUtil.java @@ -34,7 +34,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.common.StorageInfo; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; public abstract 
class NNUpgradeUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index dc37c25587157..b834a78b240cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -19,13 +19,13 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Sets; import java.util.Set; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -418,8 +418,8 @@ public long getProtocolVersion(String protocol, private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace."; - public static final Log MetricsLog = - LogFactory.getLog("NameNodeMetricsLog"); + public static final Logger MetricsLog = + LoggerFactory.getLogger("NameNodeMetricsLog"); protected FSNamesystem namesystem; protected final NamenodeRole role; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 2e22fc72752a4..25c88840a423b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -47,6 +47,7 @@ import java.util.Set; import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.util.Lists; import org.apache.hadoop.ipc.CallerContext; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java index c35a582d1846c..cd18804f65435 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RedundantEditLogInputStream.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; import org.apache.hadoop.log.LogThrottlingHelper; import org.apache.hadoop.log.LogThrottlingHelper.LogAction; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java index da5413ab5e4e8..1a60879a970fc 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.classification.InterfaceAudience; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java index e808216e2955b..146aff3ffb9f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -33,6 +33,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.apache.hadoop.classification.VisibleForTesting; +import org.apache.hadoop.util.Preconditions; + import java.io.IOException; import java.util.Arrays; import java.util.Iterator; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java index 35a7cd2f643cc..ae6bef783cacc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SaveNamespaceContext.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.util.Canceler; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Context for an ongoing SaveNamespace operation. 
This class diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 1c62d0064677a..334981952bb27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -78,7 +78,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.VersionInfo; import javax.management.ObjectName; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java index af1025ab457ec..4d46e691df217 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrFormat.java @@ -25,7 +25,7 @@ import org.apache.hadoop.fs.XAttr; import org.apache.hadoop.hdfs.XAttrHelper; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.hdfs.util.LongBitFormat; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java index 2237fb8a4ab4d..ecc701f70c4a2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/XAttrPermissionFilter.java @@ -26,7 +26,8 @@ import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.Lists; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.apache.hadoop.util.Preconditions; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index 3f273cb5e75e6..7a9ce46b1159f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -71,7 +71,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Tool which allows the standby node's storage directories to be bootstrapped diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java index bc3eee7a2806f..bfd8ec3c00ee9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java @@ -59,7 +59,7 @@ import static org.apache.hadoop.util.ExitUtil.terminate; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Time; @@ -86,7 +86,7 @@ public class EditLogTailer { Long.MAX_VALUE; private final EditLogTailerThread tailerThread; - + private final Configuration conf; private final FSNamesystem namesystem; private final Iterator nnLookup; @@ -98,7 +98,7 @@ public class EditLogTailer { * The last transaction ID at which an edit log roll was initiated. */ private long lastRollTriggerTxId = HdfsServerConstants.INVALID_TXID; - + /** * The highest transaction ID loaded by the Standby. */ @@ -177,7 +177,7 @@ public EditLogTailer(FSNamesystem namesystem, Configuration conf) { this.conf = conf; this.namesystem = namesystem; this.editLog = namesystem.getEditLog(); - + lastLoadTimeMs = monotonicNow(); lastRollTimeMs = monotonicNow(); @@ -208,7 +208,7 @@ public EditLogTailer(FSNamesystem namesystem, Configuration conf) { LOG.info("Not going to trigger log rolls on active node because " + DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY + " is negative."); } - + sleepTimeMs = conf.getTimeDuration( DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_DEFAULT, @@ -264,7 +264,7 @@ public EditLogTailer(FSNamesystem namesystem, Configuration conf) { public void start() { tailerThread.start(); } - + public void stop() throws IOException { tailerThread.setShouldRun(false); tailerThread.interrupt(); @@ -277,12 +277,12 @@ public void stop() throws IOException { rollEditsRpcExecutor.shutdown(); } } - + @VisibleForTesting FSEditLog getEditLog() { return editLog; } - + @VisibleForTesting public void setEditLog(FSEditLog editLog) { this.editLog = editLog; @@ -319,10 +319,10 @@ public Void run() throws Exception { } }); } - + @VisibleForTesting public long doTailEdits() throws IOException, InterruptedException { - // Write lock needs to be interruptible here because the + // Write lock needs to be interruptible here because the // transitionToActive RPC takes the write lock before calling // tailer.stop() -- so if we're not interruptible, it will // deadlock. @@ -331,7 +331,7 @@ public long doTailEdits() throws IOException, InterruptedException { FSImage image = namesystem.getFSImage(); long lastTxnId = image.getLastAppliedTxId(); - + if (LOG.isDebugEnabled()) { LOG.debug("lastTxnId: " + lastTxnId); } @@ -354,7 +354,7 @@ public long doTailEdits() throws IOException, InterruptedException { if (LOG.isDebugEnabled()) { LOG.debug("edit streams to load from: " + streams.size()); } - + // Once we have streams to load, errors encountered are legitimate cause // for concern, so we don't catch them here. Simple errors reading from // disk are ignored. @@ -394,7 +394,7 @@ public long getLastLoadTimeMs() { * @return true if the configured log roll period has elapsed. 
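
Editorial note (not part of the patch): the EditLogTailer hunks only trim trailing whitespace; the roll-trigger condition itself (tooLongSinceLastLoad, in the next hunk) is unchanged. A minimal sketch of that elapsed-time check, with made-up period values, assuming a nanoTime-based monotonic clock in the spirit of Hadoop's Time.monotonicNow():

// A monotonic clock keeps the comparison immune to wall-clock jumps,
// and a negative period disables the roll trigger entirely.
import java.util.concurrent.TimeUnit;

public class LogRollTimerDemo {
  static long monotonicNow() {
    return TimeUnit.NANOSECONDS.toMillis(System.nanoTime());
  }

  public static void main(String[] args) throws InterruptedException {
    long logRollPeriodMs = 50;             // a negative value means "never trigger"
    long lastRollTimeMs = monotonicNow();

    Thread.sleep(80);                      // pretend no roll happened in the meantime

    boolean tooLong = logRollPeriodMs >= 0
        && (monotonicNow() - lastRollTimeMs) > logRollPeriodMs;
    System.out.println("trigger active-NN log roll? " + tooLong);   // true
  }
}
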
*/ private boolean tooLongSinceLastLoad() { - return logRollPeriodMs >= 0 && + return logRollPeriodMs >= 0 && (monotonicNow() - lastRollTimeMs) > logRollPeriodMs; } @@ -449,15 +449,15 @@ void sleep(long sleepTimeMillis) throws InterruptedException { */ private class EditLogTailerThread extends Thread { private volatile boolean shouldRun = true; - + private EditLogTailerThread() { super("Edit log tailer"); } - + private void setShouldRun(boolean shouldRun) { this.shouldRun = shouldRun; } - + @Override public void run() { SecurityUtil.doAsLoginUserOrFatal( @@ -469,7 +469,7 @@ public Object run() { } }); } - + private void doWork() { long currentSleepTimeMs = sleepTimeMs; while (shouldRun) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java index 3db43f01288d1..784d19660f104 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/RemoteNameNodeInfo.java @@ -29,8 +29,6 @@ import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; - /** * Information about a single remote NameNode */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java index ac1acbdea45ba..437df53e060d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java @@ -49,7 +49,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java index df052f171afa8..b1f15dbd1b331 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiff.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotFSImageFormat.ReferenceMap; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * The difference of an inode between in two snapshots. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java index dedc1e49d341f..ecdad34360937 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DiffListBySkipList.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; import org.apache.hadoop.hdfs.server.namenode.snapshot. DirectoryWithSnapshotFeature.DirectoryDiff; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java index 2c9ad473c212b..ba67a6190586f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectorySnapshottableFeature.java @@ -48,7 +48,7 @@ import org.apache.hadoop.util.Time; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java index b9b446707a115..56bebdd265863 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/DirectoryWithSnapshotFeature.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.namenode.snapshot; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite; import org.apache.hadoop.hdfs.server.namenode.*; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java index 8d05af284b4e6..ddade3f3295e2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java @@ -82,7 +82,7 @@ import org.apache.hadoop.hdfs.server.namenode.XAttrFeature; import org.apache.hadoop.hdfs.util.EnumCounters; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.thirdparty.protobuf.ByteString; @InterfaceAudience.Private diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java index 58dd2cf0a3f04..74d06c883b8d7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffInfo.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.SignedBytes; import org.apache.hadoop.util.ChunkedArrayList; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java index 4b03c4f0d53a1..e20a46e1b75f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotDiffListingInfo.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference; import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.ChunkedArrayList; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java index b43c45854bbfd..21642da9c2463 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotFSImageFormat.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff; import org.apache.hadoop.hdfs.util.ReadOnlyList; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * A helper class defining static methods for reading/writing snapshot related diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java index adbc22744f867..9b67d113d637c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotManager.java @@ -57,7 +57,10 @@ import org.apache.hadoop.hdfs.server.namenode.LeaseManager; import org.apache.hadoop.metrics2.util.MBeans; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Lists; + +import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java index 686644f3ec019..47596019af4ed 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java @@ -58,7 +58,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Setting storagePolicy on a file after the file write will only update the new diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java index 93eea6068c2fb..a43eb62d72b41 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopAuditLogger.java @@ -19,7 +19,7 @@ import java.net.InetAddress; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java index e78e41957d107..d0c0b6958cc27 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/TopConf.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * This class is a common place for NNTop configuration. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java index 4e9807399395c..8015641e1f1bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/top/window/RollingWindowManager.java @@ -28,8 +28,8 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.hadoop.hdfs.server.namenode.top.TopConf; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java index 83ab9153574b7..3dce258d89d9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java @@ -26,6 +26,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.net.UnknownHostException; +import java.nio.charset.StandardCharsets; import java.security.Principal; import java.security.PrivilegedExceptionAction; import java.util.Base64; @@ -114,7 +115,6 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Lists; import com.sun.jersey.spi.container.ResourceFilters; @@ -1303,7 +1303,7 @@ protected Response get( { byte[] start = HdfsFileStatus.EMPTY_NAME; if (startAfter != null && startAfter.getValue() != null) { - start = startAfter.getValue().getBytes(Charsets.UTF_8); + start = startAfter.getValue().getBytes(StandardCharsets.UTF_8); } final DirectoryListing listing = getDirectoryListing(cp, fullpath, start); final String js = JsonUtil.toJsonString(listing); @@ -1414,7 +1414,7 @@ private static StreamingOutput getListingStream(final ClientProtocol cp, @Override public void write(final OutputStream outstream) throws IOException { final PrintWriter out = new PrintWriter(new OutputStreamWriter( - outstream, Charsets.UTF_8)); + outstream, StandardCharsets.UTF_8)); out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\"" + FileStatus.class.getSimpleName() + "\":["); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java index f60d748dc9f56..21fc09b183b49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlocksWithLocations.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.protocol; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.StorageType; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java index 8b94a11594146..93125a22dbd24 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java @@ -31,7 +31,7 @@ import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * NamespaceInfo is returned by the name-node in reply diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java index 391078f558509..8cd7ebd1cefac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java @@ -21,7 +21,7 @@ import java.util.List; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java index 40d0e69591c9d..c2bdee1af7649 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/AdminHelper.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.hdfs.tools; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index 640af66e4b7f0..4deadab059ce2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -105,7 +105,7 @@ import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * This class provides some DFS administrative access shell commands. 
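The NamenodeWebHdfsMethods hunk above is representative of the charset cleanup: the removed Guava Charsets.UTF_8 constant is replaced by the JDK's StandardCharsets.UTF_8, both for getBytes(...) and for the OutputStreamWriter behind the listing stream. A small self-contained sketch of that idiom; the class name, the JSON snippet, and the printed summary are illustrative only:

    import java.io.ByteArrayOutputStream;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.nio.charset.StandardCharsets;

    public class Utf8WriterSketch {               // hypothetical example, not in the patch
      public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        // StandardCharsets.UTF_8 is a JDK constant; no Guava Charsets and no
        // Charset.forName("UTF-8") lookup is needed.
        PrintWriter writer = new PrintWriter(
            new OutputStreamWriter(out, StandardCharsets.UTF_8));
        writer.println("{\"FileStatuses\":[]}");
        writer.flush();
        byte[] utf8 = "newOwner".getBytes(StandardCharsets.UTF_8);
        System.out.println(out.size() + " bytes written, " + utf8.length + " bytes encoded");
      }
    }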
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java index 15c63732f7a69..4d0d56c05a75c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java @@ -23,7 +23,7 @@ import java.util.Collection; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSortedMap; import org.apache.commons.cli.CommandLine; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java index 94aff53470b72..4778cd43f98e0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/NNHAServiceTarget.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java index fc5f30e883001..ac43b21d840ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java @@ -22,6 +22,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.Stack; import org.apache.hadoop.classification.InterfaceAudience; @@ -41,7 +42,6 @@ import org.xml.sax.helpers.DefaultHandler; import org.xml.sax.helpers.XMLReaderFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * OfflineEditsXmlLoader walks an EditsVisitor over an OEV XML file @@ -75,7 +75,7 @@ public OfflineEditsXmlLoader(OfflineEditsVisitor visitor, File inputFile, OfflineEditsViewer.Flags flags) throws FileNotFoundException { this.visitor = visitor; this.fileReader = - new InputStreamReader(new FileInputStream(inputFile), Charsets.UTF_8); + new InputStreamReader(new FileInputStream(inputFile), StandardCharsets.UTF_8); this.fixTxIds = flags.getFixTxIds(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java index 411df1062771b..fe477058a05a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/StatisticsEditsVisitor.java @@ -21,6 +21,7 @@ import java.io.OutputStream; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.HashMap; @@ -30,7 +31,6 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * StatisticsEditsVisitor implements text version of EditsVisitor @@ -53,7 +53,7 @@ public class StatisticsEditsVisitor implements OfflineEditsVisitor { * @param out Name of file to write output to */ public StatisticsEditsVisitor(OutputStream out) throws IOException { - this.out = new PrintWriter(new OutputStreamWriter(out, Charsets.UTF_8)); + this.out = new PrintWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8)); } /** Start the visitor */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java index f14ee5f930fe0..167e8360cd742 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelFutureListener; @@ -37,6 +36,7 @@ import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; @@ -124,7 +124,7 @@ public void channelRead0(ChannelHandlerContext ctx, HttpRequest request) DefaultFullHttpResponse resp = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.OK, Unpooled.wrappedBuffer(content - .getBytes(Charsets.UTF_8))); + .getBytes(StandardCharsets.UTF_8))); resp.headers().set(CONTENT_TYPE, APPLICATION_JSON_UTF8); resp.headers().set(CONTENT_LENGTH, resp.content().readableBytes()); resp.headers().set(CONNECTION, CLOSE); @@ -142,7 +142,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) Exception e = cause instanceof Exception ? 
(Exception) cause : new Exception(cause); final String output = JsonUtil.toJsonString(e); - ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(Charsets.UTF_8)); + ByteBuf content = Unpooled.wrappedBuffer(output.getBytes(StandardCharsets.UTF_8)); final DefaultFullHttpResponse resp = new DefaultFullHttpResponse( HTTP_1_1, INTERNAL_SERVER_ERROR, content); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java index 491861bb2771c..6fea2aff10e6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java @@ -58,7 +58,12 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.LimitInputStream; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; +import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; +import org.apache.hadoop.thirdparty.protobuf.CodedInputStream; +import org.apache.hadoop.thirdparty.protobuf.InvalidProtocolBufferException; + import org.apache.hadoop.util.Lists; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java index 54b183b7b6965..fbeea0f673c0e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java @@ -33,7 +33,7 @@ import org.apache.hadoop.util.LimitInputStream; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * This is the tool for analyzing file sizes in the namespace image. 
In order to diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java index 2e2eaf4e4d46e..6b55c7f205b9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/ImageLoaderCurrent.java @@ -19,6 +19,7 @@ import java.io.DataInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.Date; @@ -320,7 +321,7 @@ private void processINodesUC(DataInputStream in, ImageVisitor v, for(int i = 0; i < numINUC; i++) { v.visitEnclosingElement(ImageElement.INODE_UNDER_CONSTRUCTION); byte [] name = FSImageSerialization.readBytes(in); - String n = new String(name, "UTF8"); + String n = new String(name, StandardCharsets.UTF_8); v.visit(ImageElement.INODE_PATH, n); if (NameNodeLayoutVersion.supports(Feature.ADD_INODE_ID, imageVersion)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java index 6a2049acb4b54..0c075ff6dac7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageReconstructor.java @@ -36,7 +36,7 @@ import java.io.InputStreamReader; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.security.DigestOutputStream; @@ -1840,7 +1840,7 @@ public static void run(String inputPath, String outputPath) Files.deleteIfExists(Paths.get(outputPath)); fout = Files.newOutputStream(Paths.get(outputPath)); fis = Files.newInputStream(Paths.get(inputPath)); - reader = new InputStreamReader(fis, Charset.forName("UTF-8")); + reader = new InputStreamReader(fis, StandardCharsets.UTF_8); out = new CountingOutputStream( new DigestOutputStream( new BufferedOutputStream(fout), digester)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java index 737e7384b9a7c..28c450701b846 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageCorruptionDetector.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.hdfs.server.namenode.FsImageProto; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java index 319cba7b955b4..24650410b54ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java @@ -17,9 +17,26 @@ */ package org.apache.hadoop.hdfs.tools.offlineImageViewer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; -import org.apache.hadoop.util.Lists; +import java.io.BufferedInputStream; +import java.io.Closeable; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; +import java.io.RandomAccessFile; +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + import org.apache.commons.lang3.StringUtils; import org.apache.commons.text.StringEscapeUtils; import org.apache.hadoop.conf.Configuration; @@ -40,7 +57,9 @@ import org.apache.hadoop.hdfs.server.namenode.SerialNumberManager; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.LimitInputStream; +import org.apache.hadoop.util.Lists; import org.apache.hadoop.util.Time; + import org.fusesource.leveldbjni.JniDBFactory; import org.iq80.leveldb.DB; import org.iq80.leveldb.Options; @@ -48,24 +67,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedInputStream; -import java.io.Closeable; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.PrintStream; -import java.io.RandomAccessFile; -import java.io.UnsupportedEncodingException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; +import org.apache.hadoop.util.Preconditions; +import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; /** * This class reads the protobuf-based fsimage and generates text output @@ -403,9 +406,8 @@ private static byte[] toBytes(long value) { return ByteBuffer.allocate(8).putLong(value).array(); } - private static byte[] toBytes(String value) - throws UnsupportedEncodingException { - return value.getBytes("UTF-8"); + private static byte[] toBytes(String value) { + return value.getBytes(StandardCharsets.UTF_8); } private static long toLong(byte[] bytes) { @@ -414,11 +416,7 @@ private static long toLong(byte[] bytes) { } private static String toString(byte[] bytes) throws IOException { - try { - return new String(bytes, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new IOException(e); - } + return new String(bytes, StandardCharsets.UTF_8); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java index f732bd6a7f542..1d7c8a4a139ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TextWriterImageVisitor.java @@ -19,10 +19,10 @@ import java.io.IOException; import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * TextWriterImageProcessor mixes in the ability for ImageVisitor @@ -61,7 +61,7 @@ public TextWriterImageVisitor(String filename, boolean printToScreen) super(); this.printToScreen = printToScreen; fw = new OutputStreamWriter(Files.newOutputStream(Paths.get(filename)), - Charsets.UTF_8); + StandardCharsets.UTF_8); okToWrite = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java index 21a7bb58750b8..298e645d43224 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.ArrayList; import java.util.Collections; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java index 880bf6edb2516..0e2097953fc62 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.lang3.ArrayUtils; import java.util.Arrays; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java index fee687edf54b1..5480d288410be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumDoubles.java @@ -19,7 +19,7 @@ import java.util.Arrays; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Similar to {@link EnumCounters} except that the value type is double. 
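The PBImageTextWriter hunk above also removes checked-exception handling: value.getBytes("UTF-8") declares UnsupportedEncodingException and the decode path wrapped it in an IOException, whereas the StandardCharsets.UTF_8 overloads cannot fail that way, so the throws clause and the try/catch disappear. A sketch of the resulting round-trip, using a hypothetical class name:

    import java.nio.charset.StandardCharsets;

    public class Utf8RoundTripSketch {            // hypothetical example, not in the patch
      // No UnsupportedEncodingException is possible with the charset constant,
      // so neither method needs a throws clause or a try/catch wrapper.
      private static byte[] toBytes(String value) {
        return value.getBytes(StandardCharsets.UTF_8);
      }

      private static String toString(byte[] bytes) {
        return new String(bytes, StandardCharsets.UTF_8);
      }

      public static void main(String[] args) {
        byte[] encoded = toBytes("/dir/file");
        System.out.println(toString(encoded));    // prints /dir/file
      }
    }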
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java index 2bc63ec77eb29..faf689d84461d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/MD5FileUtils.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.security.DigestInputStream; import java.security.MessageDigest; @@ -35,7 +36,6 @@ import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Static functions for dealing with files of the same format @@ -75,7 +75,7 @@ public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5) private static Matcher readStoredMd5(File md5File) throws IOException { BufferedReader reader = new BufferedReader(new InputStreamReader( - Files.newInputStream(md5File.toPath()), Charsets.UTF_8)); + Files.newInputStream(md5File.toPath()), StandardCharsets.UTF_8)); String md5Line; try { md5Line = reader.readLine(); @@ -155,7 +155,7 @@ private static void saveMD5File(File dataFile, String digestString) String md5Line = digestString + " *" + dataFile.getName() + "\n"; AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File); - afos.write(md5Line.getBytes(Charsets.UTF_8)); + afos.write(md5Line.getBytes(StandardCharsets.UTF_8)); afos.close(); if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java index 78834e0161a4f..309e27d82c00f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/PersistentLongFile.java @@ -22,14 +22,13 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; - /** * Class that represents a file on disk which persistently stores * a single long value. 
The file is updated atomically @@ -77,7 +76,7 @@ public void set(long newVal) throws IOException { public static void writeFile(File file, long val) throws IOException { AtomicFileOutputStream fos = new AtomicFileOutputStream(file); try { - fos.write(String.valueOf(val).getBytes(Charsets.UTF_8)); + fos.write(String.valueOf(val).getBytes(StandardCharsets.UTF_8)); fos.write('\n'); fos.close(); fos = null; @@ -93,7 +92,7 @@ public static long readFile(File file, long defaultVal) throws IOException { if (file.exists()) { BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream( - file), Charsets.UTF_8)); + file), StandardCharsets.UTF_8)); try { val = Long.parseLong(br.readLine()); br.close(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java index 99b1ddbbc1130..7cf216b45508d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestEnhancedByteBufferAccess.java @@ -71,7 +71,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 380fcb74338a9..0a33c3f01aa6c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -55,6 +55,7 @@ import java.net.URL; import java.net.URLConnection; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -72,9 +73,8 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; import java.util.function.Supplier; import org.apache.hadoop.util.Lists; @@ -990,7 +990,7 @@ public static void appendFileNewBlock(DistributedFileSystem fs, * @return url content as string (UTF-8 encoding assumed) */ public static String urlGet(URL url) throws IOException { - return new String(urlGetBytes(url), Charsets.UTF_8); + return new String(urlGetBytes(url), StandardCharsets.UTF_8); } /** @@ -1443,7 +1443,7 @@ public static void runOperations(MiniDFSCluster cluster, Short permission = 0777; filesystem.setPermission(pathFileCreate, new FsPermission(permission)); // OP_SET_OWNER 8 - filesystem.setOwner(pathFileCreate, new String("newOwner"), null); + filesystem.setOwner(pathFileCreate, "newOwner", null); // OP_CLOSE 9 see above // OP_SET_GENSTAMP 10 see above // OP_SET_NS_QUOTA 11 obsolete diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java index 
ba5a451d8c2f6..b2ef1b4ec3065 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/ErasureCodeBenchmarkThroughput.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index d324720a9254a..a2d1bd3a800fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -71,6 +71,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; @@ -86,6 +87,7 @@ import org.apache.hadoop.hdfs.server.namenode.ImageServlet; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; +import org.apache.hadoop.util.Lists; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; @@ -146,9 +148,7 @@ import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; -import org.apache.hadoop.util.Lists; -import org.apache.hadoop.util.Sets; +import org.apache.hadoop.util.Preconditions; /** * This class creates a single-process DFS cluster for junit testing. 
@@ -650,7 +650,7 @@ public DataNode getDatanode() { private boolean federation; private boolean checkExitOnShutdown = true; protected final int storagesPerDatanode; - private Set fileSystems = Sets.newHashSet(); + private Set fileSystems = new HashSet<>(); private List storageCap = Lists.newLinkedList(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java index 93f48b4e3027b..2068a0076a582 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSNNTopology.java @@ -22,7 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java index 0ef3b75adceef..411ea5c2d4f48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestAppendSnapshotTruncate.java @@ -51,7 +51,7 @@ import org.junit.BeforeClass; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Test randomly mixing append, snapshot and truncate operations. @@ -139,7 +139,7 @@ public boolean accept(File f) { static class DirWorker extends Worker { final Path dir; final File localDir; - + final FileWorker[] files; private Map snapshotPaths = new HashMap(); @@ -165,7 +165,7 @@ String createSnapshot(String snapshot) throws IOException { .append(snapshot).append(" for ").append(dir); { - //copy all local files to a sub dir to simulate snapshot. + //copy all local files to a sub dir to simulate snapshot. 
final File subDir = new File(localDir, snapshot); Assert.assertFalse(subDir.exists()); subDir.mkdir(); @@ -174,7 +174,7 @@ String createSnapshot(String snapshot) throws IOException { FileUtils.copyFile(f, new File(subDir, f.getName())); } } - + final Path p = dfs.createSnapshot(dir, snapshot); snapshotPaths.put(snapshot, p); return b.toString(); @@ -186,14 +186,14 @@ String checkSnapshot(String snapshot) throws IOException { final File subDir = new File(localDir, snapshot); Assert.assertTrue(subDir.exists()); - + final File[] localFiles = subDir.listFiles(FILE_ONLY); final Path p = snapshotPaths.get(snapshot); final FileStatus[] statuses = dfs.listStatus(p); Assert.assertEquals(localFiles.length, statuses.length); b.append(p).append(" vs ").append(subDir).append(", ") .append(statuses.length).append(" entries"); - + Arrays.sort(localFiles); Arrays.sort(statuses); for(int i = 0; i < statuses.length; i++) { @@ -211,7 +211,7 @@ String deleteSnapshot(String snapshot) throws IOException { return b.toString(); } - + @Override public String call() throws Exception { final int op = ThreadLocalRandom.current().nextInt(6); @@ -231,7 +231,7 @@ public String call() throws Exception { final String snapshot = keys[ThreadLocalRandom.current() .nextInt(keys.length)]; final String s = checkSnapshot(snapshot); - + if (op == 2) { return deleteSnapshot(snapshot); } @@ -242,7 +242,7 @@ public String call() throws Exception { } void pauseAllFiles() { - for(FileWorker f : files) { + for(FileWorker f : files) { f.pause(); } @@ -251,22 +251,22 @@ void pauseAllFiles() { for(; i < files.length && files[i].isPaused(); i++); } } - + void startAllFiles() { - for(FileWorker f : files) { + for(FileWorker f : files) { f.start(); } } - + void stopAllFiles() throws InterruptedException { - for(FileWorker f : files) { + for(FileWorker f : files) { f.stop(); } } void checkEverything() throws IOException { LOG.info("checkEverything"); - for(FileWorker f : files) { + for(FileWorker f : files) { f.checkFullFile(); f.checkErrorState(); } @@ -331,7 +331,7 @@ String append(int n) throws IOException { } return b.toString(); } - + String truncateArbitrarily(int nBytes) throws IOException { Preconditions.checkArgument(nBytes > 0); final int length = checkLength(); @@ -367,7 +367,7 @@ private boolean truncate(long newLength, StringBuilder b) throws IOException { } return isReady; } - + int checkLength() throws IOException { return checkLength(file, localFile); } @@ -378,7 +378,7 @@ static int checkLength(Path file, File localFile) throws IOException { Assert.assertTrue(length <= Integer.MAX_VALUE); return (int)length; } - + String checkFullFile() throws IOException { return checkFullFile(file, localFile); } @@ -388,23 +388,23 @@ static String checkFullFile(Path file, File localFile) throws IOException { .append(file.getName()).append(" vs ").append(localFile); final byte[] bytes = new byte[checkLength(file, localFile)]; b.append(", length=").append(bytes.length); - - final FileInputStream in = new FileInputStream(localFile); + + final FileInputStream in = new FileInputStream(localFile); for(int n = 0; n < bytes.length; ) { n += in.read(bytes, n, bytes.length - n); } in.close(); - + AppendTestUtil.checkFullFile(dfs, file, bytes.length, bytes, "File content mismatch: " + b, false); return b.toString(); } } - + static abstract class Worker implements Callable { enum State { IDLE(false), RUNNING(false), STOPPED(true), ERROR(true); - + final boolean isTerminated; State(boolean isTerminated) { @@ -441,7 +441,7 @@ void setErrorState(Throwable 
t) { void start() { Preconditions.checkState(state.compareAndSet(State.IDLE, State.RUNNING)); - + if (thread.get() == null) { final Thread t = new Thread(null, new Runnable() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java index 3191fbdf8fe1f..bc83a704720f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBalancerBandwidth.java @@ -23,6 +23,7 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.concurrent.TimeoutException; @@ -46,7 +47,7 @@ public class TestBalancerBandwidth { final static private int DEFAULT_BANDWIDTH = 1024*1024; public static final Logger LOG = LoggerFactory.getLogger(TestBalancerBandwidth.class); - private static final Charset UTF8 = Charset.forName("UTF-8"); + private static final Charset UTF8 = StandardCharsets.UTF_8; private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); private final PrintStream outStream = new PrintStream(outContent); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java index c224c4916b57d..115843d46626d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockTokenWrappingQOP.java @@ -21,8 +21,8 @@ import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; @@ -52,7 +52,7 @@ */ @RunWith(Parameterized.class) public class TestBlockTokenWrappingQOP extends SaslDataTransferTestCase { - public static final Log LOG = LogFactory.getLog(TestPermission.class); + public static final Logger LOG = LoggerFactory.getLogger(TestPermission.class); private HdfsConfiguration conf; private MiniDFSCluster cluster; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index 02b88505aab11..56bd773d98c02 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Collections; import java.util.List; @@ -40,7 +41,6 @@ import org.junit.After; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Lists; /** @@ -311,8 +311,8 @@ public void testRollback() throws Exception { for (File f : baseDirs) { UpgradeUtilities.corruptFile( new File(f,"VERSION"), - "layoutVersion".getBytes(Charsets.UTF_8), - "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8)); + 
"layoutVersion".getBytes(StandardCharsets.UTF_8), + "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8)); } startNameNodeShouldFail("file VERSION has layoutVersion missing"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java index bbe991dacc781..7c2775aa8fdc1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailureBase.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java index 77cae4f0e686c..0e7c35a5d777a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java @@ -29,6 +29,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.regex.Pattern; import org.slf4j.Logger; @@ -49,7 +50,6 @@ import org.junit.Ignore; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** @@ -334,8 +334,8 @@ public void testUpgrade() throws Exception { for (File f : baseDirs) { UpgradeUtilities.corruptFile( new File(f,"VERSION"), - "layoutVersion".getBytes(Charsets.UTF_8), - "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8)); + "layoutVersion".getBytes(StandardCharsets.UTF_8), + "xxxxxxxxxxxxx".getBytes(StandardCharsets.UTF_8)); } startNameNodeShouldFail(StartupOption.UPGRADE); UpgradeUtilities.createEmptyDirs(nameNodeDirs); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 19a053d0841ee..06b3531d259ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -54,6 +54,7 @@ import java.net.URISyntaxException; import java.util.Arrays; import java.util.Collection; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -1045,10 +1046,10 @@ public void testGetNNServiceRpcAddressesForNsIds() throws IOException { { Collection internal = DFSUtil.getInternalNameServices(conf); - assertEquals(Sets.newHashSet("nn1"), internal); + assertEquals(new HashSet<>(Arrays.asList("nn1")), internal); Collection all = DFSUtilClient.getNameServiceIds(conf); - assertEquals(Sets.newHashSet("nn1", "nn2"), all); + assertEquals(new HashSet<>(Arrays.asList("nn1", "nn2")), all); } Map> nnMap = DFSUtil diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java index 
de738eef177a3..f2500af46bb36 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java @@ -160,7 +160,7 @@ public void testDatanodeReportMissingBlock() throws Exception { cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); Path p = new Path("/testDatanodeReportMissingBlock"); - DFSTestUtil.writeFile(fs, p, new String("testdata")); + DFSTestUtil.writeFile(fs, p, "testdata"); LocatedBlock lb = fs.getClient().getLocatedBlocks(p.toString(), 0).get(0); assertEquals(3, lb.getLocations().length); ExtendedBlock b = lb.getBlock(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java index 8f42e47263abb..d138d8704ae85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestLeaseRecoveryStriped.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 84f62325fe47c..65e8b7a42bd09 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -48,7 +48,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Tests MiniDFS cluster setup/teardown and isolation. 
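The MiniDFSCluster and TestDFSUtil hunks above, like the similar test changes further down, drop Guava's Sets.newHashSet/newTreeSet helpers in favour of plain JDK constructors. A rough sketch of the equivalent calls; the class name and the values are made up for illustration:

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.TreeSet;

    public class JdkCollectionsSketch {           // hypothetical example, not in the patch
      public static void main(String[] args) {
        // Plain JDK constructors replace the Guava Sets.newHashSet(...)/newTreeSet() helpers.
        Set<String> nameservices = new HashSet<>(Arrays.asList("nn1", "nn2"));
        Set<Long> ipcCounts = new TreeSet<>();
        ipcCounts.add(3L);
        System.out.println(nameservices.contains("nn1") + " " + ipcCounts);
      }
    }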
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java index d536c5e8a969a..8121d8454a183 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -287,7 +288,7 @@ public void testMultipleNNPortOverwriteDownStream() throws Exception { private void doTest(FileSystem fs, Path path) throws Exception { FileSystemTestHelper.createFile(fs, path, NUM_BLOCKS, BLOCK_SIZE); assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE), - DFSTestUtil.readFile(fs, path).getBytes("UTF-8")); + DFSTestUtil.readFile(fs, path).getBytes(StandardCharsets.UTF_8)); BlockLocation[] blockLocations = fs.getFileBlockLocations(path, 0, Long.MAX_VALUE); assertNotNull(blockLocations); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index f5e7b94d0bd0b..f94aeaf7bd8ec 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -32,6 +32,7 @@ import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.List; import java.util.Scanner; @@ -65,7 +66,6 @@ import org.junit.Rule; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Lists; import org.junit.rules.Timeout; import org.slf4j.Logger; @@ -1216,7 +1216,7 @@ public void testSetSpaceQuotaWhenStorageTypeIsWrong() throws Exception { String[] args = { "-setSpaceQuota", "100", "-storageType", "COLD", "/testDir" }; admin.run(args); - String errOutput = new String(err.toByteArray(), Charsets.UTF_8); + String errOutput = new String(err.toByteArray(), StandardCharsets.UTF_8); assertTrue( errOutput.contains(StorageType.getTypesSupportingQuota().toString())); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java index 29ac394363772..58d1a62b4a9e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStripedFileAppend.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.hdfs; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.Path; @@ -47,7 +47,7 @@ * Tests append on erasure coded file. 
*/ public class TestStripedFileAppend { - public static final Log LOG = LogFactory.getLog(TestStripedFileAppend.class); + public static final Logger LOG = LoggerFactory.getLogger(TestStripedFileAppend.class); static { DFSTestUtil.setNameNodeLogLevel(Level.ALL); @@ -146,4 +146,4 @@ public void testAppendWithoutNewBlock() throws IOException { assertFalse("No file should be open after append failure", listOpenFiles.hasNext()); } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java index 2b90c92388f32..7ebf55f571cd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java index c0a057310d052..93f2e4f4e3a5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java @@ -34,6 +34,7 @@ import org.junit.Test; import org.junit.rules.Timeout; +import java.util.Arrays; import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; @@ -274,10 +275,11 @@ public void testChooseRandomWithStorageType() throws Exception { // test the choose random can return desired storage type nodes without // exclude Set diskUnderL1 = - Sets.newHashSet("host2", "host4", "host5", "host6"); - Set archiveUnderL1 = Sets.newHashSet("host1", "host3"); - Set ramdiskUnderL1 = Sets.newHashSet("host7"); - Set ssdUnderL1 = Sets.newHashSet("host8"); + new HashSet<>(Arrays.asList("host2", "host4", "host5", "host6")); + Set archiveUnderL1 = new HashSet<>(Arrays.asList("host1", "host3")); + Set ramdiskUnderL1 = new HashSet<>(Arrays.asList("host7")); + Set ssdUnderL1 = new HashSet<>(Arrays.asList("host8")); + Set nvdimmUnderL1 = new HashSet<>(Arrays.asList("host9")); for (int i = 0; i < 10; i++) { n = CLUSTER.chooseRandomWithStorageType("/l1", null, null, StorageType.DISK); @@ -360,8 +362,8 @@ public void testChooseRandomWithStorageTypeWithExcluded() throws Exception { assertEquals("host6", dd.getHostName()); // exclude the host on r4 (since there is only one host, no randomness here) excluded.add(n); - Set expectedSet = Sets.newHashSet("host4", "host5"); - for (int i = 0; i<10; i++) { + Set expectedSet = new HashSet<>(Arrays.asList("host4", "host5")); + for (int i = 0; i < 10; i++) { // under l1, there are four hosts with DISK: // /l1/d1/r1/host2, /l1/d1/r2/host4, /l1/d1/r2/host5 and /l1/d2/r3/host6 // host6 is excludedNode, host2 is under excluded range scope /l1/d1/r1 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java index 3dd0b7eb99ea1..85e43f65c37b2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/TestSaslDataTransfer.java @@ -32,6 +32,7 @@ import java.net.ServerSocket; import java.net.Socket; import java.net.SocketTimeoutException; +import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicBoolean; import org.slf4j.LoggerFactory; @@ -200,7 +201,7 @@ private void doTest(HdfsConfiguration conf) throws IOException { fs = FileSystem.get(cluster.getURI(), conf); FileSystemTestHelper.createFile(fs, PATH, NUM_BLOCKS, BLOCK_SIZE); assertArrayEquals(FileSystemTestHelper.getFileData(NUM_BLOCKS, BLOCK_SIZE), - DFSTestUtil.readFile(fs, PATH).getBytes("UTF-8")); + DFSTestUtil.readFile(fs, PATH).getBytes(StandardCharsets.UTF_8)); BlockLocation[] blockLocations = fs.getFileBlockLocations(PATH, 0, Long.MAX_VALUE); assertNotNull(blockLocations); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java index d1d1e7200e764..202ed65452d61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQJMWithFaults.java @@ -34,6 +34,7 @@ import java.util.Map; import java.util.Random; import java.util.SortedSet; +import java.util.TreeSet; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; @@ -61,7 +62,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; import org.apache.hadoop.util.Sets; @@ -108,7 +109,7 @@ private static long determineMaxIpcNumber() throws Exception { qjm.format(FAKE_NSINFO, false); doWorkload(cluster, qjm); - SortedSet ipcCounts = Sets.newTreeSet(); + SortedSet ipcCounts = new TreeSet<>(); for (AsyncLogger l : qjm.getLoggerSetForTests().getLoggersForTests()) { InvocationCountingChannel ch = (InvocationCountingChannel)l; ch.waitForAllPendingCalls(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index 6e117b7687ac7..c029b11c084b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdfs.qjournal.server; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.primitives.Bytes; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.conf.Configuration; @@ -54,6 +53,7 @@ import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.charset.StandardCharsets; 
import java.util.Collection; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -255,7 +255,7 @@ public void testJournal() throws Exception { ch.newEpoch(1).get(); ch.setEpoch(1); ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get(); - ch.sendEdits(1L, 1, 1, "hello".getBytes(Charsets.UTF_8)).get(); + ch.sendEdits(1L, 1, 1, "hello".getBytes(StandardCharsets.UTF_8)).get(); metrics = MetricsAsserts.getMetrics( journal.getMetrics().getName()); @@ -268,7 +268,7 @@ public void testJournal() throws Exception { beginTimestamp = lastJournalTimestamp; ch.setCommittedTxId(100L); - ch.sendEdits(1L, 2, 1, "goodbye".getBytes(Charsets.UTF_8)).get(); + ch.sendEdits(1L, 2, 1, "goodbye".getBytes(StandardCharsets.UTF_8)).get(); metrics = MetricsAsserts.getMetrics( journal.getMetrics().getName()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java index ef0c0a6e9dc2d..c25cc88059d31 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java @@ -36,7 +36,7 @@ import org.apache.hadoop.test.Whitebox; import org.junit.Assert; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; public class BlockManagerTestUtil { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java index 987b74df47b8b..0f710f9a3c395 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/FsDatasetTestUtils.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdfs.server.datanode; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java index 30fee2fddd99b..2b04e2707a034 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/InternalDataNodeTestUtils.java @@ -46,7 +46,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java index c5135c2d05240..36c665cd8fc28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java @@ -446,7 +446,7 @@ public void testDNShouldNotDeleteBlockONTooManyOpenFiles() cluster.waitActive(); DistributedFileSystem fs = cluster.getFileSystem(); Path p = new Path("/testShouldThrowTMP"); - DFSTestUtil.writeFile(fs, p, new String("testdata")); + DFSTestUtil.writeFile(fs, p, "testdata"); //Before DN throws too many open files verifyBlockLocations(fs, p, 1); Mockito.doThrow(new FileNotFoundException("Too many open files")). diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java index 8443c36835489..84f8c8e0ec481 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetricsLogger.java @@ -32,8 +32,6 @@ import java.util.concurrent.TimeoutException; import java.util.regex.Pattern; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -131,51 +129,6 @@ public void testDisableMetricsLogger() throws IOException { assertNull(dn.getMetricsLoggerTimer()); } - @Test - public void testMetricsLoggerIsAsync() throws IOException { - startDNForTest(true); - assertNotNull(dn); - org.apache.log4j.Logger logger = ((Log4JLogger) DataNode.METRICS_LOG) - .getLogger(); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); - } - - /** - * Publish a fake metric under the "Hadoop:" domain and ensure it is logged by - * the metrics logger. - */ - @Test - public void testMetricsLogOutput() throws IOException, InterruptedException, - TimeoutException { - TestFakeMetric metricsProvider = new TestFakeMetric(); - MBeans.register(this.getClass().getSimpleName(), "DummyMetrics", - metricsProvider); - startDNForTest(true); - assertNotNull(dn); - final PatternMatchingAppender appender = new PatternMatchingAppender( - "^.*FakeMetric.*$"); - addAppender(DataNode.METRICS_LOG, appender); - - // Ensure that the supplied pattern was matched. 
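The logging changes in TestStripedFileAppend above and the removed commons-logging imports in this file follow the same pattern: the commons-logging Log/LogFactory pair (and the log4j 1.x types reached through it) gives way to the slf4j facade. A minimal sketch, with LoggingSketch standing in for the test class:

    // Old form:
    //   import org.apache.commons.logging.Log;
    //   import org.apache.commons.logging.LogFactory;
    //   public static final Log LOG = LogFactory.getLog(SomeTest.class);

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    class LoggingSketch {
      // New form: an slf4j Logger obtained from LoggerFactory.
      static final Logger LOG = LoggerFactory.getLogger(LoggingSketch.class);
    }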
- GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return appender.isMatched(); - } - }, 1000, 60000); - - dn.shutdown(); - } - - private void addAppender(Log log, Appender appender) { - org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger(); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - ((AsyncAppender) appenders.get(0)).addAppender(appender); - } - public interface TestFakeMetricMXBean { int getFakeMetric(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java index 9f9eb3bfd8837..0f723469b3789 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestRefreshNamenodes.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.HashSet; import java.util.Set; import org.apache.hadoop.conf.Configuration; @@ -73,13 +74,13 @@ public void testRefreshNamenodes() throws IOException { // Ensure a BPOfferService in the datanodes corresponds to // a namenode in the cluster - Set nnAddrsFromCluster = Sets.newHashSet(); + Set nnAddrsFromCluster = new HashSet<>(); for (int i = 0; i < 4; i++) { assertTrue(nnAddrsFromCluster.add( cluster.getNameNode(i).getNameNodeAddress())); } - Set nnAddrsFromDN = Sets.newHashSet(); + Set nnAddrsFromDN = new HashSet<>(); for (BPOfferService bpos : dn.getAllBpOs()) { for (BPServiceActor bpsa : bpos.getBPServiceActors()) { assertTrue(nnAddrsFromDN.add(bpsa.getNNSocketAddress())); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java index cf35ba99fd085..753e2e8fade4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImplTestUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.FileExistsException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java index cb26533229c32..b6e1697ed6170 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java @@ -45,8 +45,8 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; +import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java index f7aa4c34123d6..76af9a48d8080 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestScrLazyPersistFiles.java @@ -16,7 +16,8 @@ * limitations under the License. */ package org.apache.hadoop.hdfs.server.datanode.fsdataset.impl; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; + +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.ChecksumException; import org.apache.hadoop.fs.Path; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java index 2a496fb0ec222..aa9b6f1c19c5c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java index a0e4e4db3f04a..81191692d92e7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancer.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdfs.server.diskbalancer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java index 7f6cd5e156ab6..1943b6454baad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerWithMockMover.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import 
org.apache.commons.codec.digest.DigestUtils; import org.slf4j.Logger; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index d95e76fc85294..94f7ef69c9bb3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -65,7 +65,7 @@ import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 08a436b73f78a..9fbd7770bd223 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -415,7 +415,7 @@ public static void assertFileContentsSame(File... files) throws Exception { if (files.length < 2) return; Map md5s = getFileMD5s(files); - if (Sets.newHashSet(md5s.values()).size() > 1) { + if (new HashSet<>(md5s.values()).size() > 1) { fail("File contents differed:\n " + Joiner.on("\n ") .withKeyValueSeparator("=") @@ -432,7 +432,8 @@ public static void assertFileContentsDifferent( File... files) throws Exception { Map md5s = getFileMD5s(files); - if (Sets.newHashSet(md5s.values()).size() != expectedUniqueHashes) { + int uniqueHashes = new HashSet<>(md5s.values()).size(); + if (uniqueHashes != expectedUniqueHashes) { fail("Expected " + expectedUniqueHashes + " different hashes, got:\n " + Joiner.on("\n ") .withKeyValueSeparator("=") diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 9537435a59c7c..5a34a64c4fc55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -27,7 +27,7 @@ import java.util.EnumSet; import java.util.List; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index c5dc66e5fa3a9..030dee6de2a12 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -112,7 +112,7 @@ public class TestCheckpoint { static final Logger LOG = LoggerFactory.getLogger(TestCheckpoint.class); static final String NN_METRICS = "NameNodeActivity"; - + static final long seed = 0xDEADBEEFL; static final 
int blockSize = 4096; static final int fileSize = 8192; @@ -127,27 +127,27 @@ public boolean accept(File dir, String name) { }; private CheckpointFaultInjector faultInjector; - + @Before public void setUp() { FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory())); faultInjector = Mockito.mock(CheckpointFaultInjector.class); CheckpointFaultInjector.instance = faultInjector; } - + @After public void checkForSNNThreads() { GenericTestUtils.assertNoThreadsMatching(".*SecondaryNameNode.*"); } - + static void checkFile(FileSystem fileSys, Path name, int repl) throws IOException { assertTrue(fileSys.exists(name)); int replication = fileSys.getFileStatus(name).getReplication(); assertEquals("replication for " + name, repl, replication); - //We should probably test for more of the file properties. + //We should probably test for more of the file properties. } - + static void cleanupFile(FileSystem fileSys, Path name) throws IOException { assertTrue(fileSys.exists(name)); @@ -164,14 +164,14 @@ public void testNameDirError() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .build(); - + Collection nameDirs = cluster.getNameDirs(0); cluster.shutdown(); cluster = null; - + for (URI nameDirUri : nameDirs) { File dir = new File(nameDirUri.getPath()); - + try { // Simulate the mount going read-only FileUtil.setWritable(dir, false); @@ -214,7 +214,7 @@ public void testWriteTransactionIdHandlesIOE() throws Exception { assertTrue("List of removed storage directories wasn't empty", nnStorage.getRemovedStorageDirs().isEmpty()); } finally { - // Delete storage directory to cause IOException in writeTransactionIdFile + // Delete storage directory to cause IOException in writeTransactionIdFile assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(), filePath.delete()); } @@ -260,13 +260,13 @@ public void testReloadOnEditReplayFailure () throws IOException { fail("Fault injection failed."); } catch (IOException ioe) { // This is expected. - } + } Mockito.reset(faultInjector); - + // The error must be recorded, so next checkpoint will reload image. fos.write(new byte[] { 0, 1, 2, 3 }); fos.hsync(); - + assertTrue("Another checkpoint should have reloaded image", secondary.doCheckpoint()); } finally { @@ -346,10 +346,10 @@ public void testSecondaryNamenodeError1() cluster.waitActive(); fileSys = cluster.getFileSystem(); assertTrue(!fileSys.exists(file1)); - + // Make the checkpoint fail after rolling the edits log. secondary = startSecondaryNameNode(conf); - + Mockito.doThrow(new IOException( "Injecting failure after rolling edit logs")) .when(faultInjector).afterSecondaryCallsRollEditLog(); @@ -360,7 +360,7 @@ public void testSecondaryNamenodeError1() } catch (IOException e) { // expected } - + Mockito.reset(faultInjector); // @@ -379,7 +379,7 @@ public void testSecondaryNamenodeError1() // // Restart cluster and verify that file exists. - // Then take another checkpoint to verify that the + // Then take another checkpoint to verify that the // namenode restart accounted for the rolled edit logs. // try { @@ -422,7 +422,7 @@ public void testSecondaryNamenodeError2() throws IOException { // Make the checkpoint fail after uploading the new fsimage. 
// secondary = startSecondaryNameNode(conf); - + Mockito.doThrow(new IOException( "Injecting failure after uploading new image")) .when(faultInjector).afterSecondaryUploadsNewImage(); @@ -451,7 +451,7 @@ public void testSecondaryNamenodeError2() throws IOException { // // Restart cluster and verify that file exists. - // Then take another checkpoint to verify that the + // Then take another checkpoint to verify that the // namenode restart accounted for the rolled edit logs. // try { @@ -531,7 +531,7 @@ public void testSecondaryNamenodeError3() throws IOException { // // Restart cluster and verify that file exists. - // Then take another checkpoint to verify that the + // Then take another checkpoint to verify that the // namenode restart accounted for the twice-rolled edit logs. // try { @@ -584,7 +584,7 @@ private void doSecondaryFailsToReturnImage() throws IOException { image = cluster.getNameNode().getFSImage(); assertTrue(!fileSys.exists(file1)); StorageDirectory sd = image.getStorage().getStorageDir(0); - + File latestImageBeforeCheckpoint = FSImageTestUtil.findLatestImageFile(sd); long fsimageLength = latestImageBeforeCheckpoint.length(); // @@ -605,7 +605,7 @@ private void doSecondaryFailsToReturnImage() throws IOException { // Verify that image file sizes did not change. for (StorageDirectory sd2 : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) { - + File thisNewestImage = FSImageTestUtil.findLatestImageFile(sd2); long len = thisNewestImage.length(); assertEquals(fsimageLength, len); @@ -649,7 +649,7 @@ private void checkTempImages(NNStorage storage) throws IOException { public void testNameNodeImageSendFailWrongSize() throws IOException { LOG.info("Starting testNameNodeImageSendFailWrongSize"); - + Mockito.doReturn(true).when(faultInjector) .shouldSendShortFile(filePathContaining("fsimage")); doSendFailTest("is not of the advertised size"); @@ -707,7 +707,7 @@ private void doSendFailTest(String exceptionSubstring) secondary.shutdown(); // secondary namenode crash! secondary = null; - // start new instance of secondary and verify that + // start new instance of secondary and verify that // a new rollEditLog succedes in spite of the fact that we had // a partially failed checkpoint previously. 
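The import swap that recurs throughout the hunks above (UpgradeUtilities, TestQJMWithFaults, BlockManagerTestUtil, FsDatasetTestUtils, the disk-balancer tests, NNThroughputBenchmark, and others) leaves the call sites untouched; only the import moves from the shaded Guava class to org.apache.hadoop.util.Preconditions. A minimal sketch, assuming that class exposes the same checkNotNull/checkArgument overloads as the Guava original; the helper requirePositive is illustrative only.

    // Was: import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
    import org.apache.hadoop.util.Preconditions;

    class PreconditionsSketch {
      static long requirePositive(Long value) {
        Preconditions.checkNotNull(value, "value must not be null");
        Preconditions.checkArgument(value > 0, "value must be positive");
        return value;
      }
    }

Keeping the call sites unchanged is what makes the one-line import swap sufficient in each of those files.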
// @@ -728,7 +728,7 @@ private void doSendFailTest(String exceptionSubstring) cluster = null; } } - + /** * Test that the NN locks its storage and edits directories, and won't start up * if the directories are already locked @@ -737,7 +737,7 @@ private void doSendFailTest(String exceptionSubstring) public void testNameDirLocking() throws IOException { Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; - + // Start a NN, and verify that lock() fails in all of the configured // directories StorageDirectory savedSd = null; @@ -753,7 +753,7 @@ public void testNameDirLocking() throws IOException { cluster = null; } assertNotNull(savedSd); - + // Lock one of the saved directories, then start the NN, and make sure it // fails to start assertClusterStartFailsWhenDirLocked(conf, savedSd); @@ -775,7 +775,7 @@ public void testSeparateEditsDirLocking() throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsDir.getAbsolutePath()); MiniDFSCluster cluster = null; - + // Start a NN, and verify that lock() fails in all of the configured // directories StorageDirectory savedSd = null; @@ -793,12 +793,12 @@ public void testSeparateEditsDirLocking() throws IOException { cluster = null; } assertNotNull(savedSd); - + // Lock one of the saved directories, then start the NN, and make sure it // fails to start assertClusterStartFailsWhenDirLocked(conf, savedSd); } - + /** * Test that the SecondaryNameNode properly locks its storage directories. */ @@ -814,7 +814,7 @@ public void testSecondaryNameNodeLocking() throws Exception { // Start a secondary NN, then make sure that all of its storage // dirs got locked. secondary = startSecondaryNameNode(conf); - + NNStorage storage = secondary.getFSImage().getStorage(); for (StorageDirectory sd : storage.dirIterable(null)) { assertLockFails(sd); @@ -837,7 +837,7 @@ public void testSecondaryNameNodeLocking() throws Exception { } finally { savedSd.unlock(); } - + } finally { cleanup(secondary); secondary = null; @@ -845,7 +845,7 @@ public void testSecondaryNameNodeLocking() throws Exception { cluster = null; } } - + /** * Test that, an attempt to lock a storage that is already locked by nodename, * logs error message that includes JVM name of the namenode that locked it. @@ -862,7 +862,7 @@ public void testStorageAlreadyLockedErrorMessage() throws Exception { assertLockFails(sd); savedSd = sd; } - + LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs( LoggerFactory.getLogger(Storage.class)); try { @@ -899,7 +899,7 @@ private static void assertLockFails(StorageDirectory sd) { GenericTestUtils.assertExceptionContains("already locked", ioe); } } - + /** * Assert that, if sdToLock is locked, the cluster is not allowed to start up. * @param conf cluster conf to use @@ -910,7 +910,7 @@ private static void assertClusterStartFailsWhenDirLocked( // Lock the edits dir, then start the NN, and make sure it fails to start sdToLock.lock(); MiniDFSCluster cluster = null; - try { + try { cluster = new MiniDFSCluster.Builder(conf).format(false) .manageNameDfsDirs(false).numDataNodes(0).build(); assertFalse("cluster should fail to start after locking " + @@ -941,11 +941,11 @@ public void testImportCheckpoint() throws Exception { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); nameDirs = cluster.getNameDirs(0); - + // Make an entry in the namespace, used for verifying checkpoint // later. 
cluster.getFileSystem().mkdirs(testPath); - + // Take a checkpoint snn = startSecondaryNameNode(conf); snn.doCheckpoint(); @@ -954,7 +954,7 @@ public void testImportCheckpoint() throws Exception { cleanup(cluster); cluster = null; } - + LOG.info("Trying to import checkpoint when the NameNode already " + "contains an image. This should fail."); try { @@ -970,19 +970,19 @@ public void testImportCheckpoint() throws Exception { cleanup(cluster); cluster = null; } - + LOG.info("Removing NN storage contents"); for(URI uri : nameDirs) { File dir = new File(uri.getPath()); LOG.info("Cleaning " + dir); removeAndRecreateDir(dir); } - + LOG.info("Trying to import checkpoint"); try { cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0) .startupOption(StartupOption.IMPORT).build(); - + assertTrue("Path from checkpoint should exist after import", cluster.getFileSystem().exists(testPath)); @@ -993,7 +993,7 @@ public void testImportCheckpoint() throws Exception { cluster = null; } } - + private static void removeAndRecreateDir(File dir) throws IOException { if(dir.exists()) if(!(FileUtil.fullyDelete(dir))) @@ -1001,13 +1001,13 @@ private static void removeAndRecreateDir(File dir) throws IOException { if (!dir.mkdirs()) throw new IOException("Cannot create directory " + dir); } - + SecondaryNameNode startSecondaryNameNode(Configuration conf ) throws IOException { conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0"); return new SecondaryNameNode(conf); } - + SecondaryNameNode startSecondaryNameNode(Configuration conf, int index) throws IOException { Configuration snnConf = new Configuration(conf); @@ -1028,7 +1028,7 @@ public void testCheckpoint() throws IOException { Configuration conf = new HdfsConfiguration(); conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0"); replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3); - + MiniDFSCluster cluster = null; FileSystem fileSys = null; SecondaryNameNode secondary = null; @@ -1042,7 +1042,7 @@ public void testCheckpoint() throws IOException { // assertTrue(!fileSys.exists(file1)); assertTrue(!fileSys.exists(file2)); - + // // Create file1 // @@ -1084,7 +1084,7 @@ public void testCheckpoint() throws IOException { .format(false).build(); cluster.waitActive(); fileSys = cluster.getFileSystem(); - + // check that file1 still exists checkFile(fileSys, file1, replication); cleanupFile(fileSys, file1); @@ -1099,11 +1099,11 @@ public void testCheckpoint() throws IOException { // secondary = startSecondaryNameNode(conf); secondary.doCheckpoint(); - + FSDirectory secondaryFsDir = secondary.getFSNamesystem().dir; INode rootInMap = secondaryFsDir.getInode(secondaryFsDir.rootDir.getId()); assertSame(rootInMap, secondaryFsDir.rootDir); - + fileSys.delete(tmpDir, true); fileSys.mkdirs(tmpDir); secondary.doCheckpoint(); @@ -1187,10 +1187,10 @@ public void testSaveNamespace() throws IOException { } catch(Exception e) { throw new IOException(e); } - + // TODO: Fix the test to not require a hard-coded transaction count. 
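Similarly, the collection changes in TestDFSNetworkTopology, TestQJMWithFaults, TestRefreshNamenodes, and FSImageTestUtil above drop the Sets.newHashSet/Sets.newTreeSet helpers in favor of plain JDK constructors. A minimal sketch; the field names are illustrative only.

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.Set;
    import java.util.SortedSet;
    import java.util.TreeSet;

    class CollectionsSketch {
      // Was: Set<String> hosts = Sets.newHashSet("host1", "host2");
      Set<String> hosts = new HashSet<>(Arrays.asList("host1", "host2"));

      // Was: SortedSet<Long> ipcCounts = Sets.newTreeSet();
      SortedSet<Long> ipcCounts = new TreeSet<>();

      // Was: Set<String> addrs = Sets.newHashSet();
      Set<String> addrs = new HashSet<>();
    }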
final int EXPECTED_TXNS_FIRST_SEG = 13; - + // the following steps should have happened: // edits_inprogress_1 -> edits_1-12 (finalized) // fsimage_12 created @@ -1214,7 +1214,7 @@ public void testSaveNamespace() throws IOException { + NNStorage.getInProgressEditsFileName( EXPECTED_TXNS_FIRST_SEG + 1))); } - + Collection imageDirs = cluster.getNameDirs(0); for (URI uri : imageDirs) { File imageDir = new File(uri.getPath()); @@ -1222,7 +1222,7 @@ public void testSaveNamespace() throws IOException { + NNStorage.getImageFileName( EXPECTED_TXNS_FIRST_SEG)); assertTrue("Should have saved image at " + savedImage, - savedImage.exists()); + savedImage.exists()); } // restart cluster and verify file exists @@ -1241,7 +1241,7 @@ public void testSaveNamespace() throws IOException { cluster = null; } } - + /* Test case to test CheckpointSignature */ @Test public void testCheckpointSignature() throws IOException { @@ -1275,7 +1275,7 @@ public void testCheckpointSignature() throws IOException { cluster = null; } } - + /** * Tests the following sequence of events: * - secondary successfully makes a checkpoint @@ -1287,19 +1287,19 @@ public void testCheckpointSignature() throws IOException { public void testCheckpointAfterTwoFailedUploads() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; - + Configuration conf = new HdfsConfiguration(); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) .format(true).build(); - + secondary = startSecondaryNameNode(conf); Mockito.doThrow(new IOException( "Injecting failure after rolling edit logs")) .when(faultInjector).afterSecondaryCallsRollEditLog(); - + // Fail to checkpoint once try { secondary.doCheckpoint(); @@ -1322,7 +1322,7 @@ public void testCheckpointAfterTwoFailedUploads() throws IOException { // Now with the cleared error simulation, it should succeed secondary.doCheckpoint(); - + } finally { cleanup(secondary); secondary = null; @@ -1330,12 +1330,12 @@ public void testCheckpointAfterTwoFailedUploads() throws IOException { cluster = null; } } - + /** * Starts two namenodes and two secondary namenodes, verifies that secondary * namenodes are configured correctly to talk to their respective namenodes * and can do the checkpoint. - * + * * @throws IOException */ @Test @@ -1398,7 +1398,7 @@ public void testMultipleSecondaryNamenodes() throws IOException { cluster = null; } } - + /** * Test that the secondary doesn't have to re-download image * if it hasn't changed. 
@@ -1434,19 +1434,19 @@ public void testSecondaryImageDownload() throws IOException { NNStorage.getImageFileName(expectedTxIdToDownload)); File secondaryFsImageAfter = new File(secondaryCurrent, NNStorage.getImageFileName(expectedTxIdToDownload + 2)); - + assertFalse("Secondary should start with empty current/ dir " + "but " + secondaryFsImageBefore + " exists", secondaryFsImageBefore.exists()); assertTrue("Secondary should have loaded an image", secondary.doCheckpoint()); - + assertTrue("Secondary should have downloaded original image", secondaryFsImageBefore.exists()); assertTrue("Secondary should have created a new image", secondaryFsImageAfter.exists()); - + long fsimageLength = secondaryFsImageBefore.length(); assertEquals("Image size should not have changed", fsimageLength, @@ -1454,10 +1454,10 @@ public void testSecondaryImageDownload() throws IOException { // change namespace fileSys.mkdirs(dir); - + assertFalse("Another checkpoint should not have to re-load image", secondary.doCheckpoint()); - + for (StorageDirectory sd : image.getStorage().dirIterable(NameNodeDirType.IMAGE)) { File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE, @@ -1474,7 +1474,7 @@ public void testSecondaryImageDownload() throws IOException { cluster = null; } } - + /** * Test NN restart if a failure happens in between creating the fsimage * MD5 file and renaming the fsimage. @@ -1582,7 +1582,7 @@ public void testEditFailureBeforeRename() throws IOException { Mockito.reset(faultInjector); } } - + /** * Test that a fault while downloading edits the first time after the 2NN * starts up does not prevent future checkpointing. @@ -1599,19 +1599,19 @@ public void testEditFailureOnFirstCheckpoint() throws IOException { cluster.waitActive(); fs = cluster.getFileSystem(); fs.mkdirs(new Path("test-file-1")); - + // Make sure the on-disk fsimage on the NN has txid > 0. FSNamesystem fsns = cluster.getNamesystem(); fsns.enterSafeMode(false); fsns.saveNamespace(0, 0); fsns.leaveSafeMode(false); - + secondary = startSecondaryNameNode(conf); // Cause edit rename to fail during next checkpoint Mockito.doThrow(new IOException("Injecting failure before edit rename")) .when(faultInjector).beforeEditsRename(); - + try { secondary.doCheckpoint(); fail("Fault injection failed."); @@ -1620,7 +1620,7 @@ public void testEditFailureOnFirstCheckpoint() throws IOException { "Injecting failure before edit rename", ioe); } Mockito.reset(faultInjector); - + // Next checkpoint should succeed secondary.doCheckpoint(); } finally { @@ -1709,14 +1709,14 @@ public void testDeleteTemporaryEditsOnStartup() throws IOException { * Test case where two secondary namenodes are checkpointing the same * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} * since that test runs against two distinct NNs. - * + * * This case tests the following interleaving: * - 2NN A downloads image (up to txid 2) * - 2NN A about to save its own checkpoint * - 2NN B downloads image (up to txid 4) * - 2NN B uploads checkpoint (txid 4) * - 2NN A uploads checkpoint (txid 2) - * + * * It verifies that this works even though the earlier-txid checkpoint gets * uploaded after the later-txid checkpoint. */ @@ -1732,7 +1732,7 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { // Start 2NNs secondary1 = startSecondaryNameNode(conf, 1); secondary2 = startSecondaryNameNode(conf, 2); - + // Make the first 2NN's checkpoint process delayable - we can pause it // right before it saves its checkpoint image. 
CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1); @@ -1746,17 +1746,17 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { // Wait for the first checkpointer to get to where it should save its image. delayer.waitForCall(); - + // Now make the second checkpointer run an entire checkpoint secondary2.doCheckpoint(); - + // Let the first one finish delayer.proceed(); - + // It should have succeeded even though another checkpoint raced with it. checkpointThread.join(); checkpointThread.propagateExceptions(); - + // primary should record "last checkpoint" as the higher txid (even though // a checkpoint with a lower txid finished most recently) NNStorage storage = cluster.getNameNode().getFSImage().getStorage(); @@ -1764,17 +1764,17 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { // Should have accepted both checkpoints assertNNHasCheckpoints(cluster, ImmutableList.of(2,4)); - + // Now have second one checkpoint one more time just to make sure that // the NN isn't left in a broken state secondary2.doCheckpoint(); - + // NN should have received new checkpoint assertEquals(6, storage.getMostRecentCheckpointTxId()); - + // Validate invariant that files named the same are the same. assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2)); - + // NN should have removed the checkpoint at txid 2 at this point, but has // one at txid 6 assertNNHasCheckpoints(cluster, ImmutableList.of(4,6)); @@ -1789,13 +1789,13 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { } } } - - + + /** * Test case where two secondary namenodes are checkpointing the same * NameNode. This differs from {@link #testMultipleSecondaryNamenodes()} * since that test runs against two distinct NNs. - * + * * This case tests the following interleaving: * - 2NN A) calls rollEdits() * - 2NN B) calls rollEdits() @@ -1804,7 +1804,7 @@ public void testMultipleSecondaryNNsAgainstSameNN() throws Exception { * - 2NN B) uploads checkpoint fsimage_4 * - 2NN A) allowed to proceed, also returns up to txid 4 * - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully - * + * * It verifies that one of the two gets an error that it's uploading a * duplicate checkpoint, and the other one succeeds. */ @@ -1820,7 +1820,7 @@ public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception { // Start 2NNs secondary1 = startSecondaryNameNode(conf, 1); secondary2 = startSecondaryNameNode(conf, 2); - + // Make the first 2NN's checkpoint process delayable - we can pause it // right before it calls getRemoteEditLogManifest. 
// The method to set up a spy on an RPC protocol is a little bit involved @@ -1836,41 +1836,41 @@ protected Object passThrough(InvocationOnMock invocation) throws Throwable { } }; secondary1.setNameNode(spyNN); - + Mockito.doAnswer(delayer).when(spyNN) - .getEditLogManifest(Mockito.anyLong()); - + .getEditLogManifest(Mockito.anyLong()); + // Set up a thread to do a checkpoint from the first 2NN DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1); checkpointThread.start(); // Wait for the first checkpointer to be about to call getEditLogManifest delayer.waitForCall(); - + // Now make the second checkpointer run an entire checkpoint secondary2.doCheckpoint(); - + // NN should have now received fsimage_4 NNStorage storage = cluster.getNameNode().getFSImage().getStorage(); assertEquals(4, storage.getMostRecentCheckpointTxId()); - + // Let the first one finish delayer.proceed(); - + // Letting the first node continue, it should try to upload the // same image, and gracefully ignore it, while logging an // error message. checkpointThread.join(); checkpointThread.propagateExceptions(); - + // primary should still consider fsimage_4 the latest assertEquals(4, storage.getMostRecentCheckpointTxId()); - + // Now have second one checkpoint one more time just to make sure that // the NN isn't left in a broken state secondary2.doCheckpoint(); assertEquals(6, storage.getMostRecentCheckpointTxId()); - + // Should have accepted both checkpoints assertNNHasCheckpoints(cluster, ImmutableList.of(4,6)); @@ -1878,10 +1878,10 @@ protected Object passThrough(InvocationOnMock invocation) throws Throwable { // continue at next checkpoint secondary1.setNameNode(origNN); secondary1.doCheckpoint(); - + // NN should have received new checkpoint assertEquals(8, storage.getMostRecentCheckpointTxId()); - + // Validate invariant that files named the same are the same. assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2)); // Validate that the NN received checkpoints at expected txids @@ -1896,7 +1896,7 @@ protected Object passThrough(InvocationOnMock invocation) throws Throwable { cluster = null; } } - + /** * Test case where the name node is reformatted while the secondary namenode * is running. The secondary should shut itself down if if talks to a NN @@ -1906,7 +1906,7 @@ protected Object passThrough(InvocationOnMock invocation) throws Throwable { public void testReformatNNBetweenCheckpoints() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; - + Configuration conf = new HdfsConfiguration(); conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 1); @@ -1936,7 +1936,7 @@ public void testReformatNNBetweenCheckpoints() throws IOException { Thread.sleep(100); } catch (InterruptedException ie) { } - + // Start a new NN with the same host/port. cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) @@ -1956,9 +1956,9 @@ public void testReformatNNBetweenCheckpoints() throws IOException { secondary = null; cleanup(cluster); cluster = null; - } + } } - + /** * Test that the primary NN will not serve any files to a 2NN who doesn't * share its namespace ID, and also will not accept any files from one. 
@@ -1966,22 +1966,22 @@ public void testReformatNNBetweenCheckpoints() throws IOException { @Test public void testNamespaceVerifiedOnFileTransfer() throws IOException { MiniDFSCluster cluster = null; - + Configuration conf = new HdfsConfiguration(); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(true).build(); - + NamenodeProtocols nn = cluster.getNameNodeRpc(); URL fsName = DFSUtil.getInfoServer( cluster.getNameNode().getServiceRpcAddress(), conf, DFSUtil.getHttpClientScheme(conf)).toURL(); - // Make a finalized log on the server side. + // Make a finalized log on the server side. nn.rollEditLog(); RemoteEditLogManifest manifest = nn.getEditLogManifest(1); RemoteEditLog log = manifest.getLogs().get(0); - + NNStorage dstImage = Mockito.mock(NNStorage.class); Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written"))) .when(dstImage).getFiles( @@ -2024,7 +2024,7 @@ public void testNamespaceVerifiedOnFileTransfer() throws IOException { } finally { cleanup(cluster); cluster = null; - } + } } /** @@ -2036,13 +2036,13 @@ public void testCheckpointWithFailedStorageDir() throws Exception { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; File currentDir = null; - + Configuration conf = new HdfsConfiguration(); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) .format(true).build(); - + secondary = startSecondaryNameNode(conf); // Checkpoint once @@ -2054,17 +2054,17 @@ public void testCheckpointWithFailedStorageDir() throws Exception { NNStorage storage = cluster.getNameNode().getFSImage().getStorage(); StorageDirectory sd0 = storage.getStorageDir(0); StorageDirectory sd1 = storage.getStorageDir(1); - + currentDir = sd0.getCurrentDir(); FileUtil.setExecutable(currentDir, false); // Upload checkpoint when NN has a bad storage dir. This should // succeed and create the checkpoint in the good dir. secondary.doCheckpoint(); - + GenericTestUtils.assertExists( new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2))); - + // Restore the good dir FileUtil.setExecutable(currentDir, true); nn.restoreFailedStorage("true"); @@ -2072,7 +2072,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception { // Checkpoint again -- this should upload to both dirs secondary.doCheckpoint(); - + assertNNHasCheckpoints(cluster, ImmutableList.of(8)); assertParallelFilesInvariant(cluster, ImmutableList.of(secondary)); } finally { @@ -2085,7 +2085,7 @@ public void testCheckpointWithFailedStorageDir() throws Exception { cluster = null; } } - + /** * Test case where the NN is configured with a name-only and an edits-only * dir, with storage-restore turned on. 
In this case, if the name-only dir @@ -2098,7 +2098,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; File currentDir = null; - + Configuration conf = new HdfsConfiguration(); File base_dir = new File(MiniDFSCluster.getBaseDirectory()); @@ -2113,7 +2113,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true) .manageNameDfsDirs(false).build(); - + secondary = startSecondaryNameNode(conf); // Checkpoint once @@ -2137,7 +2137,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { GenericTestUtils.assertExceptionContains( "No targets in destination storage", ioe); } - + // Restore the good dir assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755")); nn.restoreFailedStorage("true"); @@ -2145,7 +2145,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { // Checkpoint again -- this should upload to the restored name dir secondary.doCheckpoint(); - + assertNNHasCheckpoints(cluster, ImmutableList.of(8)); assertParallelFilesInvariant(cluster, ImmutableList.of(secondary)); } finally { @@ -2158,7 +2158,7 @@ public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { cluster = null; } } - + /** * Test that the 2NN triggers a checkpoint after the configurable interval */ @@ -2170,7 +2170,7 @@ public void testCheckpointTriggerOnTxnCount() throws Exception { conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1); - + try { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0) @@ -2193,7 +2193,7 @@ public Boolean get() { for (int i = 0; i < 10; i++) { fs.mkdirs(new Path("/test" + i)); } - + GenericTestUtils.waitFor(new Supplier() { @Override public Boolean get() { @@ -2225,7 +2225,7 @@ public void testSecondaryHasVeryOutOfDateImage() throws IOException { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) .format(true).build(); - + secondary = startSecondaryNameNode(conf); // Checkpoint once @@ -2238,11 +2238,11 @@ public void testSecondaryHasVeryOutOfDateImage() throws IOException { nn.saveNamespace(0, 0); } nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false); - + // Now the secondary tries to checkpoint again with its // old image in memory. secondary.doCheckpoint(); - + } finally { cleanup(secondary); secondary = null; @@ -2250,7 +2250,7 @@ public void testSecondaryHasVeryOutOfDateImage() throws IOException { cluster = null; } } - + /** * Regression test for HDFS-3678 "Edit log files are never being purged from 2NN" */ @@ -2263,18 +2263,18 @@ public void testSecondaryPurgesEditLogs() throws IOException { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true) .build(); - + FileSystem fs = cluster.getFileSystem(); fs.mkdirs(new Path("/foo")); - + secondary = startSecondaryNameNode(conf); - + // Checkpoint a few times. Doing this will cause a log roll, and thus // several edit log segments on the 2NN. for (int i = 0; i < 5; i++) { secondary.doCheckpoint(); } - + // Make sure there are no more edit log files than there should be. 
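For the polling seen in testCheckpointTriggerOnTxnCount above, GenericTestUtils.waitFor takes a Supplier<Boolean>, a check interval, and a timeout, both in milliseconds; a lambda is equivalent to the anonymous Supplier shown in the diff context. A minimal sketch, with the done flag standing in for the real checkpoint condition:

    import java.util.concurrent.TimeoutException;
    import java.util.concurrent.atomic.AtomicBoolean;
    import org.apache.hadoop.test.GenericTestUtils;

    class WaitForSketch {
      static void awaitCondition(AtomicBoolean done)
          throws TimeoutException, InterruptedException {
        // Re-check every 100 ms, fail with TimeoutException after 10 seconds.
        GenericTestUtils.waitFor(() -> done.get(), 100, 10_000);
      }
    }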
List checkpointDirs = getCheckpointCurrentDirs(secondary); for (File checkpointDir : checkpointDirs) { @@ -2283,7 +2283,7 @@ public void testSecondaryPurgesEditLogs() throws IOException { assertEquals("Edit log files were not purged from 2NN", 1, editsFiles.size()); } - + } finally { cleanup(secondary); secondary = null; @@ -2291,7 +2291,7 @@ public void testSecondaryPurgesEditLogs() throws IOException { cluster = null; } } - + /** * Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a * checkpoint if security is enabled and the NN restarts without outstanding @@ -2301,28 +2301,28 @@ public void testSecondaryPurgesEditLogs() throws IOException { public void testSecondaryNameNodeWithDelegationTokens() throws IOException { MiniDFSCluster cluster = null; SecondaryNameNode secondary = null; - + Configuration conf = new HdfsConfiguration(); conf.setBoolean( DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true); try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) .format(true).build(); - + assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm"))); - + secondary = startSecondaryNameNode(conf); // Checkpoint once, so the 2NN loads the DT into its in-memory sate. secondary.doCheckpoint(); - + // Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN // therefore needs to download a new fsimage the next time it performs a // checkpoint. cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); cluster.getNameNodeRpc().saveNamespace(0, 0); cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false); - + // Ensure that the 2NN can still perform a checkpoint. secondary.doCheckpoint(); } finally { @@ -2367,10 +2367,10 @@ public void testSecondaryNameNodeWithSavedLeases() throws IOException { cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false); cluster.getNameNodeRpc().saveNamespace(0, 0); cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false); - + // Ensure that the 2NN can still perform a checkpoint. secondary.doCheckpoint(); - + // And the leases have been cleared... 
assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease()); } finally { @@ -2383,7 +2383,7 @@ public void testSecondaryNameNodeWithSavedLeases() throws IOException { cluster = null; } } - + @Test public void testCommandLineParsing() throws ParseException { SecondaryNameNode.CommandLineOpts opts = @@ -2404,17 +2404,17 @@ public void testCommandLineParsing() throws ParseException { opts.parse("-geteditsize"); assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE, opts.getCommand()); - + opts.parse("-format"); assertTrue(opts.shouldFormat()); - + try { opts.parse("-geteditsize", "-checkpoint"); fail("Should have failed bad parsing for two actions"); } catch (ParseException e) { LOG.warn("Encountered ", e); } - + try { opts.parse("-checkpoint", "xx"); fail("Should have failed for bad checkpoint arg"); @@ -2601,9 +2601,9 @@ private void assertParallelFilesInvariant(MiniDFSCluster cluster, allCurrentDirs.addAll(getCheckpointCurrentDirs(snn)); } FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs, - ImmutableSet.of("VERSION")); + ImmutableSet.of("VERSION")); } - + private static List getCheckpointCurrentDirs(SecondaryNameNode secondary) { List ret = Lists.newArrayList(); for (String u : secondary.getCheckpointDirectories()) { @@ -2618,18 +2618,18 @@ private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary secondary1.setFSImage(spy); return spy; } - + /** * A utility class to perform a checkpoint in a different thread. */ private static class DoCheckpointThread extends Thread { private final SecondaryNameNode snn; private volatile Throwable thrown = null; - + DoCheckpointThread(SecondaryNameNode snn) { this.snn = snn; } - + @Override public void run() { try { @@ -2638,7 +2638,7 @@ public void run() { thrown = t; } } - + void propagateExceptions() { if (thrown != null) { throw new RuntimeException(thrown); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index ccac846968371..2d0bd3b554708 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -177,16 +177,16 @@ void fromXml(Stanza st) throws InvalidXmlException { } static final Logger LOG = LoggerFactory.getLogger(TestEditLog.class); - + static final int NUM_DATA_NODES = 0; // This test creates NUM_THREADS threads and each thread does // 2 * NUM_TRANSACTIONS Transactions concurrently. static final int NUM_TRANSACTIONS = 100; static final int NUM_THREADS = 100; - + static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class); - + /** An edits log with 3 edits from 0.20 - the result of * a fresh namesystem followed by hadoop fs -touchz /myfile */ static final byte[] HADOOP20_SOME_EDITS = @@ -212,7 +212,7 @@ void fromXml(Stanza st) throws InvalidXmlException { // the tests run much faster. 
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true); } - + static final byte TRAILER_BYTE = FSEditLogOpCodes.OP_INVALID.getOpCode(); private static final int CHECKPOINT_ON_STARTUP_MIN_TXNS = 100; @@ -250,10 +250,10 @@ public void run() { } } } - + /** * Construct FSEditLog with default configuration, taking editDirs from NNStorage - * + * * @param storage Storage object used by namenode */ private static FSEditLog getFSEditLog(NNStorage storage) throws IOException { @@ -278,7 +278,7 @@ public void testPreTxIdEditLogNoEdits() throws Exception { namesys); assertEquals(0, numEdits); } - + /** * Test case for loading a very simple edit log from a format * prior to the inclusion of edit transaction IDs in the log. @@ -304,7 +304,7 @@ public void testPreTxidEditLogWithEdits() throws Exception { if (cluster != null) { cluster.shutdown(); } } } - + private long testLoad(byte[] data, FSNamesystem namesys) throws IOException { FSEditLogLoader loader = new FSEditLogLoader(namesys, 0); return loader.loadFSEdits(new EditLogByteInputStream(data), 1); @@ -396,7 +396,7 @@ public void testMultiStreamsLoadEditWithConfMaxTxns() */ @Test public void testSimpleEditLog() throws IOException { - // start a cluster + // start a cluster Configuration conf = getConf(); MiniDFSCluster cluster = null; FileSystem fileSys = null; @@ -407,15 +407,15 @@ public void testSimpleEditLog() throws IOException { final FSNamesystem namesystem = cluster.getNamesystem(); FSImage fsimage = namesystem.getFSImage(); final FSEditLog editLog = fsimage.getEditLog(); - + assertExistsInStorageDirs( - cluster, NameNodeDirType.EDITS, + cluster, NameNodeDirType.EDITS, NNStorage.getInProgressEditsFileName(1)); - + editLog.logSetReplication("fakefile", (short) 1); editLog.logSync(); - + editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); assertExistsInStorageDirs( @@ -425,10 +425,10 @@ public void testSimpleEditLog() throws IOException { cluster, NameNodeDirType.EDITS, NNStorage.getInProgressEditsFileName(4)); - + editLog.logSetReplication("fakefile", (short) 2); editLog.logSync(); - + editLog.close(); } finally { if(fileSys != null) fileSys.close(); @@ -445,8 +445,8 @@ public void testMultiThreadedEditLog() throws IOException { // force edit buffer to automatically sync on each log of edit log entry testEditLog(1); } - - + + private void assertExistsInStorageDirs(MiniDFSCluster cluster, NameNodeDirType dirType, String filename) { @@ -456,16 +456,16 @@ private void assertExistsInStorageDirs(MiniDFSCluster cluster, assertTrue("Expect that " + f + " exists", f.exists()); } } - + /** * Test edit log with different initial buffer size - * + * * @param initialSize initial edit log buffer size * @throws IOException */ private void testEditLog(int initialSize) throws IOException { - // start a cluster + // start a cluster Configuration conf = getConf(); MiniDFSCluster cluster = null; FileSystem fileSys = null; @@ -475,27 +475,27 @@ private void testEditLog(int initialSize) throws IOException { cluster.waitActive(); fileSys = cluster.getFileSystem(); final FSNamesystem namesystem = cluster.getNamesystem(); - + for (Iterator it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) { File dir = new File(it.next().getPath()); System.out.println(dir); } - + FSImage fsimage = namesystem.getFSImage(); FSEditLog editLog = fsimage.getEditLog(); - + // set small size of flush buffer editLog.setOutputBufferCapacity(initialSize); - + // Roll log so new output buffer size takes effect // we should now be writing to edits_inprogress_3 
fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); - + // Remember the current lastInodeId and will reset it back to test // loading editlog segments.The transactions in the following allocate new // inode id to write to editlogs but doesn't create ionde in namespace long originalLastInodeId = namesystem.dir.getLastInodeId(); - + // Create threads and make them run transactions concurrently. Thread threadId[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { @@ -504,39 +504,39 @@ private void testEditLog(int initialSize) throws IOException { threadId[i] = new Thread(trans, "TransactionThread-" + i); threadId[i].start(); } - + // wait for all transactions to get over for (int i = 0; i < NUM_THREADS; i++) { try { threadId[i].join(); } catch (InterruptedException e) { - i--; // retry + i--; // retry } - } + } // Reopen some files as for append - Transactions trans = + Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS, NUM_TRANSACTIONS / 2); trans.run(); // Roll another time to finalize edits_inprogress_3 fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); - + long expectedTxns = ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns - + // Verify that we can read in all the transactions that we have written. // If there were any corruptions, it is likely that the reading in // of these transactions will throw an exception. // namesystem.dir.resetLastInodeIdWithoutChecking(originalLastInodeId); - for (Iterator it = + for (Iterator it = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) { FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0); - + File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3, 3 + expectedTxns - 1); assertTrue("Expect " + editFile + " exists", editFile.exists()); - + System.out.println("Verifying file: " + editFile); long numEdits = loader.loadFSEdits( new EditLogFileInputStream(editFile), 3); @@ -547,7 +547,7 @@ private void testEditLog(int initialSize) throws IOException { "Expected " + expectedTxns + " transactions. 
"+ "Found " + numEdits + " transactions.", numEdits == expectedTxns); - + } } finally { try { @@ -570,7 +570,7 @@ public Void call() { } }).get(); } - + private void doCallLogSync(ExecutorService exec, final FSEditLog log) throws Exception { @@ -618,7 +618,7 @@ public void testSyncBatching() throws Exception { assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced", 1, editLog.getSyncTxId()); - + // Log an edit from thread A doLogEdit(threadA, editLog, "thread-a 1"); assertEquals("logging edit without syncing should do not affect txid", @@ -641,7 +641,7 @@ public void testSyncBatching() throws Exception { 3, editLog.getSyncTxId()); //Should have incremented the batch count exactly once - assertCounter("TransactionsBatchedInSync", 1L, + assertCounter("TransactionsBatchedInSync", 1L, getMetrics("NameNodeActivity")); } finally { threadA.shutdown(); @@ -650,7 +650,7 @@ public void testSyncBatching() throws Exception { if(cluster != null) cluster.shutdown(); } } - + /** * Test what happens with the following sequence: * @@ -664,7 +664,7 @@ public void testSyncBatching() throws Exception { */ @Test public void testBatchedSyncWithClosedLogs() throws Exception { - // start a cluster + // start a cluster Configuration conf = getConf(); MiniDFSCluster cluster = null; FileSystem fileSys = null; @@ -704,10 +704,10 @@ public void testBatchedSyncWithClosedLogs() throws Exception { if(cluster != null) cluster.shutdown(); } } - + @Test public void testEditChecksum() throws Exception { - // start a cluster + // start a cluster Configuration conf = getConf(); MiniDFSCluster cluster = null; FileSystem fileSys = null; @@ -732,7 +732,7 @@ public void testEditChecksum() throws Exception { for (StorageDirectory sd : sds) { File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3); assertTrue(editFile.exists()); - + long fileLen = editFile.length(); LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen); RandomAccessFile rwf = new RandomAccessFile(editFile, "rw"); @@ -742,7 +742,7 @@ public void testEditChecksum() throws Exception { rwf.writeInt(b+1); rwf.close(); } - + try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build(); fail("should not be able to start"); @@ -762,7 +762,7 @@ public void testEditChecksum() throws Exception { public void testCrashRecoveryNoTransactions() throws Exception { testCrashRecovery(0); } - + /** * Test what happens if the NN crashes when it has has started and * had a few transactions written @@ -771,7 +771,7 @@ public void testCrashRecoveryNoTransactions() throws Exception { public void testCrashRecoveryWithTransactions() throws Exception { testCrashRecovery(150); } - + /** * Do a test to make sure the edit log can recover edits even after * a non-clean shutdown. 
This does a simulated crash by copying over @@ -783,28 +783,28 @@ private void testCrashRecovery(int numTransactions) throws Exception { Configuration conf = getConf(); conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, CHECKPOINT_ON_STARTUP_MIN_TXNS); - + try { LOG.info("\n===========================================\n" + "Starting empty cluster"); - + cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATA_NODES) .format(true) .build(); cluster.waitActive(); - + FileSystem fs = cluster.getFileSystem(); for (int i = 0; i < numTransactions; i++) { fs.mkdirs(new Path("/test" + i)); - } - + } + // Directory layout looks like: // test/data/dfs/nameN/current/{fsimage_N,edits_...} File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath()); File dfsDir = nameDir.getParentFile(); assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir - + LOG.info("Copying data directory aside to a hot backup"); File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running"); FileUtils.copyDirectory(dfsDir, backupDir); @@ -812,12 +812,12 @@ private void testCrashRecovery(int numTransactions) throws Exception { LOG.info("Shutting down cluster #1"); cluster.shutdown(); cluster = null; - + // Now restore the backup FileUtil.fullyDeleteContents(dfsDir); dfsDir.delete(); backupDir.renameTo(dfsDir); - + // Directory layout looks like: // test/data/dfs/nameN/current/{fsimage_N,edits_...} File currentDir = new File(nameDir, "current"); @@ -825,8 +825,8 @@ private void testCrashRecovery(int numTransactions) throws Exception { // We should see the file as in-progress File editsFile = new File(currentDir, NNStorage.getInProgressEditsFileName(1)); - assertTrue("Edits file " + editsFile + " should exist", editsFile.exists()); - + assertTrue("Edits file " + editsFile + " should exist", editsFile.exists()); + File imageFile = FSImageTestUtil.findNewestImageFile( currentDir.getAbsolutePath()); assertNotNull("No image found in " + nameDir, imageFile); @@ -839,7 +839,7 @@ private void testCrashRecovery(int numTransactions) throws Exception { .format(false) .build(); cluster.waitActive(); - + // We should still have the files we wrote prior to the simulated crash fs = cluster.getFileSystem(); for (int i = 0; i < numTransactions; i++) { @@ -860,11 +860,11 @@ private void testCrashRecovery(int numTransactions) throws Exception { assertNotNull("No image found in " + nameDir, imageFile); assertEquals(NNStorage.getImageFileName(expectedTxId), imageFile.getName()); - + // Started successfully. Shut it down and make sure it can restart. 
- cluster.shutdown(); + cluster.shutdown(); cluster = null; - + cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATA_NODES) .format(false) @@ -876,13 +876,13 @@ private void testCrashRecovery(int numTransactions) throws Exception { } } } - + // should succeed - only one corrupt log dir @Test public void testCrashRecoveryEmptyLogOneDir() throws Exception { doTestCrashRecoveryEmptyLog(false, true, true); } - + // should fail - seen_txid updated to 3, but no log dir contains txid 3 @Test public void testCrashRecoveryEmptyLogBothDirs() throws Exception { @@ -891,11 +891,11 @@ public void testCrashRecoveryEmptyLogBothDirs() throws Exception { // should succeed - only one corrupt log dir @Test - public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() + public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId() throws Exception { doTestCrashRecoveryEmptyLog(false, false, true); } - + // should succeed - both log dirs corrupt, but seen_txid never updated @Test public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId() @@ -909,7 +909,7 @@ public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId() * (ie before writing START_LOG_SEGMENT). In the case * that all logs have this problem, it should mark them * as corrupt instead of trying to finalize them. - * + * * @param inBothDirs if true, there will be a truncated log in * both of the edits directories. If false, the truncated log * will only be in one of the directories. In both cases, the @@ -921,16 +921,16 @@ public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId() * seen_txid file. * @param shouldSucceed true if the test is expected to succeed. */ - private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, + private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, boolean updateTransactionIdFile, boolean shouldSucceed) throws Exception { - // start a cluster + // start a cluster Configuration conf = getConf(); MiniDFSCluster cluster = null; cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATA_NODES).build(); cluster.shutdown(); - + Collection editsDirs = cluster.getNameEditsDirs(0); for (URI uri : editsDirs) { File dir = new File(uri.getPath()); @@ -948,11 +948,11 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, if (!inBothDirs) { break; } - - NNStorage storage = new NNStorage(conf, + + NNStorage storage = new NNStorage(conf, Collections.emptyList(), Lists.newArrayList(uri)); - + if (updateTransactionIdFile) { storage.writeTransactionIdFileToStorage(3); } @@ -961,7 +961,7 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, stream.close(); } } - + try { cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(NUM_DATA_NODES).format(false).build(); @@ -983,7 +983,7 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs, } } - + private static class EditLogByteInputStream extends EditLogInputStream { private final InputStream input; private final long len; @@ -1000,25 +1000,25 @@ public EditLogByteInputStream(byte[] data) throws IOException { version = EditLogFileInputStream.readLogVersion(in, true); tracker = new FSEditLogLoader.PositionTrackingInputStream(in); in = new DataInputStream(tracker); - + reader = FSEditLogOp.Reader.create(in, tracker, version); } - + @Override public long getFirstTxId() { return HdfsServerConstants.INVALID_TXID; } - + @Override public long getLastTxId() { return HdfsServerConstants.INVALID_TXID; } - + @Override public long length() throws IOException { return len; } - + @Override public long getPosition() { return 
tracker.getPos(); @@ -1078,7 +1078,7 @@ public void testFailedOpen() throws Exception { ExitUtil.resetFirstExitException(); } } - + /** * Regression test for HDFS-1112/HDFS-3020. Ensures that, even if * logSync isn't called periodically, the edit log will sync itself. @@ -1088,10 +1088,10 @@ public void testAutoSync() throws Exception { File logDir = new File(TEST_DIR, "testAutoSync"); logDir.mkdirs(); FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir); - + String oneKB = StringUtils.byteToHexString( new byte[500]); - + try { log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class); @@ -1102,7 +1102,7 @@ public void testAutoSync() throws Exception { } // After ~400KB, we're still within the 512KB buffer size Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong()); - + // After ~400KB more, we should have done an automatic sync for (int i = 0; i < 400; i++) { log.logDelete(oneKB, 1L, false); @@ -1143,7 +1143,7 @@ public void testEditLogManifestMocks() throws IOException { log.initJournalsForWrite(); assertEquals("[[1,100], [101,200], [201,300], [301,400]]" + " CommittedTxId: 400", log.getEditLogManifest(1).toString()); - + // Case where one directory has an earlier finalized log, followed // by a gap. The returned manifest should start after the gap. storage = mockStorageWithEdits( @@ -1153,7 +1153,7 @@ public void testEditLogManifestMocks() throws IOException { log.initJournalsForWrite(); assertEquals("[[301,400], [401,500]] CommittedTxId: 500", log.getEditLogManifest(1).toString()); - + // Case where different directories have different length logs // starting at the same txid - should pick the longer one storage = mockStorageWithEdits( @@ -1170,8 +1170,8 @@ public void testEditLogManifestMocks() throws IOException { // the second has finalised that file (i.e. the first failed // recently) storage = mockStorageWithEdits( - "[1,100]|[101,]", - "[1,100]|[101,200]"); + "[1,100]|[101,]", + "[1,100]|[101,200]"); log = getFSEditLog(storage); log.initJournalsForWrite(); assertEquals("[[1,100], [101,200]] CommittedTxId: 200", @@ -1179,7 +1179,7 @@ public void testEditLogManifestMocks() throws IOException { assertEquals("[[101,200]] CommittedTxId: 200", log.getEditLogManifest(101).toString()); } - + /** * Create a mock NNStorage object with several directories, each directory * holding edit logs according to a specification. Each directory @@ -1216,22 +1216,22 @@ private NNStorage mockStorageWithEdits(String... editsDirSpecs) throws IOExcepti URI u = URI.create("file:///storage"+ Math.random()); Mockito.doReturn(sd).when(storage).getStorageDirectory(u); uris.add(u); - } + } Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS); Mockito.doReturn(uris).when(storage).getEditsDirectories(); return storage; } - /** + /** * Specification for a failure during #setupEdits */ static class AbortSpec { final int roll; final int logindex; - + /** - * Construct the failure specification. + * Construct the failure specification. * @param roll number to fail after. e.g. 1 to fail after the first roll * @param logindex index of journal to fail. */ @@ -1241,15 +1241,15 @@ static class AbortSpec { } } - final static int TXNS_PER_ROLL = 10; + final static int TXNS_PER_ROLL = 10; final static int TXNS_PER_FAIL = 2; - + /** - * Set up directories for tests. + * Set up directories for tests. * - * Each rolled file is 10 txns long. + * Each rolled file is 10 txns long. * A failed file is 2 txns long. 
- * + * * @param editUris directories to create edit logs in * @param numrolls number of times to roll the edit log during setup * @param closeOnFinish whether to close the edit log after setup @@ -1273,9 +1273,9 @@ public static NNStorage setupEdits(List editUris, int numrolls, DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, 700), permission.toOctal()); } - FSEditLog editlog = getFSEditLog(storage); + FSEditLog editlog = getFSEditLog(storage); // open the edit log and add two transactions - // logGenerationStamp is used, simply because it doesn't + // logGenerationStamp is used, simply because it doesn't // require complex arguments. editlog.initJournalsForWrite(); editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); @@ -1283,30 +1283,30 @@ public static NNStorage setupEdits(List editUris, int numrolls, editlog.logGenerationStamp((long) 0); } editlog.logSync(); - + // Go into edit log rolling loop. - // On each roll, the abortAtRolls abort specs are - // checked to see if an abort is required. If so the + // On each roll, the abortAtRolls abort specs are + // checked to see if an abort is required. If so the // the specified journal is aborted. It will be brought // back into rotation automatically by rollEditLog for (int i = 0; i < numrolls; i++) { editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION); - + editlog.logGenerationStamp((long) i); editlog.logSync(); - while (aborts.size() > 0 + while (aborts.size() > 0 && aborts.get(0).roll == (i+1)) { AbortSpec spec = aborts.remove(0); editlog.getJournals().get(spec.logindex).abort(); - } - + } + for (int j = 3; j < TXNS_PER_ROLL; j++) { editlog.logGenerationStamp((long) i); } editlog.logSync(); } - + if (closeOnFinish) { editlog.close(); } @@ -1314,23 +1314,23 @@ public static NNStorage setupEdits(List editUris, int numrolls, FSImageTestUtil.logStorageContents(LOG, storage); return storage; } - + /** - * Set up directories for tests. + * Set up directories for tests. * - * Each rolled file is 10 txns long. + * Each rolled file is 10 txns long. * A failed file is 2 txns long. - * + * * @param editUris directories to create edit logs in * @param numrolls number of times to roll the edit log during setup * @param abortAtRolls Specifications for when to fail, see AbortSpec */ - public static NNStorage setupEdits(List editUris, int numrolls, + public static NNStorage setupEdits(List editUris, int numrolls, AbortSpec... abortAtRolls) throws IOException { return setupEdits(editUris, numrolls, true, abortAtRolls); } - /** + /** * Test loading an editlog which has had both its storage fail * on alternating rolls. Two edit log directories are created. * The first one fails on odd rolls, the second on even. 
Test @@ -1342,7 +1342,7 @@ public void testAlternatingJournalFailure() throws IOException { File f2 = new File(TEST_DIR + "/alternatingjournaltest1"); List editUris = ImmutableList.of(f1.toURI(), f2.toURI()); - + NNStorage storage = setupEdits(editUris, 10, new AbortSpec(1, 0), new AbortSpec(2, 1), @@ -1358,7 +1358,7 @@ public void testAlternatingJournalFailure() throws IOException { FSEditLog editlog = getFSEditLog(storage); editlog.initJournalsForWrite(); long startTxId = 1; - Iterable editStreams = editlog.selectInputStreams(startTxId, + Iterable editStreams = editlog.selectInputStreams(startTxId, TXNS_PER_ROLL*11); for (EditLogInputStream edits : editStreams) { @@ -1373,16 +1373,16 @@ public void testAlternatingJournalFailure() throws IOException { editlog.close(); storage.close(); - assertEquals(TXNS_PER_ROLL*11, totaltxnread); + assertEquals(TXNS_PER_ROLL*11, totaltxnread); } - /** + /** * Test loading an editlog with gaps. A single editlog directory * is set up. On of the edit log files is deleted. This should - * fail when selecting the input streams as it will not be able + * fail when selecting the input streams as it will not be able * to select enough streams to load up to 4*TXNS_PER_ROLL. * There should be 4*TXNS_PER_ROLL transactions as we rolled 3 - * times. + * times. */ @Test public void testLoadingWithGaps() throws IOException { @@ -1390,14 +1390,14 @@ public void testLoadingWithGaps() throws IOException { List editUris = ImmutableList.of(f1.toURI()); NNStorage storage = setupEdits(editUris, 3); - + final long startGapTxId = 1*TXNS_PER_ROLL + 1; final long endGapTxId = 2*TXNS_PER_ROLL; File[] files = new File(f1, "current").listFiles(new FilenameFilter() { @Override public boolean accept(File dir, String name) { - if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, + if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) { return true; } @@ -1406,7 +1406,7 @@ public boolean accept(File dir, String name) { }); assertEquals(1, files.length); assertTrue(files[0].delete()); - + FSEditLog editlog = getFSEditLog(storage); editlog.initJournalsForWrite(); long startTxId = 1; @@ -1510,7 +1510,7 @@ private static long readAllEdits(Collection streams, } /** - * Test edit log failover. If a single edit log is missing, other + * Test edit log failover. If a single edit log is missing, other * edits logs should be used instead. 
*/ @Test @@ -1520,14 +1520,14 @@ public void testEditLogFailOverFromMissing() throws IOException { List editUris = ImmutableList.of(f1.toURI(), f2.toURI()); NNStorage storage = setupEdits(editUris, 3); - + final long startErrorTxId = 1*TXNS_PER_ROLL + 1; final long endErrorTxId = 2*TXNS_PER_ROLL; File[] files = new File(f1, "current").listFiles(new FilenameFilter() { @Override public boolean accept(File dir, String name) { - if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, + if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) { return true; } @@ -1553,7 +1553,7 @@ public boolean accept(File dir, String name) { } } - /** + /** * Test edit log failover from a corrupt edit log */ @Test @@ -1563,14 +1563,14 @@ public void testEditLogFailOverFromCorrupt() throws IOException { List editUris = ImmutableList.of(f1.toURI(), f2.toURI()); NNStorage storage = setupEdits(editUris, 3); - + final long startErrorTxId = 1*TXNS_PER_ROLL + 1; final long endErrorTxId = 2*TXNS_PER_ROLL; File[] files = new File(f1, "current").listFiles(new FilenameFilter() { @Override public boolean accept(File dir, String name) { - if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, + if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId, endErrorTxId))) { return true; } @@ -1587,7 +1587,7 @@ public boolean accept(File dir, String name) { rwf.seek(fileLen-4); rwf.writeInt(b+1); rwf.close(); - + FSEditLog editlog = getFSEditLog(storage); editlog.initJournalsForWrite(); long startTxId = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java index f5a112c7acd54..349f287ac0cb2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java @@ -60,7 +60,7 @@ public class TestFavoredNodesEndToEnd { private static Configuration conf; private final static int NUM_DATA_NODES = 10; private final static int NUM_FILES = 10; - private final static byte[] SOME_BYTES = new String("foo").getBytes(); + private final static byte[] SOME_BYTES = "foo".getBytes(); private static DistributedFileSystem dfs; private static ArrayList datanodes; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 53c689bbfddc1..677b83437b98b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -50,6 +50,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Random; @@ -386,15 +387,15 @@ public void testFsckMove() throws Exception { cluster.getNameNodePort()), conf); String[] fileNames = util.getFileNames(topDir); CorruptedTestFile[] ctFiles = new CorruptedTestFile[]{ - new CorruptedTestFile(fileNames[0], Sets.newHashSet(0), + new CorruptedTestFile(fileNames[0], new HashSet<>(Arrays.asList(0)), dfsClient, numDatanodes, dfsBlockSize), - new 
CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3), + new CorruptedTestFile(fileNames[1], new HashSet<>(Arrays.asList(2, 3)), dfsClient, numDatanodes, dfsBlockSize), - new CorruptedTestFile(fileNames[2], Sets.newHashSet(4), + new CorruptedTestFile(fileNames[2], new HashSet<>(Arrays.asList(4)), dfsClient, numDatanodes, dfsBlockSize), - new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3), + new CorruptedTestFile(fileNames[3], new HashSet<>(Arrays.asList(0, 1, 2, 3)), dfsClient, numDatanodes, dfsBlockSize), - new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4), + new CorruptedTestFile(fileNames[4], new HashSet<>(Arrays.asList(1, 2, 3, 4)), dfsClient, numDatanodes, dfsBlockSize) }; int totalMissingBlocks = 0; @@ -947,7 +948,7 @@ public void testFsckReplicaDetails() throws Exception { dfs = cluster.getFileSystem(); // create files - final String testFile = new String("/testfile"); + final String testFile = "/testfile"; final Path path = new Path(testFile); DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L); DFSTestUtil.waitReplication(dfs, path, replFactor); @@ -1236,7 +1237,7 @@ public void testFsckMissingReplicas() throws IOException { assertNotNull("Failed to get FileSystem", dfs); // Create a file that will be intentionally under-replicated - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); long fileLen = blockSize * numBlocks; DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1); @@ -1298,7 +1299,7 @@ public void testFsckMisPlacedReplicas() throws IOException { assertNotNull("Failed to get FileSystem", dfs); // Create a file that will be intentionally under-replicated - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); long fileLen = blockSize * numBlocks; DFSTestUtil.createFile(dfs, path, fileLen, replFactor, 1); @@ -1475,7 +1476,7 @@ public void testBlockIdCK() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); util.createFile(dfs, path, 1024, replFactor, 1000L); util.waitReplication(dfs, path, replFactor); @@ -1530,7 +1531,7 @@ public void testBlockIdCKDecommission() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); util.createFile(dfs, path, 1024, replFactor, 1000L); util.waitReplication(dfs, path, replFactor); @@ -1618,7 +1619,7 @@ public void testBlockIdCKMaintenance() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); util.createFile(dfs, path, 1024, replFactor, 1000L); util.waitReplication(dfs, path, replFactor); @@ -1737,7 +1738,7 @@ public void testBlockIdCKCorruption() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder(). 
setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String pathString = new String("/testfile"); + final String pathString = "/testfile"; final Path path = new Path(pathString); util.createFile(dfs, path, 1024, repFactor, 1000L); util.waitReplication(dfs, path, repFactor); @@ -1851,7 +1852,7 @@ public void testFsckWithDecommissionedReplicas() throws Exception { setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String testFile = new String("/testfile"); + final String testFile = "/testfile"; final Path path = new Path(testFile); util.createFile(dfs, path, fileSize, replFactor, 1000L); util.waitReplication(dfs, path, replFactor); @@ -1935,7 +1936,7 @@ public void testFsckWithMaintenanceReplicas() throws Exception { DFSTestUtil util = new DFSTestUtil.Builder(). setName(getClass().getSimpleName()).setNumFiles(1).build(); //create files - final String testFile = new String("/testfile"); + final String testFile = "/testfile"; final Path path = new Path(testFile); util.createFile(dfs, path, 1024, replFactor, 1000L); util.waitReplication(dfs, path, replFactor); @@ -2215,7 +2216,7 @@ public void testFsckMoveAfterCorruption() throws Exception { new InetSocketAddress("localhost", cluster.getNameNodePort()), conf); final String blockFileToCorrupt = fileNames[0]; final CorruptedTestFile ctf = new CorruptedTestFile(blockFileToCorrupt, - Sets.newHashSet(0), dfsClient, numDatanodes, dfsBlockSize); + new HashSet<>(Arrays.asList(0)), dfsClient, numDatanodes, dfsBlockSize); ctf.corruptBlocks(cluster); // Wait for fsck to discover all the missing blocks @@ -2314,7 +2315,7 @@ private void testUpgradeDomain(boolean defineUpgradeDomain, } // create files - final String testFile = new String("/testfile"); + final String testFile = "/testfile"; final Path path = new Path(testFile); DFSTestUtil.createFile(dfs, path, fileSize, replFactor, 1000L); DFSTestUtil.waitReplication(dfs, path, replFactor); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index b32f8fe759d1e..4f18baf1aeaa9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -1163,7 +1163,7 @@ public void testFilesInGetListingOps() throws Exception { HdfsFileStatus.EMPTY_NAME, false); assertTrue(dl.getPartialListing().length == 3); - String f2 = new String("f2"); + String f2 = "f2"; dl = cluster.getNameNodeRpc().getListing("/tmp", f2.getBytes(), false); assertTrue(dl.getPartialListing().length == 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java index 9b5e9884c525a..95fe9b37ca88f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetricsLogger.java @@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode; import java.util.function.Supplier; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.impl.Log4JLogger; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -67,40 +65,6 @@ public void testDisableMetricsLogger() throws IOException { assertNull(nn.metricsLoggerTimer); } - @Test - public void testMetricsLoggerIsAsync() throws IOException { - makeNameNode(true); - org.apache.log4j.Logger logger = - ((Log4JLogger) NameNode.MetricsLog).getLogger(); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - assertTrue(appenders.get(0) instanceof AsyncAppender); - } - - /** - * Publish a fake metric under the "Hadoop:" domain and ensure it is - * logged by the metrics logger. - */ - @Test - public void testMetricsLogOutput() - throws IOException, InterruptedException, TimeoutException { - TestFakeMetric metricsProvider = new TestFakeMetric(); - MBeans.register(this.getClass().getSimpleName(), - "DummyMetrics", metricsProvider); - makeNameNode(true); // Log metrics early and often. - final PatternMatchingAppender appender = - new PatternMatchingAppender("^.*FakeMetric42.*$"); - addAppender(NameNode.MetricsLog, appender); - - // Ensure that the supplied pattern was matched. - GenericTestUtils.waitFor(new Supplier() { - @Override - public Boolean get() { - return appender.isMatched(); - } - }, 1000, 60000); - } - /** * Create a NameNode object that listens on a randomly chosen port * number. @@ -118,13 +82,6 @@ private NameNode makeNameNode(boolean enableMetricsLogging) return new TestNameNode(conf); } - private void addAppender(Log log, Appender appender) { - org.apache.log4j.Logger logger = ((Log4JLogger) log).getLogger(); - @SuppressWarnings("unchecked") - List appenders = Collections.list(logger.getAllAppenders()); - ((AsyncAppender) appenders.get(0)).addAppender(appender); - } - /** * A NameNode that stubs out the NameSystem for testing. 
*/ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 8ef8cf68fbf53..3a0e81265a1d0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -28,6 +28,7 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Set; @@ -300,7 +301,7 @@ public long getLastValidTxId() { @Override public Set getValidTxIds() { - return Sets.newHashSet(0L); + return new HashSet<>(Arrays.asList(0L)); } public int getMaxOpSize() { @@ -342,7 +343,7 @@ public long getLastValidTxId() { @Override public Set getValidTxIds() { - return Sets.newHashSet(0L); + return new HashSet<>(Arrays.asList(0L)); } } @@ -388,7 +389,7 @@ public long getLastValidTxId() { @Override public Set getValidTxIds() { - return Sets.newHashSet(1L , 2L, 3L, 5L, 6L, 7L, 8L, 9L, 10L); + return new HashSet<>(Arrays.asList(1L, 2L, 3L, 5L, 6L, 7L, 8L, 9L, 10L)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java index c43c909c98a1e..0896780dfb11b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java @@ -104,7 +104,7 @@ public void run() { @Test public void testEditLog() throws IOException { - // start a cluster + // start a cluster Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; FileSystem fileSys = null; @@ -117,18 +117,18 @@ public void testEditLog() throws IOException { cluster.waitActive(); fileSys = cluster.getFileSystem(); final FSNamesystem namesystem = cluster.getNamesystem(); - + for (Iterator it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) { File dir = new File(it.next().getPath()); System.out.println(dir); } - + FSImage fsimage = namesystem.getFSImage(); FSEditLog editLog = fsimage.getEditLog(); - + // set small size of flush buffer editLog.setOutputBufferCapacity(2048); - + // Create threads and make them run transactions concurrently. Thread threadId[] = new Thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; i++) { @@ -136,18 +136,18 @@ public void testEditLog() throws IOException { threadId[i] = new Thread(trans, "TransactionThread-" + i); threadId[i].start(); } - + // wait for all transactions to get over for (int i = 0; i < NUM_THREADS; i++) { try { threadId[i].join(); } catch (InterruptedException e) { - i--; // retry + i--; // retry } - } - + } + editLog.close(); - + // Verify that we can read in all the transactions that we have written. // If there were any corruptions, it is likely that the reading in // of these transactions will throw an exception. 
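[Illustrative sketch, not part of the patched files: the TestFsck and TestNameNodeRecovery hunks above replace Guava's Sets.newHashSet helper and redundant new String(...) wrappers with plain JDK constructs. The class and member names below are hypothetical; only the substitution pattern is taken from the patch.]

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

class JdkOnlySketch {
  // Guava style: Set<Long> txIds = Sets.newHashSet(1L, 2L, 3L);
  // JDK-only equivalent used throughout the patch:
  static Set<Long> txIds() {
    return new HashSet<>(Arrays.asList(1L, 2L, 3L));
  }

  // new String("/testfile") only allocates a needless copy; the literal suffices.
  static final String TEST_FILE = "/testfile";
}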
@@ -160,8 +160,8 @@ public void testEditLog() throws IOException { for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) { File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1); System.out.println("Verifying file: " + editFile); - - FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0); + + FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0); long numEdits = loader.loadFSEdits( new EditLogFileInputStream(editFile), 1); assertEquals("Verification for " + editFile, expectedTransactions, numEdits); @@ -171,7 +171,7 @@ public void testEditLog() throws IOException { if(cluster != null) cluster.shutdown(); } } - + @Test(timeout=10000) public void testEditsForCancelOnTokenExpire() throws IOException, InterruptedException { @@ -204,11 +204,11 @@ public Void answer(InvocationOnMock invocation) throws Throwable { ).when(log).logCancelDelegationToken(any(DelegationTokenIdentifier.class)); FSNamesystem fsn = new FSNamesystem(conf, fsImage); fsnRef.set(fsn); - + DelegationTokenSecretManager dtsm = fsn.getDelegationTokenSecretManager(); try { dtsm.startThreads(); - + // get two tokens Token token1 = fsn.getDelegationToken(renewer); Token token2 = fsn.getDelegationToken(renewer); @@ -216,14 +216,14 @@ public Void answer(InvocationOnMock invocation) throws Throwable { token1.decodeIdentifier(); DelegationTokenIdentifier ident2 = token2.decodeIdentifier(); - + // verify we got the tokens verify(log, times(1)).logGetDelegationToken(eq(ident1), anyLong()); verify(log, times(1)).logGetDelegationToken(eq(ident2), anyLong()); - + // this is a little tricky because DTSM doesn't let us set scan interval // so need to periodically sleep, then stop/start threads to force scan - + // renew first token 1/2 to expire Thread.sleep(renewInterval/2); fsn.renewDelegationToken(token2); @@ -231,10 +231,10 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // force scan and give it a little time to complete dtsm.stopThreads(); dtsm.startThreads(); Thread.sleep(250); - // no token has expired yet + // no token has expired yet verify(log, times(0)).logCancelDelegationToken(eq(ident1)); verify(log, times(0)).logCancelDelegationToken(eq(ident2)); - + // sleep past expiration of 1st non-renewed token Thread.sleep(renewInterval/2); dtsm.stopThreads(); dtsm.startThreads(); @@ -242,7 +242,7 @@ public Void answer(InvocationOnMock invocation) throws Throwable { // non-renewed token should have implicitly been cancelled verify(log, times(1)).logCancelDelegationToken(eq(ident1)); verify(log, times(0)).logCancelDelegationToken(eq(ident2)); - + // sleep past expiration of 2nd renewed token Thread.sleep(renewInterval/2); dtsm.stopThreads(); dtsm.startThreads(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java index 245602ee9bfa7..d28f0a4c2920e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java @@ -24,6 +24,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.Collections; import java.util.Map; @@ -245,7 +246,7 @@ public void 
testFinalState() throws Exception { */ private String doGetAndReturnResponseBody() throws IOException { servlet.doGet(req, resp); - return new String(respOut.toByteArray(), "UTF-8"); + return new String(respOut.toByteArray(), StandardCharsets.UTF_8); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java index f9bcb33154cae..7b11ec30b43d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java @@ -89,7 +89,7 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import java.util.function.Supplier; import org.apache.hadoop.thirdparty.com.google.common.collect.HashMultimap; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java index 6b8657ccce3b4..abf06982e48d8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdminWithHA.java @@ -19,8 +19,8 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; @@ -56,8 +56,8 @@ public class TestDFSAdminWithHA { private static String newLine = System.getProperty("line.separator"); private void assertOutputMatches(String string) { - String errOutput = new String(err.toByteArray(), Charsets.UTF_8); - String output = new String(out.toByteArray(), Charsets.UTF_8); + String errOutput = new String(err.toByteArray(), StandardCharsets.UTF_8); + String output = new String(out.toByteArray(), StandardCharsets.UTF_8); if (!errOutput.matches(string) && !output.matches(string)) { fail("Expected output to match '" + string + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 0086134d78817..51d5b90e38d85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -26,6 +26,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +49,6 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; public class TestDFSHAAdmin { @@ -435,8 +435,8 @@ private Object runTool(String ... 
args) throws Exception { outBytes.reset(); LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args)); int ret = tool.run(args); - errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8); - output = new String(outBytes.toByteArray(), Charsets.UTF_8); + errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8); + output = new String(outBytes.toByteArray(), StandardCharsets.UTF_8); LOG.info("Err_output:\n" + errOutput + "\nOutput:\n" + output); return ret; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java index aa048f865c2de..d99f632354e05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdminMiniCluster.java @@ -25,6 +25,7 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,7 +44,6 @@ import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; import org.apache.hadoop.thirdparty.com.google.common.io.Files; @@ -208,7 +208,7 @@ public void testFencer() throws Exception { assertEquals(0, runTool("-ns", "minidfs-ns", "-failover", "nn2", "nn1")); // Fencer has not run yet, since none of the above required fencing - assertEquals("", Files.asCharSource(tmpFile, Charsets.UTF_8).read()); + assertEquals("", Files.asCharSource(tmpFile, StandardCharsets.UTF_8).read()); // Test failover with fencer and forcefence option assertEquals(0, runTool("-failover", "nn1", "nn2", "--forcefence")); @@ -216,7 +216,7 @@ public void testFencer() throws Exception { // The fence script should run with the configuration from the target // node, rather than the configuration from the fencing node. Strip // out any trailing spaces and CR/LFs which may be present on Windows. - String fenceCommandOutput = Files.asCharSource(tmpFile, Charsets.UTF_8) + String fenceCommandOutput = Files.asCharSource(tmpFile, StandardCharsets.UTF_8) .read().replaceAll(" *[\r\n]+", ""); assertEquals("minidfs-ns.nn1 " + nn1Port + " nn1", fenceCommandOutput); tmpFile.delete(); @@ -301,7 +301,7 @@ private int runTool(String ... 
args) throws Exception { errOutBytes.reset(); LOG.info("Running: DFSHAAdmin " + Joiner.on(" ").join(args)); int ret = tool.run(args); - errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8); + errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8); LOG.info("Output:\n" + errOutput); return ret; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java index 9d8c82c63708a..d1922dff8216e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestStripedBlockUtil.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdfs.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.StripedFileTestUtil; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 722ebad72d239..b0c6a62a4c2c2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -27,6 +27,7 @@ import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; +import java.nio.charset.StandardCharsets; import java.text.MessageFormat; import java.util.Arrays; import java.util.Map; @@ -336,7 +337,7 @@ public void testLengthParamLongerThanFile() throws IOException { byte[] respBody = new byte[content.length()]; is = conn.getInputStream(); IOUtils.readFully(is, respBody, 0, content.length()); - assertEquals(content, new String(respBody, "US-ASCII")); + assertEquals(content, new String(respBody, StandardCharsets.US_ASCII)); } finally { IOUtils.closeStream(is); if (conn != null) { @@ -387,7 +388,7 @@ public void testOffsetPlusLengthParamsLongerThanFile() throws IOException { byte[] respBody = new byte[content.length() - 1]; is = conn.getInputStream(); IOUtils.readFully(is, respBody, 0, content.length() - 1); - assertEquals(content.substring(1), new String(respBody, "US-ASCII")); + assertEquals(content.substring(1), new String(respBody, StandardCharsets.US_ASCII)); } finally { IOUtils.closeStream(is); if (conn != null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index a693ac3d5e981..1f5c89a03efaa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -31,6 +31,7 @@ import java.net.Socket; import java.net.SocketTimeoutException; import java.nio.channels.SocketChannel; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -332,7 +333,7 @@ public void run() { // Write response. 
out = clientSocket.getOutputStream(); - out.write(temporaryRedirect().getBytes("UTF-8")); + out.write(temporaryRedirect().getBytes(StandardCharsets.UTF_8)); } catch (IOException e) { // Fail the test on any I/O error in the server thread. LOG.error("unexpected IOException in server thread", e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java index 2d7410a405cc9..811bd65193ea5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestRefreshUserMappings.java @@ -34,8 +34,12 @@ import java.net.URL; import java.net.URLDecoder; import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.List; +import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -60,10 +64,10 @@ public class TestRefreshUserMappings { Configuration config; private static final long groupRefreshTimeoutSec = 1; private String tempResource = null; - + public static class MockUnixGroupsMapping implements GroupMappingServiceProvider { private int i=0; - + @Override public List getGroups(String user) throws IOException { System.out.println("Getting groups in MockUnixGroupsMapping"); @@ -80,12 +84,12 @@ public List getGroups(String user) throws IOException { public void cacheGroupsRefresh() throws IOException { System.out.println("Refreshing groups in MockUnixGroupsMapping"); } - + @Override public void cacheGroupsAdd(List groups) throws IOException { } } - + @Before public void setUp() throws Exception { config = new Configuration(); @@ -94,7 +98,7 @@ public void setUp() throws Exception { GroupMappingServiceProvider.class); config.setLong("hadoop.security.groups.cache.secs", groupRefreshTimeoutSec); Groups.getUserToGroupsMappingService(config); - + FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0"); cluster = new MiniDFSCluster.Builder(config).build(); cluster.waitActive(); @@ -114,7 +118,7 @@ public void tearDown() throws Exception { tempResource = null; } } - + @Test public void testGroupMappingRefresh() throws Exception { DFSAdmin admin = new DFSAdmin(config); @@ -173,11 +177,11 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception { getProxySuperuserGroupConfKey(SUPER_USER); String userKeyHosts = DefaultImpersonationProvider.getTestProvider(). 
getProxySuperuserIpConfKey (SUPER_USER); - + config.set(userKeyGroups, "gr3,gr4,gr5"); // superuser can proxy for this group config.set(userKeyHosts,"127.0.0.1"); ProxyUsers.refreshSuperUserGroupsConfiguration(config); - + UserGroupInformation ugi1 = mock(UserGroupInformation.class); UserGroupInformation ugi2 = mock(UserGroupInformation.class); UserGroupInformation suUgi = mock(UserGroupInformation.class); @@ -186,10 +190,10 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception { when(suUgi.getShortUserName()).thenReturn(SUPER_USER); // super user when(suUgi.getUserName()).thenReturn(SUPER_USER+"L"); // super user - + when(ugi1.getShortUserName()).thenReturn("user1"); when(ugi2.getShortUserName()).thenReturn("user2"); - + when(ugi1.getUserName()).thenReturn("userL1"); when(ugi2.getUserName()).thenReturn("userL2"); @@ -213,18 +217,18 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception { } catch (AuthorizationException e) { fail("first auth for " + ugi2.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage()); } - + // refresh will look at configuration on the server side // add additional resource with the new value // so the server side will pick it up String rsrc = "testGroupMappingRefresh_rsrc.xml"; tempResource = addNewConfigResource(rsrc, userKeyGroups, "gr2", userKeyHosts, "127.0.0.1"); - + DFSAdmin admin = new DFSAdmin(config); String [] args = new String[]{"-refreshSuperUserGroupsConfiguration"}; admin.run(args); - + try { ProxyUsers.authorize(ugi2, "127.0.0.1"); fail("second auth for " + ugi2.getShortUserName() + " should've failed "); @@ -239,8 +243,8 @@ public void testRefreshSuperUserGroupsConfiguration() throws Exception { } catch (AuthorizationException e) { fail("second auth for " + ugi1.getShortUserName() + " should've succeeded: " + e.getLocalizedMessage()); } - - + + } public static String addNewConfigResource(String rsrcName, String keyGroup, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java index c4a1e4aa2ca77..e05aacb0ec86a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestHdfsConfigFields.java @@ -39,7 +39,7 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase { @Override public void initializeMemberVariables() { - xmlFilename = new String("hdfs-default.xml"); + xmlFilename = "hdfs-default.xml"; configurationClasses = new Class[] { HdfsClientConfigKeys.class, HdfsClientConfigKeys.Failover.class, HdfsClientConfigKeys.StripedRead.class, DFSConfigKeys.class, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index 1640ee2f4fe57..fe998bbc3f0cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -146,7 +146,7 @@ import 
org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java index 72f8047dc33d9..78174afb6f892 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AMWebServices.java @@ -78,7 +78,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException; import org.apache.hadoop.yarn.webapp.NotFoundException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import com.google.inject.Inject; @Path("/ws/v1/mapreduce") diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index 5a23b58875a0b..d0b9acee8ec80 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -28,6 +28,7 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.nio.charset.StandardCharsets; import java.util.function.Supplier; import java.io.File; import java.io.FileInputStream; @@ -2097,7 +2098,7 @@ public static String slurp(File f) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java index b669df765baca..77795316d74e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/speculate/forecast/TestSimpleExponentialForecast.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapreduce.v2.app.speculate.forecast; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.yarn.util.ControlledClock; import org.junit.Assert; import org.junit.Test; @@ -28,8 +28,8 @@ * Testing the 
statistical model of simple exponential estimator. */ public class TestSimpleExponentialForecast { - private static final Log LOG = - LogFactory.getLog(TestSimpleExponentialForecast.class); + private static final Logger LOG = + LoggerFactory.getLogger(TestSimpleExponentialForecast.class); private static long clockTicks = 1000L; private ControlledClock clock; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java index 9158ec3b33720..3b41f418640c0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java @@ -21,6 +21,7 @@ import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.ArrayList; import java.util.Arrays; @@ -31,7 +32,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * JobQueueClient is interface provided to the user in order to get @@ -148,7 +148,7 @@ private void displayQueueList() throws IOException { JobQueueInfo[] rootQueues = jc.getRootQueues(); for (JobQueueInfo queue : rootQueues) { printJobQueueInfo(queue, new PrintWriter(new OutputStreamWriter( - System.out, Charsets.UTF_8))); + System.out, StandardCharsets.UTF_8))); } } @@ -187,7 +187,7 @@ private void displayQueueInfo(String queue, boolean showJobs) return; } printJobQueueInfo(jobQueueInfo, new PrintWriter(new OutputStreamWriter( - System.out, Charsets.UTF_8))); + System.out, StandardCharsets.UTF_8))); if (showJobs && (jobQueueInfo.getChildren() == null || jobQueueInfo.getChildren().size() == 0)) { JobStatus[] jobs = jobQueueInfo.getJobStatuses(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java index f5e07e9128a1c..7b9281aced247 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLog.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Enumeration; import java.util.List; @@ -56,7 +57,6 @@ import org.apache.log4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * A simple logger to handle the task-specific user logs. 
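[Illustrative sketch, not part of the patched files: the hunks around this point (JobQueueClient, TaskLog, TextInputFormat, JobSubmitter) swap Guava's Charsets constants and charset names passed as strings ("UTF-8") for java.nio.charset.StandardCharsets. Names below are hypothetical; the pattern is the one the patch applies.]

import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.nio.charset.StandardCharsets;

class CharsetSketch {
  // String.getBytes("UTF-8") forces a checked UnsupportedEncodingException, and
  // Charsets.UTF_8 pulls in the shaded Guava artifact; the Charset constant does neither.
  static byte[] encode(String s) {
    return s.getBytes(StandardCharsets.UTF_8);
  }

  static String decode(byte[] raw) {
    return new String(raw, StandardCharsets.UTF_8);
  }

  // Writers accept the Charset constant directly as well.
  static PrintWriter utf8Writer() {
    return new PrintWriter(new OutputStreamWriter(System.out, StandardCharsets.UTF_8));
  }
}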
@@ -114,7 +114,7 @@ private static LogFileDetail getLogFileDetail(TaskAttemptID taskid, File indexFile = getIndexFile(taskid, isCleanup); BufferedReader fis = new BufferedReader(new InputStreamReader( SecureIOUtils.openForRead(indexFile, obtainLogDirOwner(taskid), null), - Charsets.UTF_8)); + StandardCharsets.UTF_8)); //the format of the index file is //LOG_DIR: //stdout: diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java index 45b4fd6f717ba..62f4031566f7f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TextInputFormat.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapred; import java.io.*; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -27,7 +28,6 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.*; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * An {@link InputFormat} for plain text files. Files are broken into lines. @@ -62,7 +62,7 @@ public RecordReader getRecordReader( String delimiter = job.get("textinputformat.record.delimiter"); byte[] recordDelimiterBytes = null; if (null != delimiter) { - recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8); + recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8); } return new LineRecordReader(job, (FileSplit) genericSplit, recordDelimiterBytes); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java index 4c983178a7f41..ffb32ac5bd9ff 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java @@ -22,6 +22,7 @@ import java.net.InetAddress; import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; @@ -63,7 +64,6 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.yarn.api.records.ReservationId; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; @InterfaceAudience.Private @InterfaceStability.Unstable @@ -408,7 +408,7 @@ private void readTokensFromFiles(Configuration conf, Credentials credentials) for(Map.Entry ent: nm.entrySet()) { credentials.addSecretKey(new Text(ent.getKey()), ent.getValue() - .getBytes(Charsets.UTF_8)); + .getBytes(StandardCharsets.UTF_8)); } } catch (JsonMappingException | JsonParseException e) { LOG.warn("couldn't parse Token Cache JSON file with user secret keys"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java index 97d19a668dec0..3834d4ea88989 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java @@ -28,7 +28,7 @@ import java.util.Map; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*; +import static org.apache.hadoop.util.Preconditions.*; import org.apache.hadoop.thirdparty.com.google.common.collect.AbstractIterator; import org.apache.hadoop.thirdparty.com.google.common.collect.Iterators; import org.apache.hadoop.thirdparty.com.google.common.collect.Maps; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java index ca7f1f06514d8..7ef3ec03f383f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FrameworkCounterGroup.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.counters; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import java.io.DataInput; import java.io.DataOutput; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java index 5f8e9ad4b6858..88e8d07a293d5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JSONHistoryViewerPrinter.java @@ -33,6 +33,7 @@ import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.Writer; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.Map; @@ -72,7 +73,7 @@ public void print(PrintStream ps) throws IOException { printTaskSummary(); printTasks(); - writer = new OutputStreamWriter(ps, "UTF-8"); + writer = new OutputStreamWriter(ps, StandardCharsets.UTF_8); json.write(writer); writer.flush(); } catch (JSONException je) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java index d15ee7ca4c3fb..77bea97e149f9 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TextInputFormat.java @@ -32,7 +32,8 @@ import org.apache.hadoop.mapreduce.RecordReader; import org.apache.hadoop.mapreduce.TaskAttemptContext; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import java.nio.charset.StandardCharsets; + /** An {@link InputFormat} for plain text files. Files are broken into lines. * Either linefeed or carriage-return are used to signal end of line. Keys are @@ -49,7 +50,7 @@ public class TextInputFormat extends FileInputFormat { "textinputformat.record.delimiter"); byte[] recordDelimiterBytes = null; if (null != delimiter) - recordDelimiterBytes = delimiter.getBytes(Charsets.UTF_8); + recordDelimiterBytes = delimiter.getBytes(StandardCharsets.UTF_8); return new LineRecordReader(recordDelimiterBytes); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java index 877d73c5a59de..82b7fcb504622 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java @@ -37,7 +37,7 @@ import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.DurationInfo; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java index 15ff3c67c2fb5..2b1f7e37ebe75 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputFormat.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.text.NumberFormat; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java index ddcff646e04fd..3ec2795947c8d 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NamedCommitterFactory.java @@ -22,7 +22,7 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java index e463632fa56ae..c9fbe3b065f44 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/PathOutputCommitter.java @@ -20,7 +20,7 @@ import java.io.IOException; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java index 69377e73e6d46..f7b68f75e8fb1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldBasedPartitioner.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.lib.partition; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience; @@ -90,12 +90,7 @@ public int getPartition(K2 key, V2 value, int numReduceTasks) { return getPartition(key.toString().hashCode(), numReduceTasks); } - try { - keyBytes = key.toString().getBytes("UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("The current system does not " + - "support UTF-8 encoding!", e); - } + keyBytes = key.toString().getBytes(StandardCharsets.UTF_8); // return 0 if the key is empty if (keyBytes.length == 0) { return 0; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java index 21ca3fae33d82..03ef14c9ba698 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/KeyFieldHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.lib.partition; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.ArrayList; import java.util.StringTokenizer; @@ -61,13 +61,8 @@ public String toString() { private boolean keySpecSeen = false; public void setKeyFieldSeparator(String keyFieldSeparator) { - try { - this.keyFieldSeparator = - keyFieldSeparator.getBytes("UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("The current system does not " + - "support UTF-8 encoding!", e); - } + this.keyFieldSeparator = + keyFieldSeparator.getBytes(StandardCharsets.UTF_8); } /** Required for backcompatibility with num.key.fields.for.partition in diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java index cdd656ca48907..47c8d12cb4b2f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/SecureShuffleUtils.java @@ -23,6 +23,7 @@ import java.io.PrintStream; import java.io.UnsupportedEncodingException; import java.net.URL; +import java.nio.charset.StandardCharsets; import javax.crypto.SecretKey; import javax.servlet.http.HttpServletRequest; @@ -34,7 +35,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * @@ -56,7 +56,7 @@ public class SecureShuffleUtils { */ public static String generateHash(byte[] msg, SecretKey key) { return new String(Base64.encodeBase64(generateByteHash(msg, key)), - Charsets.UTF_8); + StandardCharsets.UTF_8); } /** @@ -70,7 +70,6 @@ private static byte[] generateByteHash(byte[] msg, SecretKey key) { /** * verify that hash equals to HMacHash(msg) - * @param newHash * @return true if is the same */ private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) { @@ -87,7 +86,7 @@ private static boolean verifyHash(byte[] hash, byte[] msg, SecretKey key) { */ public static String hashFromString(String enc_str, SecretKey key) throws IOException { - return generateHash(enc_str.getBytes(Charsets.UTF_8), key); + return generateHash(enc_str.getBytes(StandardCharsets.UTF_8), key); } /** @@ -98,9 +97,9 @@ public static String hashFromString(String enc_str, SecretKey key) */ public static void verifyReply(String base64Hash, String msg, SecretKey key) throws IOException { - byte[] hash = Base64.decodeBase64(base64Hash.getBytes(Charsets.UTF_8)); + byte[] hash = Base64.decodeBase64(base64Hash.getBytes(StandardCharsets.UTF_8)); - boolean res = verifyHash(hash, msg.getBytes(Charsets.UTF_8), key); + boolean res = verifyHash(hash, msg.getBytes(StandardCharsets.UTF_8), key); if(res != true) { throw new IOException("Verification of the hashReply failed"); @@ -148,7 +147,7 @@ public static String toHex(byte[] ba) { for (byte b : ba) { ps.printf("%x", b); } - strHex = baos.toString("UTF-8"); + strHex = new String(baos.toByteArray(), 
StandardCharsets.UTF_8); } catch (UnsupportedEncodingException e) { } return strHex; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java index 7d08fb3acd791..d725196b9b884 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplit.java @@ -20,7 +20,7 @@ import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; @@ -47,15 +47,8 @@ @InterfaceStability.Unstable public class JobSplit { static final int META_SPLIT_VERSION = 1; - static final byte[] META_SPLIT_FILE_HEADER; - static { - try { - META_SPLIT_FILE_HEADER = "META-SPL".getBytes("UTF-8"); - } catch (UnsupportedEncodingException u) { - throw new RuntimeException(u); - } - } - public static final TaskSplitMetaInfo EMPTY_TASK_SPLIT = + static final byte[] META_SPLIT_FILE_HEADER = "META-SPL".getBytes(StandardCharsets.UTF_8); + public static final TaskSplitMetaInfo EMPTY_TASK_SPLIT = new TaskSplitMetaInfo(); /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java index 76234bd17fe9e..a32e254af4a01 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/split/JobSplitWriter.java @@ -19,7 +19,7 @@ package org.apache.hadoop.mapreduce.split; import java.io.IOException; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.List; @@ -54,16 +54,8 @@ public class JobSplitWriter { private static final Logger LOG = LoggerFactory.getLogger(JobSplitWriter.class); private static final int splitVersion = JobSplit.META_SPLIT_VERSION; - private static final byte[] SPLIT_FILE_HEADER; + private static final byte[] SPLIT_FILE_HEADER = "SPL".getBytes(StandardCharsets.UTF_8); - static { - try { - SPLIT_FILE_HEADER = "SPL".getBytes("UTF-8"); - } catch (UnsupportedEncodingException u) { - throw new RuntimeException(u); - } - } - @SuppressWarnings("unchecked") public static void createSplitFiles(Path jobSubmitDir, Configuration conf, FileSystem fs, List splits) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java index 7e1d9f64ea3bb..e43d8d95171a1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java @@ -24,6 +24,7 @@ import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -64,7 +65,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Interprets the map reduce cli options @@ -767,7 +767,7 @@ protected void displayTasks(Job job, String type, String state) public void displayJobList(JobStatus[] jobs) throws IOException, InterruptedException { displayJobList(jobs, new PrintWriter(new OutputStreamWriter(System.out, - Charsets.UTF_8))); + StandardCharsets.UTF_8))); } @Private diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java index bb5c30e9511fa..b646b04b74034 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestFileOutputCommitter.java @@ -22,6 +22,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.net.URI; +import java.nio.charset.StandardCharsets; import org.junit.Test; import static org.assertj.core.api.Assertions.assertThat; @@ -571,7 +572,7 @@ public static String slurp(File f) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java index f4f2d18c3823b..1b3ebdfa31da0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestLineRecordReader.java @@ -238,7 +238,7 @@ public String[] readRecordsDirectly(URL testFileUrl, boolean bzip) } fis.close(); assertTrue("Test file data too big for buffer", count < data.length); - return new String(data, 0, count, "UTF-8").split("\n"); + return new String(data, 0, count, StandardCharsets.UTF_8).split("\n"); } public void checkRecordSpanningMultipleSplits(String testFile, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java index f6fbbd58dabbf..b2b2dd62586d2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestLineRecordReader.java @@ -183,7 +183,7 @@ public String[] readRecordsDirectly(URL testFileUrl, boolean bzip) } fis.close(); assertTrue("Test file data too big for buffer", count < data.length); - return new String(data, 0, count, "UTF-8").split("\n"); + return new String(data, 0, count, StandardCharsets.UTF_8).split("\n"); } public void checkRecordSpanningMultipleSplits(String testFile, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java index 9c58c0d773f99..2aa7b34a007c8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/lib/output/TestFileOutputCommitter.java @@ -23,6 +23,7 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -843,7 +844,7 @@ public static String slurp(File f) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java index af6b9529e02a8..ae68d74d8d4d3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestConcatenatedCompressedInput.java @@ -38,6 +38,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.zip.Inflater; @@ -295,7 +296,7 @@ public void testPrototypeInflaterGzip() throws IOException { try { int numBytesUncompressed = inflater.inflate(uncompressedBuf); String outString = - new String(uncompressedBuf, 0, numBytesUncompressed, "UTF-8"); + new String(uncompressedBuf, 0, numBytesUncompressed, StandardCharsets.UTF_8); System.out.println("uncompressed data of first gzip member = [" + outString + "]"); } catch (java.util.zip.DataFormatException ex) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java index 51347296a861c..5fec24a1b1317 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestFixedLengthInputFormat.java @@ -97,7 +97,7 @@ public void testFormatCompressedIn() throws IOException { @Test (timeout=5000) public void testNoRecordLength() throws IOException { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); // Set the fixed length record length config property JobConf job = new JobConf(defaultConf); @@ -124,7 +124,7 @@ public void testNoRecordLength() throws IOException { @Test (timeout=5000) public void testZeroRecordLength() throws IOException { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); // Set the fixed length record length config property JobConf job = new JobConf(defaultConf); @@ -152,7 +152,7 @@ public void testZeroRecordLength() throws IOException { @Test (timeout=5000) public void testNegativeRecordLength() throws IOException { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); // Set the fixed length record length config property JobConf job = new JobConf(defaultConf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java index 4a7c3283d48a3..fd73410918d85 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/UtilsForTests.java @@ -24,6 +24,7 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; import java.util.ArrayList; import java.util.Arrays; @@ -180,7 +181,7 @@ public static String slurp(File f) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } @@ -194,7 +195,7 @@ public static String slurpHadoop(Path p, FileSystem fs) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java index d160de5db61a6..c971ccc6c02be 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/jobcontrol/JobControlTestUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mapred.jobcontrol; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.text.NumberFormat; import java.util.Iterator; import java.util.List; @@ -100,7 +101,7 @@ static void generateData(FileSystem fs, Path dirPath) throws IOException { FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt")); for (int i = 0; i < 10000; i++) { String line = generateRandomLine(); - out.write(line.getBytes("UTF-8")); + out.write(line.getBytes(StandardCharsets.UTF_8)); } out.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java index 2f30bb5ec0c50..4141d26933f82 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MapReduceTestUtil.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.text.NumberFormat; import java.util.ArrayList; import java.util.Iterator; @@ -116,7 +117,7 @@ public static void generateData(FileSystem fs, Path dirPath) FSDataOutputStream out = fs.create(new Path(dirPath, "data.txt")); for (int i = 0; i < 10000; i++) { String line = generateRandomLine(); - out.write(line.getBytes("UTF-8")); + out.write(line.getBytes(StandardCharsets.UTF_8)); } out.close(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java index 31b90aa0e506c..17cd5bfaace33 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java @@ -47,6 +47,7 @@ import java.io.PipedInputStream; import java.io.PipedOutputStream; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import static org.junit.Assert.assertEquals; @@ -193,7 +194,7 @@ private void testfailTask(Configuration conf) throws Exception { assertEquals("Exit code", -1, exitCode); runTool(conf, jc, new String[] { "-fail-task", taid.toString() }, out); - String answer = new String(out.toByteArray(), "UTF-8"); + String answer = new String(out.toByteArray(), StandardCharsets.UTF_8); assertTrue(answer.contains("Killed task " + taid + " by failing it")); } @@ -211,7 +212,7 @@ private void testKillTask(Configuration conf) throws Exception { assertEquals("Exit code", -1, exitCode); runTool(conf, jc, new String[] { "-kill-task", taid.toString() }, out); - String answer = new String(out.toByteArray(), "UTF-8"); + String answer = new String(out.toByteArray(), StandardCharsets.UTF_8); 
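The charset conversions in the test hunks above (and in KeyFieldBasedPartitioner, KeyFieldHelper, JobSplit and JobSplitWriter earlier) all rely on the same property of the JDK API: the String overloads that take a charset name declare the checked UnsupportedEncodingException, while the overloads that take a java.nio.charset.Charset do not, so the surrounding try/catch boilerplate can be deleted outright. A small self-contained sketch of the before/after, with names that are illustrative rather than taken from the patch:

    import java.nio.charset.StandardCharsets;

    public class CharsetSketch {
      public static void main(String[] args) {
        String line = "hello";

        // Before: getBytes(String charsetName) throws the checked
        // UnsupportedEncodingException, forcing a try/catch that can
        // never actually trigger for "UTF-8".
        //   byte[] bytes = line.getBytes("UTF-8");

        // After: the Charset overload cannot fail at run time, so the
        // try/catch and the UnsupportedEncodingException import disappear.
        byte[] bytes = line.getBytes(StandardCharsets.UTF_8);

        // Decoding mirrors the test assertions above.
        String decoded = new String(bytes, StandardCharsets.UTF_8);
        System.out.println(decoded.equals(line)); // prints true
      }
    }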
assertTrue(answer.contains("Killed task " + taid)); } @@ -231,7 +232,7 @@ private void testKillJob(Configuration conf) throws Exception { exitCode = runTool(conf, jc, new String[] { "-kill", jobId }, out); assertEquals("Exit code", 0, exitCode); - String answer = new String(out.toByteArray(), "UTF-8"); + String answer = new String(out.toByteArray(), StandardCharsets.UTF_8); assertTrue(answer.contains("Killed job " + jobId)); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFixedLengthInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFixedLengthInputFormat.java index 684d3e13d1f6a..be9e6deff3fc8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFixedLengthInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/lib/input/TestFixedLengthInputFormat.java @@ -102,7 +102,7 @@ public void testFormatCompressedIn() throws Exception { @Test (timeout=5000) public void testNoRecordLength() throws Exception { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); // Create the job and do not set fixed record length Job job = Job.getInstance(defaultConf); @@ -136,7 +136,7 @@ public void testNoRecordLength() throws Exception { @Test (timeout=5000) public void testZeroRecordLength() throws Exception { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); Job job = Job.getInstance(defaultConf); // Set the fixed length record length config property @@ -172,7 +172,7 @@ public void testZeroRecordLength() throws Exception { @Test (timeout=5000) public void testNegativeRecordLength() throws Exception { localFs.delete(workDir, true); - Path file = new Path(workDir, new String("testFormat.txt")); + Path file = new Path(workDir, "testFormat.txt"); createFile(file, null, 10, 10); // Set the fixed length record length config property Job job = Job.getInstance(defaultConf); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java index 02e4358a07523..a0dfd57c1d140 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestSpeculativeExecOnCluster.java @@ -28,8 +28,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; @@ -73,8 +73,7 @@ @Ignore 
@RunWith(Parameterized.class) public class TestSpeculativeExecOnCluster { - private static final Log LOG = LogFactory - .getLog(TestSpeculativeExecOnCluster.class); + private static final Logger LOG = LoggerFactory.getLogger(TestSpeculativeExecOnCluster.class); private static final int NODE_MANAGERS_COUNT = 2; private static final boolean ENABLE_SPECULATIVE_MAP = true; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java index 4ec1f7a1c561c..acb22ceb2a249 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeMapOutputCollectorDelegator.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapred.nativetask; import java.io.IOException; +import java.nio.charset.StandardCharsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.io.RawComparator; @@ -132,7 +132,7 @@ public void init(Context context) throws IOException, ClassNotFoundException { if (ret) { if (job.getBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, false)) { String codec = job.get(MRJobConfig.MAP_OUTPUT_COMPRESS_CODEC); - if (!NativeRuntime.supportsCompressionCodec(codec.getBytes(Charsets.UTF_8))) { + if (!NativeRuntime.supportsCompressionCodec(codec.getBytes(StandardCharsets.UTF_8))) { String message = "Native output collector doesn't support compression codec " + codec; LOG.error(message); throw new InvalidJobConfException(message); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java index 311ee223b9c76..df1c7ade18cda 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/NativeRuntime.java @@ -19,8 +19,8 @@ package org.apache.hadoop.mapred.nativetask; import java.io.IOException; +import java.nio.charset.StandardCharsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputBuffer; @@ -83,7 +83,7 @@ public static void configure(Configuration jobConf) { */ public synchronized static long createNativeObject(String clazz) { assertNativeLibraryLoaded(); - final long ret = JNICreateNativeObject(clazz.getBytes(Charsets.UTF_8)); + final long ret = JNICreateNativeObject(clazz.getBytes(StandardCharsets.UTF_8)); if (ret == 0) { LOG.warn("Can't create NativeObject for class " + clazz + ", probably not exist."); } @@ -95,8 +95,8 @@ public synchronized static long createNativeObject(String clazz) { */ public 
synchronized static long registerLibrary(String libraryName, String clazz) { assertNativeLibraryLoaded(); - final long ret = JNIRegisterModule(libraryName.getBytes(Charsets.UTF_8), - clazz.getBytes(Charsets.UTF_8)); + final long ret = JNIRegisterModule(libraryName.getBytes(StandardCharsets.UTF_8), + clazz.getBytes(StandardCharsets.UTF_8)); if (ret != 0) { LOG.warn("Can't create NativeObject for class " + clazz + ", probably not exist."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java index da09f59591bee..6fe0143b0120a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/buffer/ByteBufferDataWriter.java @@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.mapred.nativetask.NativeDataTarget; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * DataOutputStream implementation which buffers data in a fixed-size diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java index 5f7be806ed954..57476836cc793 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ConfigUtil.java @@ -17,11 +17,11 @@ */ package org.apache.hadoop.mapred.nativetask.util; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.classification.InterfaceAudience; @@ -31,8 +31,8 @@ public abstract class ConfigUtil { public static byte[][] toBytes(Configuration conf) { List nativeConfigs = new ArrayList(); for (Map.Entry e : conf) { - nativeConfigs.add(e.getKey().getBytes(Charsets.UTF_8)); - nativeConfigs.add(e.getValue().getBytes(Charsets.UTF_8)); + nativeConfigs.add(e.getKey().getBytes(StandardCharsets.UTF_8)); + nativeConfigs.add(e.getValue().getBytes(StandardCharsets.UTF_8)); } return nativeConfigs.toArray(new byte[nativeConfigs.size()][]); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java index af2c496eb6c64..c0161439980e5 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/main/java/org/apache/hadoop/mapred/nativetask/util/ReadWriteBuffer.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.mapred.nativetask.util; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.classification.InterfaceAudience; +import java.nio.charset.StandardCharsets; + @InterfaceAudience.Private public class ReadWriteBuffer { private byte[] _buff; @@ -135,13 +136,13 @@ public byte[] readBytes() { } public void writeString(String str) { - final byte[] bytes = str.getBytes(Charsets.UTF_8); + final byte[] bytes = str.getBytes(StandardCharsets.UTF_8); writeBytes(bytes, 0, bytes.length); } public String readString() { final byte[] bytes = readBytes(); - return new String(bytes, Charsets.UTF_8); + return new String(bytes, StandardCharsets.UTF_8); } private void checkWriteSpaceAndResizeIfNecessary(int toBeWritten) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestByteBufferReadWrite.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestByteBufferReadWrite.java index 98d0697100928..8dfa5322e84e6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestByteBufferReadWrite.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/buffer/TestByteBufferReadWrite.java @@ -19,7 +19,7 @@ import java.io.ByteArrayInputStream; import java.io.IOException; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.mapred.nativetask.NativeDataTarget; @@ -143,8 +143,8 @@ public void testFlush() throws IOException { Mockito.verify(target).finishSendData(); } - private static String toString(byte[] str) throws UnsupportedEncodingException { - return new String(str, 0, str.length, "UTF-8"); + private static String toString(byte[] str) { + return new String(str, 0, str.length, StandardCharsets.UTF_8); } private static class MockDataTarget implements NativeDataTarget { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java index 9b46dfa6243bd..2235eb0685fd4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-nativetask/src/test/java/org/apache/hadoop/mapred/nativetask/testutil/BytesFactory.java @@ -19,7 +19,7 @@ import java.util.Random; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.primitives.Ints; import org.apache.hadoop.thirdparty.com.google.common.primitives.Longs; diff 
--git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java index ed1a4fab97da4..9ec5058ec06b4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java @@ -136,7 +136,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder; import org.apache.hadoop.thirdparty.com.google.common.cache.CacheLoader; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; @@ -1350,7 +1350,7 @@ protected void verifyRequest(String appid, ChannelHandlerContext ctx, // verify - throws exception SecureShuffleUtils.verifyReply(urlHashStr, encryptedURL, tokenSecret); // verification passed - encode the reply - String reply = SecureShuffleUtils.generateHash(urlHashStr.getBytes(Charsets.UTF_8), + String reply = SecureShuffleUtils.generateHash(urlHashStr.getBytes(StandardCharsets.UTF_8), tokenSecret); response.headers().set( SecureShuffleUtils.HTTP_HEADER_REPLY_URL_HASH, reply); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java index f0f9a34f55444..1cc099e14baee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/BaileyBorweinPlouffe.java @@ -25,6 +25,7 @@ import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Iterator; import java.util.List; @@ -53,7 +54,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * A map/reduce program that uses Bailey-Borwein-Plouffe to compute exact @@ -158,7 +158,7 @@ protected void cleanup(Context context final OutputStream outputstream = fs.create(outfile); try { final PrintWriter out = new PrintWriter( - new OutputStreamWriter(outputstream, Charsets.UTF_8), true); + new OutputStreamWriter(outputstream, StandardCharsets.UTF_8), true); // write hex text print(out, hex.iterator(), "Pi = 0x3.", "%02X", 5, 5); out.println("Total number of hexadecimal digits is " diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java index 26a3009918674..58518de084d35 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMean.java @@ -21,6 +21,7 @@ import java.io.BufferedReader; import 
java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; @@ -37,8 +38,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; - public class WordMean extends Configured implements Tool { private double mean = 0; @@ -96,7 +95,7 @@ public static class WordMeanReducer extends public void reduce(Text key, Iterable values, Context context) throws IOException, InterruptedException { - int theSum = 0; + long theSum = 0; for (LongWritable val : values) { theSum += val.get(); } @@ -127,7 +126,7 @@ private double readAndCalcMean(Path path, Configuration conf) // average = total sum / number of elements; try { - br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8)); + br = new BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8)); long count = 0; long length = 0; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java index 9acf62bd17e24..c209da4ee0e5b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordMedian.java @@ -21,6 +21,7 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; @@ -39,7 +40,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; public class WordMedian extends Configured implements Tool { @@ -130,7 +130,7 @@ private double readAndFindMedian(String path, int medianIndex1, BufferedReader br = null; try { - br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8)); + br = new BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8)); int num = 0; String line; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java index 2a7733b875c09..57c35eb6e0766 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/WordStandardDeviation.java @@ -21,6 +21,7 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.StringTokenizer; import org.apache.hadoop.conf.Configuration; @@ -37,7 +38,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; public class WordStandardDeviation extends Configured implements Tool { @@ -137,7 +137,7 @@ private double readAndCalcStdDev(Path path, Configuration conf) double stddev = 0; BufferedReader br = null; try { - br = new BufferedReader(new InputStreamReader(fs.open(file), Charsets.UTF_8)); + br = new 
BufferedReader(new InputStreamReader(fs.open(file), StandardCharsets.UTF_8)); long count = 0; long length = 0; long square = 0; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java index e2d034193beb9..56b580ecc63be 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java @@ -19,6 +19,7 @@ package org.apache.hadoop.examples.dancing; import java.io.*; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.StringTokenizer; @@ -33,7 +34,6 @@ import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat; import org.apache.hadoop.util.*; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * Launch a distributed pentomino solver. @@ -141,7 +141,7 @@ private static long createInputDirectory(FileSystem fs, Path input = new Path(dir, "part1"); PrintWriter file = new PrintWriter(new OutputStreamWriter(new BufferedOutputStream - (fs.create(input), 64*1024), Charsets.UTF_8)); + (fs.create(input), 64*1024), StandardCharsets.UTF_8)); for(int[] prefix: splits) { for(int i=0; i < prefix.length; ++i) { if (i != 0) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java index aa2df72af2805..402ff028dfb42 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/Sudoku.java @@ -19,9 +19,9 @@ package org.apache.hadoop.examples.dancing; import java.io.*; +import java.nio.charset.StandardCharsets; import java.util.*; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** * This class uses the dancing links algorithm from Knuth to solve sudoku @@ -136,7 +136,7 @@ public void solution(List> names) { */ public Sudoku(InputStream stream) throws IOException { BufferedReader file = new BufferedReader( - new InputStreamReader(stream, Charsets.UTF_8)); + new InputStreamReader(stream, StandardCharsets.UTF_8)); String line = file.readLine(); List result = new ArrayList(); while (line != null) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java index 16273fd0baaa4..bffaf8fd1574d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Parser.java @@ -25,6 +25,7 @@ import java.io.InputStreamReader; import java.io.OutputStreamWriter; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -34,7 +35,6 @@ import org.apache.hadoop.examples.pi.math.Bellard; import org.apache.hadoop.examples.pi.math.Bellard.Parameter; -import 
org.apache.hadoop.thirdparty.com.google.common.base.Charsets; /** A class for parsing outputs */ public final class Parser { @@ -80,7 +80,7 @@ private void parse(File f, Map> sums) throws IOExcep m.put(p, new ArrayList()); final BufferedReader in = new BufferedReader( - new InputStreamReader(new FileInputStream(f), Charsets.UTF_8)); + new InputStreamReader(new FileInputStream(f), StandardCharsets.UTF_8)); try { for(String line; (line = in.readLine()) != null; ) try { @@ -137,7 +137,7 @@ Map> parse(String inputpath, String outputdir final PrintWriter out = new PrintWriter( new OutputStreamWriter(new FileOutputStream( - new File(outputdir, p + ".txt")), Charsets.UTF_8), true); + new File(outputdir, p + ".txt")), StandardCharsets.UTF_8), true); try { for(int i = 0; i < results.size(); i++) out.println(DistSum.taskResult2string(p + "." + i, results.get(i))); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java index ddbbf6334379d..4e8461525ae3f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/Util.java @@ -25,6 +25,7 @@ import java.io.OutputStreamWriter; import java.io.PrintStream; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Arrays; @@ -46,7 +47,6 @@ import org.apache.hadoop.mapreduce.Job; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.concurrent.HadoopExecutors; /** Utility methods */ @@ -216,7 +216,8 @@ public static PrintWriter createWriter(File dir, String prefix) throws IOExcepti final File f = new File(dir, prefix + dateFormat.format(new Date(System.currentTimeMillis())) + ".txt"); if (!f.exists()) - return new PrintWriter(new OutputStreamWriter(new FileOutputStream(f), Charsets.UTF_8)); + return new PrintWriter(new OutputStreamWriter( + new FileOutputStream(f), StandardCharsets.UTF_8)); try {Thread.sleep(10);} catch (InterruptedException e) {} } @@ -291,7 +292,7 @@ static List readJobOutputs(FileSystem fs, Path outdir) throws IOExce for(FileStatus status : fs.listStatus(outdir)) { if (status.getPath().getName().startsWith("part-")) { final BufferedReader in = new BufferedReader( - new InputStreamReader(fs.open(status.getPath()), Charsets.UTF_8)); + new InputStreamReader(fs.open(status.getPath()), StandardCharsets.UTF_8)); try { for(String line; (line = in.readLine()) != null; ) results.add(TaskResult.valueOf(line)); @@ -310,13 +311,14 @@ static List readJobOutputs(FileSystem fs, Path outdir) throws IOExce static void writeResults(String name, List results, FileSystem fs, String dir) throws IOException { final Path outfile = new Path(dir, name + ".txt"); Util.out.println(name + "> writing results to " + outfile); - final PrintWriter out = new PrintWriter(new OutputStreamWriter(fs.create(outfile), Charsets.UTF_8), true); + final PrintWriter printWriter = new PrintWriter(new OutputStreamWriter( + fs.create(outfile), StandardCharsets.UTF_8), true); try { for(TaskResult r : results) - out.println(r); + printWriter.println(r); } finally { - out.close(); + printWriter.close(); } } diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java index 7998d4a8f6133..6df1f1e497783 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java @@ -19,6 +19,7 @@ package org.apache.hadoop.examples.terasort; import java.io.*; +import java.nio.charset.StandardCharsets; import java.util.*; import org.apache.hadoop.conf.Configuration; @@ -28,7 +29,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; class TeraScheduler { private static final Logger LOG = @@ -75,7 +75,7 @@ public String toString() { List readFile(String filename) throws IOException { List result = new ArrayList(10000); try (BufferedReader in = new BufferedReader( - new InputStreamReader(new FileInputStream(filename), Charsets.UTF_8))) { + new InputStreamReader(new FileInputStream(filename), StandardCharsets.UTF_8))) { String line = in.readLine(); while (line != null) { result.add(line); diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java index 528163103da44..86e8d9c2a0ed9 100644 --- a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java @@ -19,7 +19,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -108,11 +108,7 @@ public static class OutputBufferThread extends Thread { public OutputBufferThread(InputStream is) { this.setDaemon(true); output = new ArrayList(); - try { - reader = new BufferedReader(new InputStreamReader(is, "UTF-8")); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("Unsupported encoding " + e.toString()); - } + reader = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8)); } @Override diff --git a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java index 471f35494497e..8acc7108720d6 100644 --- a/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/main/java/org/apache/hadoop/tools/HadoopArchives.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashSet; import java.util.Iterator; @@ -72,8 +73,6 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; - /** * a archive creation utility. 
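(Aside on the charset change repeated throughout the hunks above and below: a minimal before/after sketch for illustration only; it is not a hunk from this patch, and the stream/string names are made up.)

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.io.InputStreamReader;
    import java.io.Reader;
    import java.nio.charset.StandardCharsets;

    public class CharsetMigrationSketch {
      public static void main(String[] args) throws Exception {
        String version = "1.0";
        InputStream in = new ByteArrayInputStream(new byte[0]);

        // Old patterns replaced by this patch: the shaded Guava Charsets.UTF_8
        // constant, Charset.forName("UTF-8"), or String-named overloads such as
        // getBytes("UTF-8") that declare a checked UnsupportedEncodingException.

        // New pattern: the JDK constant. No third-party dependency, no checked
        // exception, and no per-call charset lookup.
        byte[] bytes = version.getBytes(StandardCharsets.UTF_8);
        Reader reader = new InputStreamReader(in, StandardCharsets.UTF_8);

        System.out.println(bytes.length + " bytes; reader ready=" + reader.ready());
      }
    }
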
* This class provides methods that can be used @@ -747,7 +746,7 @@ public void configure(JobConf conf) { indexStream = fs.create(index); outStream = fs.create(masterIndex); String version = VERSION + " \n"; - outStream.write(version.getBytes(Charsets.UTF_8)); + outStream.write(version.getBytes(StandardCharsets.UTF_8)); } catch(IOException e) { throw new RuntimeException(e); @@ -766,7 +765,7 @@ public void reduce(IntWritable key, Iterator values, while(values.hasNext()) { Text value = values.next(); String towrite = value.toString() + "\n"; - indexStream.write(towrite.getBytes(Charsets.UTF_8)); + indexStream.write(towrite.getBytes(StandardCharsets.UTF_8)); written++; if (written > numIndexes -1) { // every 1000 indexes we report status @@ -775,7 +774,7 @@ public void reduce(IntWritable key, Iterator values, endIndex = keyVal; String masterWrite = startIndex + " " + endIndex + " " + startPos + " " + indexStream.getPos() + " \n" ; - outStream.write(masterWrite.getBytes(Charsets.UTF_8)); + outStream.write(masterWrite.getBytes(StandardCharsets.UTF_8)); startPos = indexStream.getPos(); startIndex = endIndex; written = 0; @@ -788,7 +787,7 @@ public void close() throws IOException { if (written > 0) { String masterWrite = startIndex + " " + keyVal + " " + startPos + " " + indexStream.getPos() + " \n"; - outStream.write(masterWrite.getBytes(Charsets.UTF_8)); + outStream.write(masterWrite.getBytes(StandardCharsets.UTF_8)); } // close the streams outStream.close(); diff --git a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java index b1755affa8833..3267a683c275c 100644 --- a/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java +++ b/hadoop-tools/hadoop-archives/src/test/java/org/apache/hadoop/tools/TestHadoopArchives.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.io.PrintStream; import java.net.URI; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -81,7 +82,7 @@ public class TestHadoopArchives { private static String createFile(Path root, FileSystem fs, String... dirsAndFile ) throws IOException { String fileBaseName = dirsAndFile[dirsAndFile.length - 1]; - return createFile(root, fs, fileBaseName.getBytes("UTF-8"), dirsAndFile); + return createFile(root, fs, fileBaseName.getBytes(StandardCharsets.UTF_8), dirsAndFile); } private static String createFile(Path root, FileSystem fs, byte[] fileContent, String... 
dirsAndFile @@ -395,7 +396,7 @@ public void testReadFileContent() throws Exception { } else if ("zero-length".equals(baseName)) { assertEquals(0, actualContentSimple.length); } else { - String actual = new String(actualContentSimple, "UTF-8"); + String actual = new String(actualContentSimple, StandardCharsets.UTF_8); assertEquals(baseName, actual); } readFileCount++; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java index cb3af17102f9d..55351f0c81396 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AReadOpContext.java @@ -29,7 +29,7 @@ import javax.annotation.Nullable; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import static java.util.Objects.requireNonNull; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java index 1eacc4cbe07b7..f2ece63a854fa 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/WriteOperationHelper.java @@ -60,7 +60,7 @@ import org.apache.hadoop.util.functional.CallableRaisingIOE; import org.apache.hadoop.util.Preconditions; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; import static org.apache.hadoop.fs.s3a.Invoker.*; import static org.apache.hadoop.fs.store.audit.AuditingFunctions.withinAuditSpan; diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationTokenBinding.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationTokenBinding.java index 6af413e44d6c1..f33944070d94d 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationTokenBinding.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/auth/delegation/AbstractDelegationTokenBinding.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.URI; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.Optional; import org.slf4j.Logger; @@ -304,7 +304,7 @@ public String getUserAgentField() { * @return a password. 
*/ protected static byte[] getSecretManagerPasssword() { - return "non-password".getBytes(Charset.forName("UTF-8")); + return "non-password".getBytes(StandardCharsets.UTF_8); } /** diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java index b7eae8ead7096..58e38c2873bd0 100644 --- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java +++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/impl/S3AMultipartUploader.java @@ -25,6 +25,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Comparator; import java.util.HashSet; @@ -40,8 +41,6 @@ import software.amazon.awssdk.services.s3.model.UploadPartRequest; import software.amazon.awssdk.services.s3.model.UploadPartResponse; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; - import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -132,7 +131,7 @@ public CompletableFuture startUpload( PutObjectOptions.keepingDirs()); statistics.uploadStarted(); return BBUploadHandle.from(ByteBuffer.wrap( - uploadId.getBytes(Charsets.UTF_8))); + uploadId.getBytes(StandardCharsets.UTF_8))); })); } @@ -151,7 +150,7 @@ public CompletableFuture putPart( checkUploadId(uploadIdBytes); String key = context.pathToKey(dest); String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); + StandardCharsets.UTF_8); return context.submit(new CompletableFuture<>(), () -> { UploadPartRequest request = writeOperations.newUploadPartRequestBuilder(key, @@ -189,7 +188,7 @@ public CompletableFuture complete( String key = context.pathToKey(dest); String uploadIdStr = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); + StandardCharsets.UTF_8); ArrayList eTags = new ArrayList<>(); eTags.ensureCapacity(handles.size()); long totalLength = 0; @@ -221,7 +220,7 @@ public CompletableFuture complete( finalLen ); - byte[] eTag = result.eTag().getBytes(Charsets.UTF_8); + byte[] eTag = result.eTag().getBytes(StandardCharsets.UTF_8); statistics.uploadCompleted(); return (PathHandle) () -> ByteBuffer.wrap(eTag); })); @@ -237,7 +236,7 @@ public CompletableFuture abort( final byte[] uploadIdBytes = uploadId.toByteArray(); checkUploadId(uploadIdBytes); String uploadIdString = new String(uploadIdBytes, 0, uploadIdBytes.length, - Charsets.UTF_8); + StandardCharsets.UTF_8); return context.submit(new CompletableFuture<>(), () -> { writeOperations.abortMultipartCommit( diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java index 239e52a7264a0..db1f5b30a0fb9 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/S3ATestUtils.java @@ -52,7 +52,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.service.ServiceOperations; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListeningExecutorService; import 
org.apache.hadoop.thirdparty.com.google.common.util.concurrent.MoreExecutors; import org.apache.hadoop.util.BlockingThreadPoolExecutorService; @@ -74,6 +73,7 @@ import java.io.InputStream; import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import java.text.DateFormat; import java.text.SimpleDateFormat; import java.util.ArrayList; @@ -891,7 +891,7 @@ private static CompletableFuture put(FileSystem fs, return submit(EXECUTOR, () -> { try (DurationInfo ignore = new DurationInfo(LOG, false, "Creating %s", path)) { - createFile(fs, path, true, text.getBytes(Charsets.UTF_8)); + createFile(fs, path, true, text.getBytes(StandardCharsets.UTF_8)); return path; } }); diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java index c3030aa227d2c..3b21a08e30a0a 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/auth/delegation/ILoadTestSessionCredentials.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.FileWriter; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.concurrent.CompletionService; @@ -134,7 +135,7 @@ protected String getFilePrefix() { @Test public void testCreate10Tokens() throws Throwable { File file = fetchTokens(10); - String csv = FileUtils.readFileToString(file, "UTF-8"); + String csv = FileUtils.readFileToString(file, StandardCharsets.UTF_8); LOG.info("CSV data\n{}", csv); } diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java index a76a65be8bb3e..65b508f96c273 100644 --- a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/commit/ITestCommitOperations.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -495,7 +496,7 @@ public void testUploadEmptyFile() throws Throwable { public void testUploadSmallFile() throws Throwable { File tempFile = File.createTempFile("commit", ".txt"); String text = "hello, world"; - FileUtils.write(tempFile, text, "UTF-8"); + FileUtils.write(tempFile, text, StandardCharsets.UTF_8); CommitOperations actions = newCommitOperations(); Path dest = methodSubPath("testUploadSmallFile"); S3AFileSystem fs = getFileSystem(); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java index 3a503ddfa2b20..45fbf791908a3 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java @@ -26,7 +26,7 @@ import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import 
java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Date; @@ -178,7 +178,7 @@ public FolderRenamePending(Path redoFile, NativeAzureFileSystem fs) "Error reading pending rename file contents -- " + "maximum file size exceeded"); } - String contents = new String(bytes, 0, l, Charset.forName("UTF-8")); + String contents = new String(bytes, 0, l, StandardCharsets.UTF_8); // parse the JSON JsonNode json = null; @@ -301,7 +301,7 @@ public void writeFile(NativeAzureFileSystem fs) throws IOException { // Write file. try { output = fs.createInternal(path, FsPermission.getFileDefault(), false, null); - output.write(contents.getBytes(Charset.forName("UTF-8"))); + output.write(contents.getBytes(StandardCharsets.UTF_8)); } catch (IOException e) { throw new IOException("Unable to write RenamePending file for folder rename from " + srcKey + " to " + dstKey, e); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java index 40bf6f4ae1a70..4fad2254b1795 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobInputStream.java @@ -30,8 +30,8 @@ import java.io.InputStream; import java.util.ArrayList; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.FSExceptionMessages; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; @@ -46,7 +46,7 @@ */ final class PageBlobInputStream extends InputStream { - private static final Log LOG = LogFactory.getLog(PageBlobInputStream.class); + private static final Logger LOG = LoggerFactory.getLogger(PageBlobInputStream.class); // The blob we're reading from. private final CloudPageBlobWrapper blob; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java index 3c98405ff9f04..357821579cc00 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/PageBlobOutputStream.java @@ -39,8 +39,8 @@ import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.fs.azure.StorageInterface.CloudPageBlobWrapper; import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.classification.VisibleForTesting; @@ -120,7 +120,7 @@ final class PageBlobOutputStream extends OutputStream implements Syncable, Strea // Whether the stream has been closed. private boolean closed = false; - public static final Log LOG = LogFactory.getLog(AzureNativeFileSystemStore.class); + public static final Logger LOG = LoggerFactory.getLogger(AzureNativeFileSystemStore.class); // Set the minimum page blob file size to 128MB, which is >> the default // block size of 32MB. 
This default block size is often used as the diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java index 01ab06cb024e3..bdbf7523917bd 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfRenewingLease.java @@ -18,8 +18,8 @@ package org.apache.hadoop.fs.azure; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper; import org.apache.hadoop.classification.VisibleForTesting; @@ -58,7 +58,7 @@ public class SelfRenewingLease { // Time to wait to renew lease in milliseconds public static final int LEASE_RENEWAL_PERIOD = 40000; - private static final Log LOG = LogFactory.getLog(SelfRenewingLease.class); + private static final Logger LOG = LoggerFactory.getLogger(SelfRenewingLease.class); // Used to allocate thread serial numbers in thread name private static AtomicInteger threadNumber = new AtomicInteger(0); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java index a9e3df907f956..eb6b1b2f41c38 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SelfThrottlingIntercept.java @@ -21,8 +21,8 @@ import java.net.HttpURLConnection; import java.util.Date; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import com.microsoft.azure.storage.OperationContext; @@ -32,39 +32,38 @@ import com.microsoft.azure.storage.StorageEvent; /* - * Self throttling is implemented by hooking into send & response callbacks + * Self throttling is implemented by hooking into send & response callbacks * One instance of this class is created per operationContext so each blobUpload/blobDownload/etc. - * - * Self throttling only applies to 2nd and subsequent packets of an operation. This is a simple way to + * + * Self throttling only applies to 2nd and subsequent packets of an operation. This is a simple way to * ensure it only affects bulk transfers and not every tiny request. - * + * * A blobDownload will involve sequential packet transmissions and so there are no concurrency concerns * A blobUpload will generally involve concurrent upload worker threads that share one operationContext and one throttling instance. - * -- we do not track the latencies for each worker thread as they are doing similar work and will rarely collide in practice. - * -- concurrent access to lastE2Edelay must be protected. - * -- volatile is necessary and should be sufficient to protect simple access to primitive values (java 1.5 onwards) + * -- we do not track the latencies for each worker thread as they are doing similar work and will rarely collide in practice. + * -- concurrent access to lastE2Edelay must be protected. 
+ * -- volatile is necessary and should be sufficient to protect simple access to primitive values (java 1.5 onwards) * -- synchronized{} blocks are also used to be conservative and for easier maintenance. - * + * * If an operation were to perform concurrent GETs and PUTs there is the possibility of getting confused regarding * whether lastE2Edelay was a read or write measurement. This scenario does not occur. * * readFactor = target read throughput as factor of unrestricted throughput. * writeFactor = target write throughput as factor of unrestricted throughput. - * + * * As we introduce delays it is important to only measure the actual E2E latency and not the augmented latency * To achieve this, we fiddle the 'startDate' of the transfer tracking object. */ /** - * + * * Introduces delays in our Azure traffic to prevent overrunning the server-side throttling limits. * */ @InterfaceAudience.Private public class SelfThrottlingIntercept { - public static final Log LOG = LogFactory - .getLog(SelfThrottlingIntercept.class); + public static final Logger LOG = LoggerFactory.getLogger(SelfThrottlingIntercept.class); private final float readFactor; private final float writeFactor; @@ -73,7 +72,7 @@ public class SelfThrottlingIntercept { // Concurrency: access to non-final members must be thread-safe private long lastE2Elatency; - public SelfThrottlingIntercept(OperationContext operationContext, + public SelfThrottlingIntercept(OperationContext operationContext, float readFactor, float writeFactor) { this.operationContext = operationContext; this.readFactor = readFactor; @@ -193,4 +192,4 @@ public void eventOccurred(ResponseReceivedEvent event) { responseReceived(event); } } -} \ No newline at end of file +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java index 924ecd30b31ef..8707c401edfa9 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SendRequestIntercept.java @@ -21,8 +21,8 @@ import java.net.HttpURLConnection; import java.security.InvalidKeyException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import com.microsoft.azure.storage.Constants.HeaderConstants; @@ -40,7 +40,7 @@ @InterfaceAudience.Private public final class SendRequestIntercept extends StorageEvent { - public static final Log LOG = LogFactory.getLog(SendRequestIntercept.class); + public static final Logger LOG = LoggerFactory.getLogger(SendRequestIntercept.class); private static final String ALLOW_ALL_REQUEST_PRECONDITIONS = "*"; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java index 5596f7e67c25a..9847a9ef72210 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/SimpleKeyProvider.java @@ -20,8 +20,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; 
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.ProviderUtils; @@ -32,7 +32,7 @@ */ @InterfaceAudience.Private public class SimpleKeyProvider implements KeyProvider { - private static final Log LOG = LogFactory.getLog(SimpleKeyProvider.class); + private static final Logger LOG = LoggerFactory.getLogger(SimpleKeyProvider.class); protected static final String KEY_ACCOUNT_KEY_PREFIX = "fs.azure.account.key."; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java index 699fde7dee745..615daa74dfab6 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/BandwidthGaugeUpdater.java @@ -21,8 +21,8 @@ import java.util.ArrayList; import java.util.Date; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; /** @@ -31,11 +31,10 @@ */ @InterfaceAudience.Private public final class BandwidthGaugeUpdater { - public static final Log LOG = LogFactory - .getLog(BandwidthGaugeUpdater.class); - + public static final Logger LOG = LoggerFactory.getLogger(BandwidthGaugeUpdater.class); + public static final String THREAD_NAME = "AzureNativeFilesystemStore-UploadBandwidthUpdater"; - + private static final int DEFAULT_WINDOW_SIZE_MS = 1000; private static final int PROCESS_QUEUE_INITIAL_CAPACITY = 1000; private int windowSizeMs; @@ -161,7 +160,7 @@ public void resumeAutoUpdate() { * Triggers the update of the metrics gauge based on all the blocks * uploaded/downloaded so far. This is typically done periodically in a * dedicated update thread, but exposing as public for unit test purposes. - * + * * @param updateWrite If true, we'll update the write (upload) metrics. * Otherwise we'll update the read (download) ones. */ @@ -173,7 +172,7 @@ public void triggerUpdate(boolean updateWrite) { allBlocksWritten = createNewToProcessQueue(); } else if (!updateWrite && !allBlocksRead.isEmpty()) { toProcess = allBlocksRead; - allBlocksRead = createNewToProcessQueue(); + allBlocksRead = createNewToProcessQueue(); } } @@ -195,7 +194,7 @@ public void triggerUpdate(boolean updateWrite) { long maxSingleBlockTransferRate = 0; long bytesInLastSecond = 0; for (BlockTransferWindow currentWindow : toProcess) { - long windowDuration = currentWindow.getEndDate().getTime() + long windowDuration = currentWindow.getEndDate().getTime() - currentWindow.getStartDate().getTime(); if (windowDuration == 0) { // Edge case, assume it took 1 ms but we were too fast @@ -209,8 +208,8 @@ public void triggerUpdate(boolean updateWrite) { // This block started its transfer before our time window, // interpolate to estimate how many bytes from that block // were actually transferred during our time window. 
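(The interpolation described in the comment above, and computed in the next hunk lines, is easiest to see with concrete numbers; this worked fragment is illustrative only and uses made-up figures, not values from the patch.)

    // Suppose a block transfer ran from t = 0 ms to t = 2000 ms, so
    // windowDuration = 2000, and the gauge window being reported covers the
    // last second, i.e. cutoffTime = 1000.
    long bytesTransferred = 8_000_000L;
    long windowDuration   = 2000L;          // endDate - startDate
    long endMinusCutoff   = 2000L - 1000L;  // part of the transfer inside the window
    long adjustedBytes = (bytesTransferred * endMinusCutoff) / windowDuration;
    // adjustedBytes == 4_000_000: half the transfer overlapped the reporting
    // window, so half the bytes are attributed to it.
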
- long adjustedBytes = (currentWindow.getBytesTransferred() - * (currentWindow.getEndDate().getTime() - cutoffTime)) + long adjustedBytes = (currentWindow.getBytesTransferred() + * (currentWindow.getEndDate().getTime() - cutoffTime)) / windowDuration; bytesInLastSecond += adjustedBytes; } @@ -286,4 +285,4 @@ public void close() { } } -} \ No newline at end of file +} diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java index de503bf19072d..5c64b3cbfb2e8 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/metrics/ResponseReceivedMetricUpdater.java @@ -20,8 +20,8 @@ import java.net.HttpURLConnection; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import com.microsoft.azure.storage.Constants.HeaderConstants; @@ -38,7 +38,7 @@ @InterfaceAudience.Private public final class ResponseReceivedMetricUpdater extends StorageEvent { - public static final Log LOG = LogFactory.getLog(ResponseReceivedMetricUpdater.class); + public static final Logger LOG = LoggerFactory.getLogger(ResponseReceivedMetricUpdater.class); private final AzureFileSystemInstrumentation instrumentation; private final BandwidthGaugeUpdater blockUploadGaugeUpdater; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java index 369bb07aaf556..c375653970003 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/oauth2/AzureADAuthenticator.java @@ -32,7 +32,7 @@ import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; import com.fasterxml.jackson.core.JsonToken; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -373,7 +373,7 @@ private static AzureADToken getTokenSingleCall(String authEndpoint, conn.getRequestProperties()); if (httpMethod.equals("POST")) { conn.setDoOutput(true); - conn.getOutputStream().write(payload.getBytes("UTF-8")); + conn.getOutputStream().write(payload.getBytes(StandardCharsets.UTF_8)); } int httpResponseCode = conn.getResponseCode(); diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java index 5f28cec5eb231..07115a0314f84 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClient.java @@ -38,7 +38,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.store.LogExactlyOnce; import 
org.apache.hadoop.thirdparty.com.google.common.base.Strings; import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback; diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLocatedFileStatus.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLocatedFileStatus.java index 29da2c504355a..76aa50bb507a4 100644 --- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLocatedFileStatus.java +++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsLocatedFileStatus.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.LocatedFileStatus; -import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull; +import static org.apache.hadoop.util.Preconditions.checkNotNull; /** * {@link LocatedFileStatus} extended to also carry an ETag. diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java deleted file mode 100644 index 4389fda393c69..0000000000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestFileSystemOperationsWithThreads.java +++ /dev/null @@ -1,821 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.io.IOException; -import java.net.URI; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.azure.NativeAzureFileSystem.FolderRenamePending; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.mockito.Mockito; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; - -/** - * Tests the Native Azure file system (WASB) using parallel threads for rename and delete operations. 
- */ -public class ITestFileSystemOperationsWithThreads extends AbstractWasbTestBase { - - private final int renameThreads = 10; - private final int deleteThreads = 20; - private int iterations = 1; - private LogCapturer logs = null; - - @Rule - public ExpectedException exception = ExpectedException.none(); - - @Before - public void setUp() throws Exception { - super.setUp(); - Configuration conf = fs.getConf(); - - // By default enable parallel threads for rename and delete operations. - // Also enable flat listing of blobs for these operations. - conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, renameThreads); - conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, deleteThreads); - conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, true); - - URI uri = fs.getUri(); - fs.initialize(uri, conf); - - // Capture logs - logs = LogCapturer.captureLogs(new Log4JLogger(org.apache.log4j.Logger - .getRootLogger())); - } - - /* - * Helper method to create sub directory and different types of files - * for multiple iterations. - */ - private void createFolder(FileSystem fs, String root) throws Exception { - fs.mkdirs(new Path(root)); - for (int i = 0; i < this.iterations; i++) { - fs.mkdirs(new Path(root + "/" + i)); - fs.createNewFile(new Path(root + "/" + i + "/fileToRename")); - fs.createNewFile(new Path(root + "/" + i + "/file/to/rename")); - fs.createNewFile(new Path(root + "/" + i + "/file+to%rename")); - fs.createNewFile(new Path(root + "/fileToRename" + i)); - } - } - - /* - * Helper method to do rename operation and validate all files in source folder - * doesn't exists and similar files exists in new folder. - */ - private void validateRenameFolder(FileSystem fs, String source, String dest) throws Exception { - // Create source folder with files. - createFolder(fs, source); - Path sourceFolder = new Path(source); - Path destFolder = new Path(dest); - - // rename operation - assertTrue(fs.rename(sourceFolder, destFolder)); - assertTrue(fs.exists(destFolder)); - - for (int i = 0; i < this.iterations; i++) { - // Check destination folder and files exists. - assertTrue(fs.exists(new Path(dest + "/" + i))); - assertTrue(fs.exists(new Path(dest + "/" + i + "/fileToRename"))); - assertTrue(fs.exists(new Path(dest + "/" + i + "/file/to/rename"))); - assertTrue(fs.exists(new Path(dest + "/" + i + "/file+to%rename"))); - assertTrue(fs.exists(new Path(dest + "/fileToRename" + i))); - - // Check source folder and files doesn't exists. - assertFalse(fs.exists(new Path(source + "/" + i))); - assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename"))); - assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename"))); - assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename"))); - assertFalse(fs.exists(new Path(source + "/fileToRename" + i))); - } - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameSmallFolderWithThreads() throws Exception { - - validateRenameFolder(fs, "root", "rootnew"); - - // With single iteration, we would have created 7 blobs. - int expectedThreadsCreated = Math.min(7, renameThreads); - - // Validate from logs that threads are created. 
- String content = logs.getOutput(); - assertInLog(content, "ms with threads: " + expectedThreadsCreated); - - // Validate thread executions - for (int i = 0; i < expectedThreadsCreated; i++) { - assertInLog(content, - "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i); - } - - // Also ensure that we haven't spawned extra threads. - if (expectedThreadsCreated < renameThreads) { - for (int i = expectedThreadsCreated; i < renameThreads; i++) { - assertNotInLog(content, - "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i); - } - } - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameLargeFolderWithThreads() throws Exception { - - // Populate source folder with large number of files and directories. - this.iterations = 10; - validateRenameFolder(fs, "root", "rootnew"); - - // Validate from logs that threads are created. - String content = logs.getOutput(); - assertInLog(content, "ms with threads: " + renameThreads); - - // Validate thread executions - for (int i = 0; i < renameThreads; i++) { - assertInLog(content, - "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i); - } - } - - /* - * Test case for rename operation with threads disabled and flat listing enabled. - */ - @Test - public void testRenameLargeFolderDisableThreads() throws Exception { - Configuration conf = fs.getConf(); - - // Number of threads set to 0 or 1 disables threads. - conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 0); - URI uri = fs.getUri(); - fs.initialize(uri, conf); - - // Populate source folder with large number of files and directories. - this.iterations = 10; - validateRenameFolder(fs, "root", "rootnew"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, - "Disabling threads for Rename operation as thread count 0"); - - // Validate no thread executions - for (int i = 0; i < renameThreads; i++) { - String term = "AzureBlobRenameThread-" - + Thread.currentThread().getName() - + "-" + i; - assertNotInLog(content, term); - } - } - - /** - * Assert that a log contains the given term. - * @param content log output - * @param term search term - */ - protected void assertInLog(String content, String term) { - assertTrue("Empty log", !content.isEmpty()); - if (!content.contains(term)) { - String message = "No " + term + " found in logs"; - LOG.error(message); - System.err.println(content); - fail(message); - } - } - - /** - * Assert that a log does not contain the given term. - * @param content log output - * @param term search term - */ - protected void assertNotInLog(String content, String term) { - assertTrue("Empty log", !content.isEmpty()); - if (content.contains(term)) { - String message = term + " found in logs"; - LOG.error(message); - System.err.println(content); - fail(message); - } - } - - /* - * Test case for rename operation with threads and flat listing disabled. - */ - @Test - public void testRenameSmallFolderDisableThreadsDisableFlatListing() throws Exception { - Configuration conf = fs.getConf(); - conf = fs.getConf(); - - // Number of threads set to 0 or 1 disables threads. - conf.setInt(NativeAzureFileSystem.AZURE_RENAME_THREADS, 1); - conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false); - URI uri = fs.getUri(); - fs.initialize(uri, conf); - - validateRenameFolder(fs, "root", "rootnew"); - - // Validate from logs that threads are disabled. 
- String content = logs.getOutput(); - assertInLog(content, - "Disabling threads for Rename operation as thread count 1"); - - // Validate no thread executions - for (int i = 0; i < renameThreads; i++) { - assertNotInLog(content, - "AzureBlobRenameThread-" + Thread.currentThread().getName() + "-" + i); - } - } - - /* - * Helper method to do delete operation and validate all files in source folder - * doesn't exists after delete operation. - */ - private void validateDeleteFolder(FileSystem fs, String source) throws Exception { - // Create folder with files. - createFolder(fs, "root"); - Path sourceFolder = new Path(source); - - // Delete operation - assertTrue(fs.delete(sourceFolder, true)); - assertFalse(fs.exists(sourceFolder)); - - for (int i = 0; i < this.iterations; i++) { - // check that source folder and files doesn't exists - assertFalse(fs.exists(new Path(source + "/" + i))); - assertFalse(fs.exists(new Path(source + "/" + i + "/fileToRename"))); - assertFalse(fs.exists(new Path(source + "/" + i + "/file/to/rename"))); - assertFalse(fs.exists(new Path(source + "/" + i + "/file+to%rename"))); - assertFalse(fs.exists(new Path(source + "/fileToRename" + i))); - } - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteSmallFolderWithThreads() throws Exception { - - validateDeleteFolder(fs, "root"); - - // With single iteration, we would have created 7 blobs. - int expectedThreadsCreated = Math.min(7, deleteThreads); - - // Validate from logs that threads are enabled. - String content = logs.getOutput(); - assertInLog(content, "ms with threads: " + expectedThreadsCreated); - - // Validate thread executions - for (int i = 0; i < expectedThreadsCreated; i++) { - assertInLog(content, - "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i); - } - - // Also ensure that we haven't spawned extra threads. - if (expectedThreadsCreated < deleteThreads) { - for (int i = expectedThreadsCreated; i < deleteThreads; i++) { - assertNotInLog(content, - "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i); - } - } - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteLargeFolderWithThreads() throws Exception { - // Populate source folder with large number of files and directories. - this.iterations = 10; - validateDeleteFolder(fs, "root"); - - // Validate from logs that threads are enabled. - String content = logs.getOutput(); - assertInLog(content, "ms with threads: " + deleteThreads); - - // Validate thread executions - for (int i = 0; i < deleteThreads; i++) { - assertInLog(content, - "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i); - } - } - - /* - * Test case for delete operation with threads disabled and flat listing enabled. - */ - @Test - public void testDeleteLargeFolderDisableThreads() throws Exception { - Configuration conf = fs.getConf(); - conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 0); - URI uri = fs.getUri(); - fs.initialize(uri, conf); - - // Populate source folder with large number of files and directories. - this.iterations = 10; - validateDeleteFolder(fs, "root"); - - // Validate from logs that threads are disabled. 
- String content = logs.getOutput(); - assertInLog(content, - "Disabling threads for Delete operation as thread count 0"); - - // Validate no thread executions - for (int i = 0; i < deleteThreads; i++) { - assertNotInLog(content, - "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i); - } - } - - /* - * Test case for rename operation with threads and flat listing disabled. - */ - @Test - public void testDeleteSmallFolderDisableThreadsDisableFlatListing() throws Exception { - Configuration conf = fs.getConf(); - - // Number of threads set to 0 or 1 disables threads. - conf.setInt(NativeAzureFileSystem.AZURE_DELETE_THREADS, 1); - conf.setBoolean(AzureNativeFileSystemStore.KEY_ENABLE_FLAT_LISTING, false); - URI uri = fs.getUri(); - fs.initialize(uri, conf); - - validateDeleteFolder(fs, "root"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, - "Disabling threads for Delete operation as thread count 1"); - - // Validate no thread executions - for (int i = 0; i < deleteThreads; i++) { - assertNotInLog(content, - "AzureBlobDeleteThread-" + Thread.currentThread().getName() + "-" + i); - } - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteThreadPoolExceptionFailure() throws Exception { - - // Spy azure file system object and raise exception for new thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception()); - - // With single iteration, we would have created 7 blobs resulting 7 threads. - Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor); - - validateDeleteFolder(mockFs, "root"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, "Failed to create thread pool with threads"); - assertInLog(content, "Serializing the Delete operation"); - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteThreadPoolExecuteFailure() throws Exception { - - // Mock thread pool executor to throw exception for all requests. - ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class); - Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - // With single iteration, we would have created 7 blobs resulting 7 threads. 
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor); - - validateDeleteFolder(mockFs, "root"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, - "Rejected execution of thread for Delete operation on blob"); - assertInLog(content, "Serializing the Delete operation"); - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteThreadPoolExecuteSingleThreadFailure() throws Exception { - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - // Spy a thread pool executor and link it to azure file system object. - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)); - - // With single iteration, we would have created 7 blobs resulting 7 threads. - Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor); - - // Create a thread executor and link it to mocked thread pool executor object. - ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - // Mock thread executor to throw exception for all requests. - Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - - validateDeleteFolder(mockFs, "root"); - - // Validate from logs that threads are enabled and unused threads. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Delete operation with threads 7"); - assertInLog(content, - "6 threads not used for Delete operation on blob"); - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. - */ - @Test - public void testDeleteThreadPoolTerminationFailure() throws Exception { - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - // Spy a thread pool executor and link it to azure file system object. - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - ((NativeAzureFileSystem) fs).getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)); - - // Create a thread executor and link it to mocked thread pool executor object. - // Mock thread executor to throw exception for terminating threads. - ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class); - Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException()); - - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - // With single iteration, we would have created 7 blobs resulting 7 threads. 
- Mockito.when(mockFs.getThreadPoolExecutor(deleteThreads, "AzureBlobDeleteThread", "Delete", - path, NativeAzureFileSystem.AZURE_DELETE_THREADS)).thenReturn(mockThreadPoolExecutor); - - createFolder(mockFs, "root"); - Path sourceFolder = new Path("root"); - boolean exception = false; - try { - mockFs.delete(sourceFolder, true); - } catch (IOException e){ - exception = true; - } - - assertTrue(exception); - assertTrue(mockFs.exists(sourceFolder)); - - // Validate from logs that threads are enabled and delete operation is failed. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Delete operation with threads"); - assertInLog(content, "Threads got interrupted Delete blob operation"); - assertInLog(content, - "Delete failed as operation on subfolders and files failed."); - } - - /* - * Validate that when a directory is deleted recursively, the operation succeeds - * even if a child directory delete fails because the directory does not exist. - * This can happen if a child directory is deleted by an external agent while - * the parent is in progress of being deleted recursively. - */ - @Test - public void testRecursiveDirectoryDeleteWhenChildDirectoryDeleted() - throws Exception { - testRecusiveDirectoryDelete(true); - } - - /* - * Validate that when a directory is deleted recursively, the operation succeeds - * even if a file delete fails because it does not exist. - * This can happen if a file is deleted by an external agent while - * the parent directory is in progress of being deleted. - */ - @Test - public void testRecursiveDirectoryDeleteWhenDeletingChildFileReturnsFalse() - throws Exception { - testRecusiveDirectoryDelete(false); - } - - private void testRecusiveDirectoryDelete(boolean useDir) throws Exception { - String childPathToBeDeletedByExternalAgent = (useDir) - ? "root/0" - : "root/0/fileToRename"; - // Spy azure file system object and return false for deleting one file - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path( - childPathToBeDeletedByExternalAgent))); - - Answer answer = new Answer() { - public Boolean answer(InvocationOnMock invocation) throws Throwable { - String path = (String) invocation.getArguments()[0]; - boolean isDir = (boolean) invocation.getArguments()[1]; - boolean realResult = fs.deleteFile(path, isDir); - assertTrue(realResult); - boolean fakeResult = false; - return fakeResult; - } - }; - - Mockito.when(mockFs.deleteFile(path, useDir)).thenAnswer(answer); - - createFolder(mockFs, "root"); - Path sourceFolder = new Path("root"); - - assertTrue(mockFs.delete(sourceFolder, true)); - assertFalse(mockFs.exists(sourceFolder)); - - // Validate from logs that threads are enabled, that a child directory was - // deleted by an external caller, and the parent delete operation still - // succeeds. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Delete operation with threads"); - assertInLog(content, String.format("Attempt to delete non-existent %s %s", - useDir ? "directory" : "file", path)); - } - - /* - * Test case for delete operation with multiple threads and flat listing enabled. 
- */ - @Test - public void testDeleteSingleDeleteException() throws Exception { - - // Spy azure file system object and raise exception for deleting one file - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0"))); - Mockito.doThrow(new IOException()).when(mockFs).deleteFile(path, true); - - createFolder(mockFs, "root"); - Path sourceFolder = new Path("root"); - - boolean exception = false; - try { - mockFs.delete(sourceFolder, true); - } catch (IOException e){ - exception = true; - } - - assertTrue(exception); - assertTrue(mockFs.exists(sourceFolder)); - - // Validate from logs that threads are enabled and delete operation failed. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Delete operation with threads"); - assertInLog(content, - "Encountered Exception for Delete operation for file " + path); - assertInLog(content, - "Terminating execution of Delete operation now as some other thread already got exception or operation failed"); - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameThreadPoolExceptionFailure() throws Exception { - - // Spy azure file system object and raise exception for new thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - ((NativeAzureFileSystem) fs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenThrow(new Exception()); - - // With single iteration, we would have created 7 blobs resulting 7 threads. - Mockito.doReturn(mockThreadPoolExecutor).when(mockFs).getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS); - - validateRenameFolder(mockFs, "root", "rootnew"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, "Failed to create thread pool with threads"); - assertInLog(content, "Serializing the Rename operation"); - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameThreadPoolExecuteFailure() throws Exception { - - // Mock thread pool executor to throw exception for all requests. - ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class); - Mockito.doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - // With single iteration, we would have created 7 blobs resulting 7 threads. 
- Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor); - - validateRenameFolder(mockFs, "root", "rootnew"); - - // Validate from logs that threads are disabled. - String content = logs.getOutput(); - assertInLog(content, - "Rejected execution of thread for Rename operation on blob"); - assertInLog(content, "Serializing the Rename operation"); - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameThreadPoolExecuteSingleThreadFailure() throws Exception { - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - // Spy a thread pool executor and link it to azure file system object. - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)); - - // With single iteration, we would have created 7 blobs resulting 7 threads. - Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor); - - // Create a thread executor and link it to mocked thread pool executor object. - ThreadPoolExecutor mockThreadExecutor = Mockito.spy(mockThreadPoolExecutor.getThreadPool(7)); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - // Mock thread executor to throw exception for all requests. - Mockito.doCallRealMethod().doThrow(new RejectedExecutionException()).when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - - validateRenameFolder(mockFs, "root", "rootnew"); - - // Validate from logs that threads are enabled and unused threads exists. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Rename operation with threads 7"); - assertInLog(content, - "6 threads not used for Rename operation on blob"); - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameThreadPoolTerminationFailure() throws Exception { - - // Spy azure file system object and return mocked thread pool - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - // Spy a thread pool executor and link it to azure file system object. - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root"))); - AzureFileSystemThreadPoolExecutor mockThreadPoolExecutor = Mockito.spy( - mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)); - - // With single iteration, we would have created 7 blobs resulting 7 threads. - Mockito.when(mockFs.getThreadPoolExecutor(renameThreads, "AzureBlobRenameThread", "Rename", - path, NativeAzureFileSystem.AZURE_RENAME_THREADS)).thenReturn(mockThreadPoolExecutor); - - // Mock thread executor to throw exception for all requests. 
- ThreadPoolExecutor mockThreadExecutor = Mockito.mock(ThreadPoolExecutor.class); - Mockito.doNothing().when(mockThreadExecutor).execute(Mockito.any(Runnable.class)); - Mockito.when(mockThreadExecutor.awaitTermination(Long.MAX_VALUE, TimeUnit.DAYS)).thenThrow(new InterruptedException()); - Mockito.when(mockThreadPoolExecutor.getThreadPool(7)).thenReturn(mockThreadExecutor); - - - createFolder(mockFs, "root"); - Path sourceFolder = new Path("root"); - Path destFolder = new Path("rootnew"); - boolean exception = false; - try { - mockFs.rename(sourceFolder, destFolder); - } catch (IOException e){ - exception = true; - } - - assertTrue(exception); - assertTrue(mockFs.exists(sourceFolder)); - - // Validate from logs that threads are enabled and rename operation is failed. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Rename operation with threads"); - assertInLog(content, "Threads got interrupted Rename blob operation"); - assertInLog(content, - "Rename failed as operation on subfolders and files failed."); - } - - /* - * Test case for rename operation with multiple threads and flat listing enabled. - */ - @Test - public void testRenameSingleRenameException() throws Exception { - - // Spy azure file system object and raise exception for deleting one file - Path sourceFolder = new Path("root"); - Path destFolder = new Path("rootnew"); - - // Spy azure file system object and populate rename pending spy object. - NativeAzureFileSystem mockFs = Mockito.spy((NativeAzureFileSystem) fs); - - // Populate data now only such that rename pending spy object would see this data. - createFolder(mockFs, "root"); - - String srcKey = mockFs.pathToKey(mockFs.makeAbsolute(sourceFolder)); - String dstKey = mockFs.pathToKey(mockFs.makeAbsolute(destFolder)); - - FolderRenamePending mockRenameFs = Mockito.spy(mockFs.prepareAtomicFolderRename(srcKey, dstKey)); - Mockito.when(mockFs.prepareAtomicFolderRename(srcKey, dstKey)).thenReturn(mockRenameFs); - String path = mockFs.pathToKey(mockFs.makeAbsolute(new Path("root/0"))); - Mockito.doThrow(new IOException()).when(mockRenameFs).renameFile(Mockito.any(FileMetadata.class)); - - boolean exception = false; - try { - mockFs.rename(sourceFolder, destFolder); - } catch (IOException e){ - exception = true; - } - - assertTrue(exception); - assertTrue(mockFs.exists(sourceFolder)); - - // Validate from logs that threads are enabled and delete operation failed. - String content = logs.getOutput(); - assertInLog(content, - "Using thread pool for Rename operation with threads"); - assertInLog(content, - "Encountered Exception for Rename operation for file " + path); - assertInLog(content, - "Terminating execution of Rename operation now as some other thread already got exception or operation failed"); - } - - @Override - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - return AzureBlobStorageTestAccount.create(); - } - -} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java deleted file mode 100644 index f73a7638a3efa..0000000000000 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/ITestNativeAzureFileSystemClientLogging.java +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.fs.azure; - -import java.net.URI; -import java.util.StringTokenizer; - -import org.apache.commons.logging.impl.Log4JLogger; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.log4j.Logger; -import org.junit.Test; - -/** - * Test to validate Azure storage client side logging. Tests works only when - * testing with Live Azure storage because Emulator does not have support for - * client-side logging. - * - * Important: Do not attempt to move off commons-logging. - * The tests will fail. - */ -public class ITestNativeAzureFileSystemClientLogging - extends AbstractWasbTestBase { - - // Core-site config controlling Azure Storage Client logging - private static final String KEY_LOGGING_CONF_STRING = "fs.azure.storage.client.logging"; - - // Temporary directory created using WASB. - private static final String TEMP_DIR = "tempDir"; - - /* - * Helper method to verify the client logging is working. This check primarily - * checks to make sure we see a line in the logs corresponding to the entity - * that is created during test run. - */ - private boolean verifyStorageClientLogs(String capturedLogs, String entity) - throws Exception { - - URI uri = testAccount.getRealAccount().getBlobEndpoint(); - String container = testAccount.getRealContainer().getName(); - String validateString = uri + Path.SEPARATOR + container + Path.SEPARATOR - + entity; - boolean entityFound = false; - - StringTokenizer tokenizer = new StringTokenizer(capturedLogs, "\n"); - - while (tokenizer.hasMoreTokens()) { - String token = tokenizer.nextToken(); - if (token.contains(validateString)) { - entityFound = true; - break; - } - } - return entityFound; - } - - /* - * Helper method that updates the core-site config to enable/disable logging. - */ - private void updateFileSystemConfiguration(Boolean loggingFlag) - throws Exception { - - Configuration conf = fs.getConf(); - conf.set(KEY_LOGGING_CONF_STRING, loggingFlag.toString()); - URI uri = fs.getUri(); - fs.initialize(uri, conf); - } - - // Using WASB code to communicate with Azure Storage. - private void performWASBOperations() throws Exception { - - Path tempDir = new Path(Path.SEPARATOR + TEMP_DIR); - fs.mkdirs(tempDir); - fs.delete(tempDir, true); - } - - @Test - public void testLoggingEnabled() throws Exception { - - LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger - .getRootLogger())); - - // Update configuration based on the Test. 
- updateFileSystemConfiguration(true); - - performWASBOperations(); - - String output = getLogOutput(logs); - assertTrue("Log entry " + TEMP_DIR + " not found in " + output, - verifyStorageClientLogs(output, TEMP_DIR)); - } - - protected String getLogOutput(LogCapturer logs) { - String output = logs.getOutput(); - assertTrue("No log created/captured", !output.isEmpty()); - return output; - } - - @Test - public void testLoggingDisabled() throws Exception { - - LogCapturer logs = LogCapturer.captureLogs(new Log4JLogger(Logger - .getRootLogger())); - - // Update configuration based on the Test. - updateFileSystemConfiguration(false); - - performWASBOperations(); - String output = getLogOutput(logs); - - assertFalse("Log entry " + TEMP_DIR + " found in " + output, - verifyStorageClientLogs(output, TEMP_DIR)); - } - - @Override - protected AzureBlobStorageTestAccount createTestAccount() throws Exception { - return AzureBlobStorageTestAccount.create(); - } -} diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java index 8ac36c299b65b..ed3aed9d7de92 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/NativeAzureFileSystemBaseTest.java @@ -30,8 +30,8 @@ import java.util.EnumSet; import java.util.TimeZone; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -73,7 +73,7 @@ public abstract class NativeAzureFileSystemBaseTest private static final EnumSet CREATE_FLAG = EnumSet.of(XAttrSetFlag.CREATE); private static final EnumSet REPLACE_FLAG = EnumSet.of(XAttrSetFlag.REPLACE); - public static final Log LOG = LogFactory.getLog(NativeAzureFileSystemBaseTest.class); + public static final Logger LOG = LoggerFactory.getLogger(NativeAzureFileSystemBaseTest.class); protected NativeAzureFileSystem fs; @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java index aca5f810b4b53..598469488a661 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azure/TestBlobOperationDescriptor.java @@ -32,6 +32,7 @@ import org.junit.Test; import java.net.HttpURLConnection; +import java.nio.charset.StandardCharsets; /** * Tests for BlobOperationDescriptor. 
@@ -71,7 +72,7 @@ public void testAppendBlockOperations() throws Exception { assertEquals(0, lastContentLengthReceived); String message = "this is a test"; - output.write(message.getBytes("UTF-8")); + output.write(message.getBytes(StandardCharsets.UTF_8)); output.flush(); assertEquals(BlobOperationDescriptor.OperationType.AppendBlock, lastOperationTypeSent); @@ -107,7 +108,7 @@ public void testPutBlockOperations() throws Exception { assertEquals(0, lastContentLengthReceived); String message = "this is a test"; - output.write(message.getBytes("UTF-8")); + output.write(message.getBytes(StandardCharsets.UTF_8)); output.flush(); assertEquals(BlobOperationDescriptor.OperationType.PutBlock, lastOperationTypeSent); @@ -186,7 +187,7 @@ public void testGetBlobOperations() throws Exception { assertNull(lastOperationTypeReceived); assertEquals(0, lastContentLengthReceived); - output.write(message.getBytes("UTF-8")); + output.write(message.getBytes(StandardCharsets.UTF_8)); output.flush(); assertEquals(BlobOperationDescriptor.OperationType.PutBlock, lastOperationTypeSent); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java index fe25477beb61e..f041f4bccdc8c 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/TestAbfsConfigurationFieldsValidation.java @@ -20,8 +20,8 @@ import java.io.IOException; import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; -import org.apache.commons.codec.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.fs.azurebfs.constants.TestConfigurationKeys; @@ -99,8 +99,8 @@ public class TestAbfsConfigurationFieldsValidation { public TestAbfsConfigurationFieldsValidation() throws Exception { super(); this.accountName = "testaccount1.blob.core.windows.net"; - this.encodedString = Base64.encode("base64Value".getBytes(Charsets.UTF_8)); - this.encodedAccountKey = Base64.encode("someAccountKey".getBytes(Charsets.UTF_8)); + this.encodedString = Base64.encode("base64Value".getBytes(StandardCharsets.UTF_8)); + this.encodedAccountKey = Base64.encode("someAccountKey".getBytes(StandardCharsets.UTF_8)); Configuration configuration = new Configuration(); configuration.addResource(TestConfigurationKeys.TEST_CONFIGURATION_FILE_NAME); configuration.set(INT_KEY, "1234565"); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java index 1f0cbc0a1672b..5f131db3d9895 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/extensions/ClassicDelegationTokenManager.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.URI; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; @@ -249,8 +249,8 @@ public static Configuration useClassicDTManager(Configuration conf) { * highlighting security risks of shared mutable byte 
arrays. * @return a password. */ - private static byte[] getSecretManagerPasssword() { - return "non-password".getBytes(Charset.forName("UTF-8")); + private static byte[] getSecretManagerPassword() { + return "non-password".getBytes(StandardCharsets.UTF_8); } /** @@ -265,13 +265,13 @@ public TokenSecretManager() { @Override protected byte[] createPassword(StubAbfsTokenIdentifier identifier) { - return getSecretManagerPasssword(); + return getSecretManagerPassword(); } @Override public byte[] retrievePassword(StubAbfsTokenIdentifier identifier) throws InvalidToken { - return getSecretManagerPasssword(); + return getSecretManagerPassword(); } @Override diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java index b8df38eed0ac4..ce088957f64ab 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestShellDecryptionKeyProvider.java @@ -19,14 +19,14 @@ package org.apache.hadoop.fs.azurebfs.services; import java.io.File; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import org.junit.Assert; import org.junit.Test; import org.apache.commons.io.FileUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.azurebfs.constants.ConfigurationKeys; import org.apache.hadoop.fs.azurebfs.contracts.exceptions.KeyProviderException; @@ -39,8 +39,7 @@ * */ public class TestShellDecryptionKeyProvider { - public static final Log LOG = LogFactory - .getLog(TestShellDecryptionKeyProvider.class); + public static final Logger LOG = LoggerFactory.getLogger(TestShellDecryptionKeyProvider.class); private static final File TEST_ROOT_DIR = new File(System.getProperty( "test.build.data", "/tmp"), "TestShellDecryptionKeyProvider"); @@ -76,7 +75,7 @@ public void testValidScript() throws Exception { // expected result (so that we validate both script input and output) File scriptFile = new File(TEST_ROOT_DIR, "testScript.cmd"); FileUtils.writeStringToFile(scriptFile, "@echo %1 " + expectedResult, - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); ShellDecryptionKeyProvider provider = new ShellDecryptionKeyProvider(); Configuration conf = new Configuration(); diff --git a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java index b0a72b2131cdd..1e578670cb33f 100644 --- a/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java +++ b/hadoop-tools/hadoop-azure/src/test/java/org/apache/hadoop/fs/azurebfs/services/TestTextFileBasedIdentityHandler.java @@ -20,7 +20,7 @@ import java.io.File; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.NoSuchFileException; import org.junit.Assert; @@ -71,22 +71,22 @@ public static void init() throws IOException { groupMappingFile = tempDir.newFile("group-mapping.conf"); //Stage data for user mapping - 
FileUtils.writeStringToFile(userMappingFile, testUserDataLine1, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine2, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine3, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine4, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine5, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine6, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, testUserDataLine7, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(userMappingFile, NEW_LINE, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine1, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine2, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine3, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine4, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine5, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine6, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, testUserDataLine7, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(userMappingFile, NEW_LINE, StandardCharsets.UTF_8, true); //Stage data for group mapping - FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine1, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine2, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine3, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine4, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine5, Charset.forName("UTF-8"), true); - FileUtils.writeStringToFile(groupMappingFile, NEW_LINE, Charset.forName("UTF-8"), true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine1, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine2, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine3, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine4, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(groupMappingFile, testGroupDataLine5, StandardCharsets.UTF_8, true); + FileUtils.writeStringToFile(groupMappingFile, NEW_LINE, StandardCharsets.UTF_8, true); } private void assertUserLookup(TextFileBasedIdentityHandler handler, String userInTest, String expectedUser) diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java index c356edd4251c0..d1dd7617e8020 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/FileBasedCopyListing.java @@ -27,7 +27,7 @@ import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -76,7 
+76,7 @@ private List fetchFileList(Path sourceListing) throws IOException { BufferedReader input = null; try { input = new BufferedReader(new InputStreamReader(fs.open(sourceListing), - Charset.forName("UTF-8"))); + StandardCharsets.UTF_8)); String line = input.readLine(); while (line != null) { result.add(new Path(line)); diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java index 0ca9e632b9197..e93358f77d037 100644 --- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java +++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/RegexCopyFilter.java @@ -29,7 +29,7 @@ import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.ArrayList; import java.util.List; @@ -65,7 +65,7 @@ public void initialize() { try { InputStream is = Files.newInputStream(filtersFile.toPath()); reader = new BufferedReader(new InputStreamReader(is, - Charset.forName("UTF-8"))); + StandardCharsets.UTF_8)); String line; while ((line = reader.readLine()) != null) { Pattern pattern = Pattern.compile(line); diff --git a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java index fb56b90186c6b..7e5b715479019 100644 --- a/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java +++ b/hadoop-tools/hadoop-extras/src/main/java/org/apache/hadoop/tools/DistTool.java @@ -22,7 +22,7 @@ import java.io.DataOutput; import java.io.IOException; import java.io.InputStreamReader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Random; @@ -96,7 +96,7 @@ protected static List readFile(Configuration conf, Path inputfile List result = new ArrayList(); FileSystem fs = inputfile.getFileSystem(conf); try (BufferedReader input = new BufferedReader(new InputStreamReader(fs.open(inputfile), - Charset.forName("UTF-8")))) { + StandardCharsets.UTF_8))) { for(String line; (line = input.readLine()) != null;) { result.add(line); } diff --git a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java index 9c8dc1f2304c1..2900139093294 100644 --- a/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java +++ b/hadoop-tools/hadoop-fs2img/src/main/java/org/apache/hadoop/hdfs/server/namenode/ImageWriter.java @@ -26,6 +26,7 @@ import java.io.OutputStream; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.security.DigestOutputStream; import java.security.MessageDigest; import java.util.Arrays; @@ -35,7 +36,6 @@ import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.protobuf.CodedOutputStream; import org.apache.hadoop.classification.InterfaceAudience; @@ -325,7 +325,7 @@ void writeMD5(String imagename) throws IOException { Path chk = new Path(outdir, imagename + ".md5"); try (OutputStream out = outfs.create(chk)) { String md5Line = digestString + " *" 
+ imagename + "\n"; - out.write(md5Line.getBytes(Charsets.UTF_8)); + out.write(md5Line.getBytes(StandardCharsets.UTF_8)); } } diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java index 71db9bfb25c3a..99c621a3e92eb 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/CompressionEmulationUtil.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.Map; @@ -99,7 +100,7 @@ class CompressionEmulationUtil { private static final CompressionRatioLookupTable COMPRESSION_LOOKUP_TABLE = new CompressionRatioLookupTable(); - private static final Charset charsetUTF8 = Charset.forName("UTF-8"); + private static final Charset charsetUTF8 = StandardCharsets.UTF_8; /** * This is a {@link Mapper} implementation for generating random text data. diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java index 56f67e5a73cb0..a6f986ce2602c 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/DistributedCacheEmulator.java @@ -42,6 +42,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.Comparator; @@ -113,7 +114,7 @@ class DistributedCacheEmulator { Configuration conf; // gridmix configuration - private static final Charset charsetUTF8 = Charset.forName("UTF-8"); + private static final Charset charsetUTF8 = StandardCharsets.UTF_8; // Pseudo local file system where local FS based distributed cache files are // created by gridmix. 
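Illustrative sketch (not part of the applied diff) of the recurring charset migration that the surrounding hunks perform: string charset names and Charset.forName("UTF-8") are replaced by java.nio.charset.StandardCharsets.UTF_8. A minimal before/after, assuming nothing beyond the JDK; the class and variable names here are made up for illustration:

import java.nio.charset.StandardCharsets;

public class CharsetMigrationSketch {
  public static void main(String[] args) throws Exception {
    String message = "this is a test";
    // Before: a string charset name triggers a name-based lookup and forces the
    // checked UnsupportedEncodingException on String#getBytes(String).
    byte[] before = message.getBytes("UTF-8");
    // After: the StandardCharsets constant is guaranteed to exist and cannot fail.
    byte[] after = message.getBytes(StandardCharsets.UTF_8);
    System.out.println(new String(after, StandardCharsets.UTF_8).equals(message)); // true
  }
}

The encoded bytes are identical either way; the change only removes the dead exception handling and the string-based charset lookup, which is why the patch can apply it mechanically across these files.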
diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java index 4a75cdedf7388..aa191629cf109 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GenerateDistCacheData.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.List; @@ -96,7 +97,7 @@ class GenerateDistCacheData extends GridmixJob { */ static final short GRIDMIX_DISTCACHE_FILE_PERM = 0644; - private static final Charset charsetUTF8 = Charset.forName("UTF-8"); + private static final Charset charsetUTF8 = StandardCharsets.UTF_8; public GenerateDistCacheData(Configuration conf) throws IOException { super(conf, 0L, JOB_NAME); diff --git a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java index 481799f7b5166..afb95cab87c24 100644 --- a/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java +++ b/hadoop-tools/hadoop-gridmix/src/main/java/org/apache/hadoop/mapred/gridmix/GridmixRecord.java @@ -21,6 +21,7 @@ import java.io.DataOutput; import java.io.EOFException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import org.apache.hadoop.io.DataInputBuffer; @@ -111,7 +112,7 @@ private void writeRandomText(DataOutput out, final int size) //TODO Should we use long for size. What if the data is more than 4G? String randomWord = rtg.getRandomWord(); - byte[] bytes = randomWord.getBytes("UTF-8"); + byte[] bytes = randomWord.getBytes(StandardCharsets.UTF_8); long randomWordSize = bytes.length; while (i >= randomWordSize) { out.write(bytes); @@ -119,7 +120,7 @@ private void writeRandomText(DataOutput out, final int size) // get the next random word randomWord = rtg.getRandomWord(); - bytes = randomWord.getBytes("UTF-8"); + bytes = randomWord.getBytes(StandardCharsets.UTF_8); // determine the random word size randomWordSize = bytes.length; } diff --git a/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java b/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java index 0856d0f4e0eeb..9cb6b93c4e0d2 100644 --- a/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java +++ b/hadoop-tools/hadoop-kafka/src/main/java/org/apache/hadoop/metrics2/sink/KafkaSink.java @@ -37,7 +37,7 @@ import java.io.Closeable; import java.io.IOException; import java.net.InetAddress; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.time.Instant; import java.time.LocalDateTime; import java.time.ZoneId; @@ -156,7 +156,7 @@ public void putMetrics(MetricsRecord record) { // Create the record to be sent from the json. ProducerRecord data = new ProducerRecord( - topic, jsonLines.toString().getBytes(Charset.forName("UTF-8"))); + topic, jsonLines.toString().getBytes(StandardCharsets.UTF_8)); // Send the data to the Kafka broker. 
Here is an example of this data: // {"hostname": "...", "timestamp": 1436913651516, diff --git a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java index 03c479fba59c5..665dbddd9a5db 100644 --- a/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java +++ b/hadoop-tools/hadoop-kafka/src/test/java/org/apache/hadoop/metrics2/impl/TestKafkaMetrics.java @@ -159,7 +159,7 @@ StringBuilder recordToJson(MetricsRecord record) { String date = dateFormat.format(currDate); SimpleDateFormat timeFormat = new SimpleDateFormat("HH:mm:ss"); String time = timeFormat.format(currDate); - String hostname = new String("null"); + String hostname = "null"; try { hostname = InetAddress.getLocalHost().getHostName(); } catch (Exception e) { diff --git a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/solver/impl/TestLpSolver.java b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/solver/impl/TestLpSolver.java index d32f7c3592b26..1bce63466a5d6 100644 --- a/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/solver/impl/TestLpSolver.java +++ b/hadoop-tools/hadoop-resourceestimator/src/test/java/org/apache/hadoop/resourceestimator/solver/impl/TestLpSolver.java @@ -41,7 +41,7 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.Reader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.util.List; import java.util.Map; @@ -84,7 +84,7 @@ private void parseLog(final String inputLog) RLESparseResourceAllocation result = solver.solve(jobHistory); String file = "src/test/resources/lp/answer.txt"; Reader fileReader = new InputStreamReader(new FileInputStream(file), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); BufferedReader bufferedReader = new BufferedReader(fileReader); String line = bufferedReader.readLine(); Configuration config = new Configuration(); diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java index ecd5f0bbfc1fd..817c5c8b2f704 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/RandomSeedGenerator.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.tools.rumen; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; @@ -43,8 +43,7 @@ */ public class RandomSeedGenerator { private static Logger LOG = LoggerFactory.getLogger(RandomSeedGenerator.class); - private static final Charset UTF_8 = Charset.forName("UTF-8"); - + /** MD5 algorithm instance, one for each thread. 
*/ private static final ThreadLocal md5Holder = new ThreadLocal() { @@ -74,7 +73,7 @@ public static long getSeed(String streamId, long masterSeed) { // We could have fed the bytes of masterSeed one by one to md5.update() // instead String str = streamId + '/' + masterSeed; - byte[] digest = md5.digest(str.getBytes(UTF_8)); + byte[] digest = md5.digest(str.getBytes(StandardCharsets.UTF_8)); // Create a long from the first 8 bytes of the digest // This is fine as MD5 has the avalanche property. // Paranoids could have XOR folded the other 8 bytes in too. diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Record.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Record.java index f0ec99ad8143c..84df8b8187e50 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Record.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Record.java @@ -22,6 +22,7 @@ import java.io.DataOutput; import java.io.ByteArrayOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -94,8 +95,8 @@ public String toString() { ByteArrayOutputStream s = new ByteArrayOutputStream(); CsvRecordOutput a = new CsvRecordOutput(s); this.serialize(a); - return new String(s.toByteArray(), "UTF-8"); - } catch (Throwable ex) { + return new String(s.toByteArray(), StandardCharsets.UTF_8); + } catch (Exception ex) { throw new RuntimeException(ex); } } diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java index bc92b7149ae20..7871a4c969e7d 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/Environment.java @@ -20,7 +20,7 @@ import java.io.*; import java.net.InetAddress; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.*; import org.apache.hadoop.classification.InterfaceAudience; @@ -65,7 +65,7 @@ public Environment() throws IOException { Process pid = Runtime.getRuntime().exec(command); BufferedReader in = new BufferedReader( - new InputStreamReader(pid.getInputStream(), Charset.forName("UTF-8"))); + new InputStreamReader(pid.getInputStream(), StandardCharsets.UTF_8)); try { while (true) { String line = in.readLine(); diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java index 9bab1013f2cc7..438a00057ec14 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeMapper.java @@ -20,6 +20,7 @@ import java.io.*; import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Mapper; @@ -75,13 +76,11 @@ public void configure(JobConf job) { inputFormatClassName.equals(TextInputFormat.class.getCanonicalName())); } - try { - mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t").getBytes("UTF-8"); - mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t").getBytes("UTF-8"); - numOfMapOutputKeyFields = 
job.getInt("stream.num.map.output.key.fields", 1); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("The current system does not support UTF-8 encoding!", e); - } + mapOutputFieldSeparator = job.get("stream.map.output.field.separator", "\t") + .getBytes(StandardCharsets.UTF_8); + mapInputFieldSeparator = job.get("stream.map.input.field.separator", "\t") + .getBytes(StandardCharsets.UTF_8); + numOfMapOutputKeyFields = job.getInt("stream.num.map.output.key.fields", 1); } // Do NOT declare default constructor diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java index ffa7b014131da..1f5a247bb2aa1 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/PipeReducer.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.net.URLDecoder; @@ -71,13 +72,11 @@ public void configure(JobConf job) { SkipBadRecords.setAutoIncrReducerProcCount(job, false); skipping = job.getBoolean(MRJobConfig.SKIP_RECORDS, false); - try { - reduceOutFieldSeparator = job_.get("stream.reduce.output.field.separator", "\t").getBytes("UTF-8"); - reduceInputFieldSeparator = job_.get("stream.reduce.input.field.separator", "\t").getBytes("UTF-8"); - this.numOfReduceOutputKeyFields = job_.getInt("stream.num.reduce.output.key.fields", 1); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException("The current system does not support UTF-8 encoding!", e); - } + reduceOutFieldSeparator = job_.get("stream.reduce.output.field.separator", "\t") + .getBytes(StandardCharsets.UTF_8); + reduceInputFieldSeparator = job_.get("stream.reduce.input.field.separator", "\t") + .getBytes(StandardCharsets.UTF_8); + this.numOfReduceOutputKeyFields = job_.getInt("stream.num.reduce.output.key.fields", 1); } public void reduce(Object key, Iterator values, OutputCollector output, diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java index 063ea51dac693..c757cf6d464f4 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java @@ -19,11 +19,9 @@ package org.apache.hadoop.streaming; import java.io.*; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.io.Text; -import org.apache.hadoop.io.Writable; -import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.mapred.Reporter; @@ -103,7 +101,8 @@ public Text createValue() { void numRecStats(byte[] record, int start, int len) throws IOException { numRec_++; if (numRec_ == nextStatusRec_) { - String recordStr = new String(record, start, Math.min(len, statusMaxRecordChars_), "UTF-8"); + String recordStr = new String(record, start, + Math.min(len, statusMaxRecordChars_), StandardCharsets.UTF_8); nextStatusRec_ += 100;//*= 10; String status = getStatus(recordStr); LOG.info(status); diff --git 
a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java index 8dd987e870cda..a6983e1c6c306 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamUtil.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.net.InetAddress; import java.net.URL; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; @@ -146,7 +147,7 @@ static String slurp(File f) throws IOException { String contents = null; try { in.read(buf, 0, len); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } @@ -160,7 +161,7 @@ static String slurpHadoop(Path p, FileSystem fs) throws IOException { String contents = null; try { in.readFully(in.getPos(), buf); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java index 7438cb8191a4b..974cdc7c8d001 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java @@ -19,6 +19,7 @@ package org.apache.hadoop.streaming; import java.io.*; +import java.nio.charset.StandardCharsets; import java.util.regex.*; import org.apache.hadoop.io.DataOutputBuffer; @@ -132,7 +133,7 @@ private boolean slowReadUntilMatch(Pattern markPattern, boolean includePat, read = bin_.read(buf); if (read == -1) return false; - String sbuf = new String(buf, 0, read, "UTF-8"); + String sbuf = new String(buf, 0, read, StandardCharsets.UTF_8); Matcher match = markPattern.matcher(sbuf); firstMatchStart_ = NA; @@ -235,7 +236,7 @@ void addGroup(StringBuffer pat, String escapedGroup) { } boolean fastReadUntilMatch(String textPat, boolean includePat, DataOutputBuffer outBufOrNull) throws IOException { - byte[] cpat = textPat.getBytes("UTF-8"); + byte[] cpat = textPat.getBytes(StandardCharsets.UTF_8); int m = 0; boolean match = false; int msup = cpat.length; diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextOutputReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextOutputReader.java index 32bba397cea6b..1c17659b778af 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextOutputReader.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/KeyOnlyTextOutputReader.java @@ -21,7 +21,7 @@ import java.io.DataInput; import java.io.IOException; import java.io.InputStream; -import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.NullWritable; @@ -77,11 +77,7 @@ public NullWritable getCurrentValue() throws IOException { @Override public String getLastOutput() { if (bytes != null) { - try { - return new String(bytes, "UTF-8"); - } catch (UnsupportedEncodingException e) { - return ""; - } + return new String(bytes, 
StandardCharsets.UTF_8); } else { return null; } diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java index 6f0fd8bfa5318..31513da71d9c9 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextInputWriter.java @@ -20,6 +20,7 @@ import java.io.DataOutput; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.Text; @@ -66,7 +67,7 @@ protected void writeUTF8(Object object) throws IOException { valSize = val.getLength(); } else { String sval = object.toString(); - bval = sval.getBytes("UTF-8"); + bval = sval.getBytes(StandardCharsets.UTF_8); valSize = bval.length; } clientOut.write(bval, 0, valSize); diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java index 06c05bc9ef795..11c84a471f7d7 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/io/TextOutputReader.java @@ -21,8 +21,8 @@ import java.io.DataInput; import java.io.IOException; import java.io.InputStream; -import java.io.UnsupportedEncodingException; import java.nio.charset.CharacterCodingException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; @@ -84,11 +84,7 @@ public Text getCurrentValue() throws IOException { @Override public String getLastOutput() { if (bytes != null) { - try { - return new String(bytes, "UTF-8"); - } catch (UnsupportedEncodingException e) { - return ""; - } + return new String(bytes, StandardCharsets.UTF_8); } else { return null; } diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamBaseRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamBaseRecordReader.java index 43c1b1bec0a71..e3c14743cb32b 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamBaseRecordReader.java +++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamBaseRecordReader.java @@ -19,6 +19,7 @@ package org.apache.hadoop.streaming.mapreduce; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -107,7 +108,7 @@ void numRecStats(byte[] record, int start, int len) throws IOException { numRec_++; if (numRec_ == nextStatusRec_) { String recordStr = new String(record, start, Math.min(len, - statusMaxRecordChars_), "UTF-8"); + statusMaxRecordChars_), StandardCharsets.UTF_8); nextStatusRec_ += 100;// *= 10; String status = getStatus(recordStr); LOG.info(status); diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamXmlRecordReader.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamXmlRecordReader.java index c7ee847763f75..aa8a4d8832c39 100644 --- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamXmlRecordReader.java +++ 
b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/mapreduce/StreamXmlRecordReader.java @@ -20,6 +20,7 @@ import java.io.BufferedInputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -139,7 +140,7 @@ private boolean slowReadUntilMatch(Pattern markPattern, boolean includePat, if (read == -1) return false; - String sbuf = new String(buf, 0, read, "UTF-8"); + String sbuf = new String(buf, 0, read, StandardCharsets.UTF_8); Matcher match = markPattern.matcher(sbuf); firstMatchStart_ = NA; @@ -246,7 +247,7 @@ void addGroup(StringBuffer pat, String escapedGroup) { boolean fastReadUntilMatch(String textPat, boolean includePat, DataOutputBuffer outBufOrNull) throws IOException { - byte[] cpat = textPat.getBytes("UTF-8"); + byte[] cpat = textPat.getBytes(StandardCharsets.UTF_8); int m = 0; boolean match = false; int msup = cpat.length; diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java index 813c08c6111d3..e9c2740ee8877 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesMapApp.java @@ -22,6 +22,7 @@ import java.io.DataOutputStream; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.io.IntWritable; @@ -52,7 +53,7 @@ public static void main(String[] args) throws IOException { } private void writeString(String str) throws IOException { - byte[] bytes = str.getBytes("UTF-8"); + byte[] bytes = str.getBytes(StandardCharsets.UTF_8); dos.writeInt(bytes.length); dos.write(bytes); } diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java index 741e3d3a007e7..4a21f11f58ca0 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/RawBytesReduceApp.java @@ -21,6 +21,7 @@ import java.io.DataInputStream; import java.io.EOFException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.io.IntWritable; @@ -62,7 +63,7 @@ private String readString() throws IOException { } byte[] bytes = new byte[length]; dis.readFully(bytes); - return new String(bytes, "UTF-8"); + return new String(bytes, StandardCharsets.UTF_8); } private int readInt() throws IOException { diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java index fccd8d51e4b92..901abba885b5f 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestFileArgs.java @@ -21,6 +21,7 @@ import java.io.DataOutputStream; import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Map; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -70,7 +71,7 @@ public void setUp() throws IOException { // Set up side file FileSystem localFs = FileSystem.getLocal(conf); DataOutputStream 
dos = localFs.create(new Path("target/sidefile")); - dos.write("hello world\n".getBytes("UTF-8")); + dos.write("hello world\n".getBytes(StandardCharsets.UTF_8)); dos.close(); // Since ls doesn't read stdin, we don't want to write anything diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java index a9fc5fd5a457b..dc12e4eff97a2 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestGzipInput.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.zip.GZIPOutputStream; /** @@ -37,7 +38,7 @@ protected void createInput() throws IOException { GZIPOutputStream out = new GZIPOutputStream( new FileOutputStream(INPUT_FILE.getAbsoluteFile())); - out.write(input.getBytes("UTF-8")); + out.write(input.getBytes(StandardCharsets.UTF_8)); out.close(); } } diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java index 752268de3dc7d..041d527ab173a 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestMultipleArchiveFiles.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.IOException; import java.io.DataOutputStream; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.zip.ZipEntry; import java.util.zip.ZipOutputStream; @@ -86,14 +87,14 @@ protected void createInput() throws IOException DataOutputStream dos = fileSys.create(new Path(INPUT_FILE)); String inputFileString = "symlink1" + File.separator + "cacheArchive1\nsymlink2" + File.separator + "cacheArchive2"; - dos.write(inputFileString.getBytes("UTF-8")); + dos.write(inputFileString.getBytes(StandardCharsets.UTF_8)); dos.close(); DataOutputStream out = fileSys.create(new Path(CACHE_ARCHIVE_1.toString())); ZipOutputStream zos = new ZipOutputStream(out); ZipEntry ze = new ZipEntry(CACHE_FILE_1.toString()); zos.putNextEntry(ze); - zos.write(input.getBytes("UTF-8")); + zos.write(input.getBytes(StandardCharsets.UTF_8)); zos.closeEntry(); zos.close(); @@ -101,7 +102,7 @@ protected void createInput() throws IOException zos = new ZipOutputStream(out); ze = new ZipEntry(CACHE_FILE_2.toString()); zos.putNextEntry(ze); - zos.write(input.getBytes("UTF-8")); + zos.write(input.getBytes(StandardCharsets.UTF_8)); zos.closeEntry(); zos.close(); } diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java index 7621fd1fe8a9c..09adb3d5fd44c 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestRawBytesStreaming.java @@ -22,6 +22,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; @@ -46,7 +47,7 @@ public 
TestRawBytesStreaming() throws IOException { protected void createInput() throws IOException { DataOutputStream out = new DataOutputStream(new FileOutputStream(INPUT_FILE.getAbsoluteFile())); - out.write(input.getBytes("UTF-8")); + out.write(input.getBytes(StandardCharsets.UTF_8)); out.close(); } diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java index b27a8c65ae866..b303c8c977249 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamAggregate.java @@ -21,6 +21,7 @@ import org.junit.Test; import static org.junit.Assert.*; import java.io.*; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -53,7 +54,7 @@ protected void createInput() throws IOException { DataOutputStream out = new DataOutputStream( new FileOutputStream(INPUT_FILE.getAbsoluteFile())); - out.write(input.getBytes("UTF-8")); + out.write(input.getBytes(StandardCharsets.UTF_8)); out.close(); } diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java index e1f6da52768d5..b2bc84b4f90a9 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUnconsumedInput.java @@ -24,14 +24,11 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.Test; public class TestUnconsumedInput { @@ -54,12 +51,12 @@ public TestUnconsumedInput() throws IOException protected void createInput() throws IOException { - DataOutputStream out = new DataOutputStream( - new FileOutputStream(INPUT_FILE.getAbsoluteFile())); + try (DataOutputStream out = new DataOutputStream( + new FileOutputStream(INPUT_FILE.getAbsoluteFile()))) { for (int i=0; i<10000; ++i) { - out.write(input.getBytes("UTF-8")); + out.write(input.getBytes(StandardCharsets.UTF_8)); } - out.close(); + } } protected String[] genArgs() { diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/mapreduce/TestStreamXmlRecordReader.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/mapreduce/TestStreamXmlRecordReader.java index f2d9495efa892..5bf2fe52d447c 100644 --- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/mapreduce/TestStreamXmlRecordReader.java +++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/mapreduce/TestStreamXmlRecordReader.java @@ -24,6 +24,7 @@ import java.io.File; import java.io.FileOutputStream; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.Arrays; import java.util.HashSet; import java.util.Set; @@ -88,7 +89,7 @@ private String slurpHadoop(Path p, FileSystem fs) throws IOException { String 
contents = null; try { in.readFully(in.getPos(), buf); - contents = new String(buf, "UTF-8"); + contents = new String(buf, StandardCharsets.UTF_8); } finally { in.close(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index 5c934d8f9d117..ca0bd0ffa84df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -36,7 +36,7 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase { @SuppressWarnings({"deprecation", "methodlength"}) @Override public void initializeMemberVariables() { - xmlFilename = new String("yarn-default.xml"); + xmlFilename = "yarn-default.xml"; configurationClasses = new Class[] { YarnConfiguration.class }; // Allocate for usage diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java index b1515a5b6fbc8..bb68667f1164d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/AppCatalogSolrClient.java @@ -34,8 +34,8 @@ import org.apache.hadoop.yarn.appcatalog.utils.RandomWord; import org.apache.hadoop.yarn.appcatalog.utils.WordLengthException; import org.apache.hadoop.yarn.service.api.records.Service; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.SolrQuery.ORDER; @@ -54,7 +54,7 @@ */ public class AppCatalogSolrClient { - private static final Log LOG = LogFactory.getLog(AppCatalogSolrClient.class); + private static final Logger LOG = LoggerFactory.getLogger(AppCatalogSolrClient.class); private static String urlString; public AppCatalogSolrClient() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java index 3a6c67d73700f..37a294cc40534 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-catalog/hadoop-yarn-applications-catalog-webapp/src/main/java/org/apache/hadoop/yarn/appcatalog/application/YarnServiceClient.java @@ -21,8 +21,8 @@ import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.appcatalog.model.AppEntry; @@ -45,7 +45,7 @@ */ public class YarnServiceClient { - private static final Log LOG = LogFactory.getLog(YarnServiceClient.class); + private static final Logger LOG = LoggerFactory.getLogger(YarnServiceClient.class); private static Configuration conf = new Configuration(); private static ClientConfig getClientConfig() { ClientConfig config = new DefaultClientConfig(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java index f3bb5fde3a4d3..c6fe29f65fbcc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java @@ -25,7 +25,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.net.InetAddress; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.EnumSet; import java.util.Map; @@ -235,10 +235,10 @@ public void launchAM(ApplicationAttemptId attemptId) final BufferedReader errReader = new BufferedReader(new InputStreamReader( - amProc.getErrorStream(), Charset.forName("UTF-8"))); + amProc.getErrorStream(), StandardCharsets.UTF_8)); final BufferedReader inReader = new BufferedReader(new InputStreamReader( - amProc.getInputStream(), Charset.forName("UTF-8"))); + amProc.getInputStream(), StandardCharsets.UTF_8)); // read error and input streams as this would free up the buffers // free the error stream buffer diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java index 0ccc149098c15..c0d5272a124e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/client/ApiServiceClient.java @@ -30,7 +30,7 @@ import javax.ws.rs.core.MediaType; import javax.ws.rs.core.UriBuilder; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.thirdparty.com.google.common.base.Strings; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java index eb999cb9a5b5f..2deffb8674f7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceContext.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.cache.LoadingCache; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.security.client.ClientToAMTokenSecretManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java index 5536b41008927..d37e5f0e205a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceManager.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.EventHandler; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java index 48f0ebc9c0fed..dd2d0a39a07ab 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/Component.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service.component; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.api.records.Container; import 
org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ExecutionType; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java index 9ecfd3c41dbbe..47a833a4068f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/ComponentEvent.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java index 932f97c4797ca..f88252618c32f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstanceEvent.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.component.instance; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.event.AbstractEvent; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java index fc6e2f240a81e..7db7894923011 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/AbstractLauncher.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerRetryContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java index 0f9aebe354370..70e3fc23abbd6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/CommandLineBuilder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.service.utils.ServiceUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java index ac79aa4e74f7c..3debc45f670a7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/ContainerLaunchService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.Container; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java index 69e16281cd78e..5578ea3624ee0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/containerlaunch/JavaCommandLineBuilder.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.service.containerlaunch; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.ApplicationConstants; import org.apache.hadoop.yarn.service.utils.ServiceUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java index 00e072ddb5bdb..6a7464f41f60c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderService.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.provider; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.service.api.records.Service; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java index 788e7e2281be0..189dc70a4a4a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/registry/YarnRegistryViewForProviders.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.registry; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.PathNotFoundException; import org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java index ba9ebc2ffaa74..25cab87d231bc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ClientRegistryBinder.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.fs.PathNotFoundException; import 
org.apache.hadoop.registry.client.api.RegistryConstants; import org.apache.hadoop.registry.client.api.RegistryOperations; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java index 5d103a98855fb..c5ff20a609018 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ConfigHelper.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.service.exceptions.BadConfigException; import org.slf4j.Logger; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java index 1026092f54bb7..6978a02a09809 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/CoreFileSystem.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FSDataInputStream; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java index 8e8c2ea929e00..3b39c10bf8931 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/PublishedConfigurationOutputter.java @@ -18,8 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.io.FileUtils; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; @@ -72,7 +71,7 @@ public void 
save(File dest) throws IOException { * @throws IOException */ public void save(OutputStream out) throws IOException { - IOUtils.write(asString(), out, Charsets.UTF_8); + IOUtils.write(asString(), out, StandardCharsets.UTF_8); } /** * Convert to a string diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java index d5926b15e48c7..d2cd0167a102c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceApiUtil.java @@ -20,7 +20,7 @@ import com.fasterxml.jackson.databind.PropertyNamingStrategies; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ArrayListMultimap; import org.apache.hadoop.thirdparty.com.google.common.collect.Multimap; import org.apache.hadoop.util.Sets; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java index 2de2223cec133..2034cc3b5d5aa 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/utils/ServiceUtils.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.service.utils; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.lang3.ArrayUtils; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 13b39b5c99673..f0e689d8d2830 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -48,7 +48,7 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java index faacf61ed8cd8..76e3dd2b07a54 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/async/AMRMClientAsync.java @@ -56,7 +56,7 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index aaa586e043d0f..274920f7e1bed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -81,7 +81,7 @@ import org.apache.hadoop.classification.VisibleForTesting; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.util.resource.Resources; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 74a853a8817e0..bb783e097ba00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -22,7 +22,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; import java.util.*; @@ -357,7 +357,7 @@ private int printApplicationAttemptReport(String applicationAttemptId) // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter appAttemptReportStr = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); if (appAttemptReport != null) { appAttemptReportStr.println("Application Attempt Report : "); appAttemptReportStr.print("\tApplicationAttempt-Id : "); @@ -381,11 +381,11 @@ private int printApplicationAttemptReport(String applicationAttemptId) appAttemptReportStr.print("Application Attempt with id '" + applicationAttemptId + "' doesn't exist in Timeline Server."); appAttemptReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return -1; } appAttemptReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } @@ -417,7 +417,7 @@ private int printContainerReport(String containerId) throws YarnException, // Use PrintWriter.println, which uses correct platform line ending. ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter containerReportStr = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); if (containerReport != null) { containerReportStr.println("Container Report : "); containerReportStr.print("\tContainer-Id : "); @@ -446,11 +446,11 @@ private int printContainerReport(String containerId) throws YarnException, containerReportStr.print("Container with id '" + containerId + "' doesn't exist in Timeline Server."); containerReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return -1; } containerReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } @@ -468,7 +468,7 @@ private void listApplications(Set appTypes, EnumSet appStates, Set appTags) throws YarnException, IOException { PrintWriter writer = new PrintWriter( - new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); + new OutputStreamWriter(sysout, StandardCharsets.UTF_8)); if (allAppStates) { for (YarnApplicationState appState : YarnApplicationState.values()) { appStates.add(appState); @@ -610,7 +610,7 @@ private int printApplicationReport(String applicationId) // Use PrintWriter.println, which uses correct platform line ending. 
ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter appReportStr = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); if (appReport != null) { appReportStr.println("Application Report : "); appReportStr.print("\tApplication-Id : "); @@ -668,11 +668,11 @@ private int printApplicationReport(String applicationId) appReportStr.print("Application with id '" + applicationId + "' doesn't exist in RM."); appReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return -1; } appReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } @@ -713,7 +713,7 @@ private String getAllValidApplicationStates() { private void listApplicationAttempts(String applicationId) throws YarnException, IOException { PrintWriter writer = new PrintWriter( - new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); + new OutputStreamWriter(sysout, StandardCharsets.UTF_8)); List appAttemptsReport = client .getApplicationAttempts(ApplicationId.fromString(applicationId)); @@ -741,7 +741,7 @@ private void listApplicationAttempts(String applicationId) throws YarnException, private void listContainers(String appAttemptId) throws YarnException, IOException { PrintWriter writer = new PrintWriter( - new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); + new OutputStreamWriter(sysout, StandardCharsets.UTF_8)); List appsReport = client.getContainers( ApplicationAttemptId.fromString(appAttemptId)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java index 676a0cfe58ca2..ee063fb2e0a94 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ClusterCLI.java @@ -23,7 +23,7 @@ import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; @@ -123,12 +123,12 @@ public int run(String[] args) throws Exception { private void printClusterNodeAttributes() throws IOException, YarnException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); for (NodeAttributeInfo attribute : client.getClusterAttributes()) { pw.println(attribute.toString()); } pw.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); } void printClusterNodeLabels() throws YarnException, IOException { @@ -158,11 +158,11 @@ void printClusterNodeLabels() throws YarnException, IOException { void printUsage(Options opts) throws UnsupportedEncodingException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = - new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new PrintWriter(new OutputStreamWriter(baos, StandardCharsets.UTF_8)); new HelpFormatter().printHelp(pw, HelpFormatter.DEFAULT_WIDTH, TITLE, null, opts, 
HelpFormatter.DEFAULT_LEFT_PAD, HelpFormatter.DEFAULT_DESC_PAD, null); pw.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java index 116b429718880..625e6f58c7b8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeAttributesCLI.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.client.cli; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.util.Lists; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; @@ -57,7 +57,7 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.io.UnsupportedEncodingException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -140,10 +140,10 @@ private void print(StringBuilder usageBuilder) throws UnsupportedEncodingException { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = - new PrintWriter(new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new PrintWriter(new OutputStreamWriter(baos, StandardCharsets.UTF_8)); pw.write(usageBuilder.toString()); pw.close(); - errOut.println(baos.toString("UTF-8")); + errOut.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); } private Options buildOptions(CommandHandler... 
handlers) { @@ -379,7 +379,7 @@ public int printNodesByAttributes(String[] attrs) protocol.getAttributesToNodes(request); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter writer = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); writer.format(HOSTNAMEVAL, "Hostname", "Attribute-value"); response.getAttributesToNodes().forEach((attributeKey, v) -> { writer.println(getKeyString(attributeKey) + " :"); @@ -388,7 +388,7 @@ public int printNodesByAttributes(String[] attrs) attrVal.getAttributeValue())); }); writer.close(); - sysOut.println(baos.toString("UTF-8")); + sysOut.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } @@ -404,7 +404,7 @@ private int printAttributesByNode(String[] nodeArray) response.getNodeToAttributes(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter writer = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); writer.printf(NODEATTRIBUTE, "Attribute", "Type", "Value"); nodeToAttrs.forEach((node, v) -> { // print node header @@ -414,7 +414,7 @@ private int printAttributesByNode(String[] nodeArray) attr.getAttributeType().name(), attr.getAttributeValue())); }); writer.close(); - sysOut.println(baos.toString("UTF-8")); + sysOut.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } @@ -426,14 +426,14 @@ private int printClusterAttributes() throws IOException, YarnException { protocol.getClusterNodeAttributes(request); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter writer = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); writer.format(NODEATTRIBUTEINFO, "Attribute", "Type"); for (NodeAttributeInfo attr : response.getNodeAttributes()) { writer.format(NODEATTRIBUTEINFO, getKeyString(attr.getAttributeKey()), attr.getAttributeType().name()); } writer.close(); - sysOut.println(baos.toString("UTF-8")); + sysOut.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); return 0; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index 6120a8496a611..317f30cdde914 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collections; import java.util.Date; @@ -177,7 +177,7 @@ private void printUsage(Options opts) { private void listClusterNodes(Set nodeStates) throws YarnException, IOException { PrintWriter writer = new PrintWriter( - new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); + new OutputStreamWriter(sysout, StandardCharsets.UTF_8)); List nodesReport = client.getNodeReports( nodeStates.toArray(new NodeState[0])); writer.println("Total Nodes:" + nodesReport.size()); @@ -202,7 +202,7 @@ private void listClusterNodes(Set nodeStates) private void listDetailedClusterNodes(Set nodeStates) throws YarnException, 
IOException { PrintWriter writer = new PrintWriter(new OutputStreamWriter(sysout, - Charset.forName("UTF-8"))); + StandardCharsets.UTF_8)); List nodesReport = client.getNodeReports(nodeStates .toArray(new NodeState[0])); writer.println("Total Nodes:" + nodesReport.size()); @@ -265,7 +265,7 @@ private void printNodeStatus(String nodeIdStr) throws YarnException, // Use PrintWriter.println, which uses correct platform line ending. ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter nodeReportStr = new PrintWriter( - new OutputStreamWriter(baos, Charset.forName("UTF-8"))); + new OutputStreamWriter(baos, StandardCharsets.UTF_8)); NodeReport nodeReport = null; for (NodeReport report : nodesReport) { if (!report.getNodeId().equals(nodeId)) { @@ -347,7 +347,7 @@ private void printNodeStatus(String nodeIdStr) throws YarnException, + nodeIdStr); } nodeReportStr.close(); - sysout.println(baos.toString("UTF-8")); + sysout.println(new String(baos.toByteArray(), StandardCharsets.UTF_8)); } private String getAllValidNodeStates() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java index 6ce38424d3b08..69fb26aef4f1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/QueueCLI.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.text.DecimalFormat; import java.util.Set; @@ -88,17 +88,17 @@ public int run(String[] args) throws Exception { /** * It prints the usage of the command - * + * * @param opts */ @VisibleForTesting void printUsage(Options opts) { new HelpFormatter().printHelp(QUEUE, opts); } - + /** * Lists the Queue Information matching the given queue name - * + * * @param queueName * @throws YarnException * @throws IOException @@ -106,7 +106,7 @@ void printUsage(Options opts) { private int listQueue(String queueName) throws YarnException, IOException { int rc; PrintWriter writer = new PrintWriter( - new OutputStreamWriter(sysout, Charset.forName("UTF-8"))); + new OutputStreamWriter(sysout, StandardCharsets.UTF_8)); QueueInfo queueInfo = client.getQueueInfo(queueName); if (queueInfo != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java index d165ebf8b42b2..5c0929750f851 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/RMAdminCLI.java @@ -81,7 +81,7 @@ import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap; import static org.apache.hadoop.yarn.client.util.YarnClientUtils.NO_LABEL_ERR_MSG; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestSharedCacheClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestSharedCacheClientImpl.java index b297d926c054f..276905b0b78e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestSharedCacheClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestSharedCacheClientImpl.java @@ -27,6 +27,7 @@ import java.io.DataOutputStream; import java.io.FileNotFoundException; import java.io.IOException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -173,7 +174,7 @@ private Path makeFile(String filename) throws Exception { DataOutputStream out = null; try { out = localFs.create(file); - out.write(input.getBytes("UTF-8")); + out.write(input.getBytes(StandardCharsets.UTF_8)); } finally { if(out != null) { out.close(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java index cab4bda76c477..12ac21bc98118 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestNodeAttributesCLI.java @@ -41,6 +41,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.Set; @@ -60,7 +61,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.base.Joiner; /** @@ -528,8 +528,8 @@ private int runTool(String... 
       args) throws Exception {
     sysOutBytes.reset();
     LOG.info("Running: NodeAttributesCLI " + Joiner.on(" ").join(args));
     int ret = nodeAttributesCLI.run(args);
-    errOutput = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
-    sysOutput = new String(sysOutBytes.toByteArray(), Charsets.UTF_8);
+    errOutput = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8);
+    sysOutput = new String(sysOutBytes.toByteArray(), StandardCharsets.UTF_8);
     LOG.info("Err_output:\n" + errOutput);
     LOG.info("Sys_output:\n" + sysOutput);
     return ret;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
index 76035c2130ec2..dd60d233e7c80 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestRMAdminCLI.java
@@ -38,6 +38,7 @@
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;
+import java.nio.charset.StandardCharsets;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.Map;
@@ -85,7 +86,6 @@
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
-import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 public class TestRMAdminCLI {
@@ -1061,7 +1061,7 @@ public void testRMHAErrorUsage() throws Exception {
     try {
       String[] args = {"-transitionToActive"};
       assertEquals(-1, rmAdminCLIWithHAEnabled.run(args));
-      String errOut = new String(errOutBytes.toByteArray(), Charsets.UTF_8);
+      String errOut = new String(errOutBytes.toByteArray(), StandardCharsets.UTF_8);
       errOutBytes.reset();
       assertTrue(errOut.contains("Usage: rmadmin"));
     } finally {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java
index 24661e2054701..7212dd55133cb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoRequestPBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoRequest;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java
index ece903f78936a..b094fa2621d83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetPluginInfoResponsePBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoResponse;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java
index 82bb6e314393c..72854ad5497cd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeRequestPBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableMap;
 import org.apache.hadoop.thirdparty.protobuf.TextFormat;
 import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java
index 43c4ada4f0f4f..48742cc2587fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodePublishVolumeResponsePBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeResponse;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java
index 46a59b46fd739..2d64650e9c574 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeRequestPBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.protobuf.TextFormat;
 import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeRequest;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java
index 8b170ea88e072..2ad583718970c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/NodeUnpublishVolumeResponsePBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeResponse;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java
index 09aabe05ffa5f..660b24bc66fee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesRequestPBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java
index 2d5421d7d0775..3653371e7de66 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/ValidateVolumeCapabilitiesResponsePBImpl.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.api.protocolrecords.impl.pb;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesResponse;
 import org.apache.hadoop.yarn.proto.CsiAdaptorProtos;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
index 607d83245e7c0..e7b2078568e7c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptIdPBImpl.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Private
 @Unstable
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
index 1141ef31834bc..31b40f3857bb5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationIdPBImpl.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Private
 @Unstable
@@ -72,4 +72,4 @@ protected void build() {
     proto = builder.build();
     builder = null;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java
index 2a0f0eef8f3fa..134ce6617b653 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java
@@ -25,7 +25,7 @@
 import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Private
 @Unstable
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
index 95bc29b2e5c0a..8c426c4902bba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeIdPBImpl.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Private
 @Unstable
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java
index a87c7d90e45fd..ab2c9a654fce9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationIdPBImpl.java
@@ -23,7 +23,7 @@
 import org.apache.hadoop.yarn.api.records.ReservationId;
 import org.apache.hadoop.yarn.proto.YarnProtos.ReservationIdProto;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Private
 @Unstable
@@ -72,4 +72,4 @@ protected void build() {
     proto = builder.build();
     builder = null;
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java
index 35fe945a07fab..1c77381590896 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceOptionPBImpl.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProto;
 import org.apache.hadoop.yarn.proto.YarnProtos.ResourceOptionProtoOrBuilder;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 public class ResourceOptionPBImpl extends ResourceOption {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
index c69a9df561bb5..c56f9b30c8e1e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ClientRMProxy.java
@@ -42,7 +42,7 @@
 import org.apache.hadoop.yarn.server.api.ResourceManagerAdministrationProtocol;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
index b7775b6fbd59d..92677af283260 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/ServerProxy.java
@@ -41,7 +41,7 @@
 import org.apache.hadoop.yarn.exceptions.NMNotYetReadyException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 @Public
 @Unstable
@@ -104,4 +104,4 @@ public T run() {
     });
     return (T) RetryProxy.create(protocol, proxy, retryPolicy);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
index d665f56955ae1..f813aeb102c97 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineClientImpl.java
@@ -32,8 +32,8 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability.Evolving;
 import org.apache.hadoop.conf.Configuration;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
index 35b821e4df1ad..9e7cc657a5984 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/api/impl/TimelineConnector.java
@@ -39,8 +39,8 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
 import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -59,7 +59,7 @@
 import org.apache.hadoop.classification.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import com.sun.jersey.api.client.Client;
 import com.sun.jersey.api.client.ClientHandlerException;
 import com.sun.jersey.api.client.ClientRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
index f4a03b8950b62..a46474ef1b1e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java
@@ -31,7 +31,6 @@
 import java.io.OutputStream;
 import java.io.PrintStream;
 import java.io.Writer;
-import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
@@ -288,7 +287,7 @@ public void write(DataOutputStream out, Set pendingUploadFiles)
           this.uploadedFiles.add(logFile);
         } catch (IOException e) {
           String message = logErrorMessage(logFile, e);
-          out.write(message.getBytes(Charset.forName("UTF-8")));
+          out.write(message.getBytes(StandardCharsets.UTF_8));
         } finally {
           IOUtils.cleanupWithLogger(LOG, in);
         }
@@ -1064,7 +1063,7 @@ public String nextLog() throws IOException {
             new BoundedInputStream(valueStream, currentLogLength);
         currentLogData.setPropagateClose(false);
         currentLogISR = new InputStreamReader(currentLogData,
-            Charset.forName("UTF-8"));
+            StandardCharsets.UTF_8);
         currentLogType = logType;
       } catch (EOFException e) {
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
index 3c56b0290d74e..cc137ba696ece 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java
@@ -25,7 +25,7 @@
 import java.nio.channels.Channels;
 import java.nio.channels.FileChannel;
 import java.nio.channels.WritableByteChannel;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.nio.file.Files;
 import java.nio.file.Paths;
@@ -68,7 +68,7 @@ private static byte[] formatContainerLogHeader(String containerId,
         .append("LogLastModifiedTime:" + lastModifiedTime + "\n")
         .append("LogLength:" + fileLength + "\n")
         .append("LogContents:\n");
-    return sb.toString().getBytes(Charset.forName("UTF-8"));
+    return sb.toString().getBytes(StandardCharsets.UTF_8);
   }
   /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
index 4268c095d3915..fea5be66243af 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileControllerFactory.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.logaggregation.filecontroller;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collection;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationHtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationHtmlBlock.java
index 4ec8794b14587..b374f5314521d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationHtmlBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationHtmlBlock.java
@@ -27,7 +27,7 @@
 import java.io.IOException;
 import java.io.InputStream;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.Map;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
@@ -196,7 +196,7 @@ protected void processContainerLog(Block html, long[] range, InputStream in,
     Hamlet.PRE pre = html.pre();
     while (toRead > 0 && (len = in.read(cbuf, 0, currentToRead)) > 0) {
-      pre.__(new String(cbuf, 0, len, Charset.forName("UTF-8")));
+      pre.__(new String(cbuf, 0, len, StandardCharsets.UTF_8));
       toRead = toRead - len;
       currentToRead = toRead > bufferSize ? bufferSize : (int) toRead;
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index e2f7832f64256..53b1c683e2c89 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
@@ -26,7 +26,7 @@
 import java.io.InputStream;
 import java.io.OutputStream;
 import java.io.Serializable;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
 import java.security.PrivilegedExceptionAction;
@@ -217,7 +217,7 @@ public Object run() throws Exception {
           // append a simple character("\n") to move the writer cursor, so
           // we could get the correct position when we call
           // fsOutputStream.getStartPos()
-          final byte[] dummyBytes = "\n".getBytes(Charset.forName("UTF-8"));
+          final byte[] dummyBytes = "\n".getBytes(StandardCharsets.UTF_8);
           fsDataOStream.write(dummyBytes);
           fsDataOStream.flush();
@@ -282,7 +282,7 @@ private Pair initializeWriterInRolling(
             int actualLength = checksumFileInputStream.read(b);
             if (actualLength == nameLength) {
               String recoveredLogFile = new String(
-                  b, Charset.forName("UTF-8"));
+                  b, StandardCharsets.UTF_8);
               if (recoveredLogFile.equals(
                   currentRemoteLogFile.getName())) {
                 overwriteCheckSum = false;
@@ -336,7 +336,7 @@ private Pair initializeWriterInRolling(
           String fileName = aggregatedLogFile.getName();
           checksumFileOutputStream.writeInt(fileName.length());
           checksumFileOutputStream.write(fileName.getBytes(
-              Charset.forName("UTF-8")));
+              StandardCharsets.UTF_8));
           checksumFileOutputStream.writeLong(
               currentAggregatedLogFileLength);
           checksumFileOutputStream.flush();
@@ -399,7 +399,7 @@ public void write(LogKey logKey, LogValue logValue) throws IOException {
       if (outputStreamState != null
           && outputStreamState.getOutputStream() != null) {
         outputStreamState.getOutputStream().write(
-            message.getBytes(Charset.forName("UTF-8")));
+            message.getBytes(StandardCharsets.UTF_8));
       }
     } finally {
       IOUtils.cleanupWithLogger(LOG, in);
@@ -594,7 +594,7 @@ public boolean readAggregatedLogs(ContainerLogsRequest logRequest,
                 Times.format(candidate.getLastModifiedTime()),
                 in, os, buf, ContainerLogAggregationType.AGGREGATED);
             byte[] b = aggregatedLogSuffix(candidate.getFileName())
-                .getBytes(Charset.forName("UTF-8"));
+                .getBytes(StandardCharsets.UTF_8);
             os.write(b, 0, b.length);
             findLogs = true;
           } catch (IOException e) {
@@ -725,7 +725,7 @@ public Map parseCheckSumFiles(
           byte[] b = new byte[nameLength];
           int actualLength = checksumFileInputStream.read(b);
           if (actualLength == nameLength) {
-            nodeName = new String(b, Charset.forName("UTF-8"));
+            nodeName = new String(b, StandardCharsets.UTF_8);
             index = checksumFileInputStream.readLong();
           } else {
             continue;
@@ -870,9 +870,9 @@ public IndexedLogsMeta loadIndexedLogsMeta(Path remoteLogPath, long end,
         if (LOG.isDebugEnabled()) {
           LOG.debug("the length of loaded UUID:{}", uuidReadLen);
           LOG.debug("the loaded UUID:{}", new String(uuidRead,
-              Charset.forName("UTF-8")));
+              StandardCharsets.UTF_8));
           LOG.debug("the expected UUID:{}", new String(this.uuid,
-              Charset.forName("UTF-8")));
+              StandardCharsets.UTF_8));
         }
         throw new IOException("The UUID from " + remoteLogPath + " is not correct. The offset of loaded UUID is "
@@ -1278,7 +1278,7 @@ private byte[] createUUID(ApplicationId appId) throws IOException {
     try {
       MessageDigest digest = MessageDigest.getInstance("SHA-256");
      return digest.digest(appId.toString().getBytes(
-          Charset.forName("UTF-8")));
+          StandardCharsets.UTF_8));
     } catch (NoSuchAlgorithmException ex) {
       throw new IOException(ex);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
index 2355d30640337..63a7d1c16f72c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/tfile/LogAggregationTFileController.java
@@ -22,7 +22,7 @@
 import java.io.EOFException;
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
 import java.util.List;
@@ -218,7 +218,7 @@ public boolean readAggregatedLogs(ContainerLogsRequest logRequest,
                 valueStream, os, buf, ContainerLogAggregationType.AGGREGATED);
             byte[] b = aggregatedLogSuffix(fileType).getBytes(
-                Charset.forName("UTF-8"));
+                StandardCharsets.UTF_8);
             os.write(b, 0, b.length);
             findLogs = true;
           } else {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java
index 66ccafa92a516..1d240347309ca 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/state/Graph.java
@@ -20,7 +20,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
@@ -190,7 +190,7 @@ public String generateGraphViz() {
   public void save(String filepath) throws IOException {
     try (OutputStreamWriter fout = new OutputStreamWriter(
-        new FileOutputStream(filepath), Charset.forName("UTF-8"))) {
+        new FileOutputStream(filepath), StandardCharsets.UTF_8)) {
       fout.write(generateGraphViz());
     }
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
index db7103e1d6e5b..6d582ca1ec799 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BoundedAppender.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.util;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
index 8b5ca2155c6f8..8b6133c9e2242 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/DockerClientConfigHandler.java
@@ -38,7 +38,6 @@
 import java.io.File;
 import java.io.IOException;
 import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
 import java.nio.charset.StandardCharsets;
 import java.util.Iterator;
@@ -114,7 +113,7 @@ public static Credentials readCredentialsFromConfigFile(Path configFile,
             new DockerCredentialTokenIdentifier(registryUrl, applicationId);
         Token token = new Token<>(tokenId.getBytes(),
-            registryCred.getBytes(Charset.forName("UTF-8")),
+            registryCred.getBytes(StandardCharsets.UTF_8),
             tokenId.getKind(), new Text(registryUrl));
         credentials.addToken(
             new Text(registryUrl + "-" + applicationId), token);
@@ -171,7 +170,7 @@ public static boolean writeDockerCredentialsToPath(File outConfigFile,
         ObjectNode registryCredNode = mapper.createObjectNode();
         registryUrlNode.put(ti.getRegistryUrl(), registryCredNode);
         registryCredNode.put(CONFIG_AUTH_KEY,
-            new String(tk.getPassword(), Charset.forName("UTF-8")));
+            new String(tk.getPassword(), StandardCharsets.UTF_8));
         LOG.debug("Prepared token for write: {}", tk);
       }
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 8bf54b5ddc869..6e13d7a55d0d8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -26,7 +26,7 @@
 import java.io.IOException;
 import java.io.InputStreamReader;
 import java.math.BigInteger;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayDeque;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -519,7 +519,7 @@ private static ProcessInfo constructProcessInfo(ProcessInfo pinfo,
       File pidDir = new File(procfsDir, pinfo.getPid());
       fReader = new InputStreamReader(
           new FileInputStream(
-              new File(pidDir, PROCFS_STAT_FILE)), Charset.forName("UTF-8"));
+              new File(pidDir, PROCFS_STAT_FILE)), StandardCharsets.UTF_8);
       in = new BufferedReader(fReader);
     } catch (FileNotFoundException f) {
       // The process vanished in the interim!
@@ -712,7 +712,7 @@ public String getCmdLine(String procfsDir) {
       fReader = new InputStreamReader(
           new FileInputStream(
             new File(new File(procfsDir, pid.toString()), PROCFS_CMDLINE_FILE)),
-            Charset.forName("UTF-8"));
+            StandardCharsets.UTF_8);
     } catch (FileNotFoundException f) {
       // The process vanished in the interim!
       return ret;
@@ -770,7 +770,7 @@ private static void constructProcessSMAPInfo(ProcessTreeSmapMemInfo pInfo,
       return;
     }
     fReader = new InputStreamReader(
-        new FileInputStream(file), Charset.forName("UTF-8"));
+        new FileInputStream(file), StandardCharsets.UTF_8);
     in = new BufferedReader(fReader);
     ProcessSmapMemoryInfo memoryMappingInfo = null;
     List lines = IOUtils.readLines(in);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
index d0503876b879a..a859ffbc1f235 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Dispatcher.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.webapp;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState;
+import static org.apache.hadoop.util.Preconditions.checkState;
 import java.io.IOException;
 import java.util.Timer;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java
index 5e7aa68b948c9..45e42ea3a5d86 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/Router.java
@@ -18,8 +18,8 @@
 package org.apache.hadoop.yarn.webapp;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkState;
+import static org.apache.hadoop.util.Preconditions.checkNotNull;
+import static org.apache.hadoop.util.Preconditions.checkState;
 import static org.apache.hadoop.yarn.util.StringHelper.djoin;
 import static org.apache.hadoop.yarn.util.StringHelper.join;
 import static org.apache.hadoop.yarn.util.StringHelper.pjoin;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
index 366edaa99473f..e06f3fc839e50 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApp.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.webapp;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.util.Preconditions.checkNotNull;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index 2f02fd7a0762f..f7e3872a24ca8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.yarn.webapp;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.checkNotNull;
+import static org.apache.hadoop.util.Preconditions.checkNotNull;
 import java.io.IOException;
 import java.net.ConnectException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
index 375fb6045d6a3..a85395523c7e7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet/HamletImpl.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.webapp.hamlet;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*;
+import static org.apache.hadoop.util.Preconditions.*;
 import org.apache.hadoop.thirdparty.com.google.common.base.Splitter;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
index c1f5195d6d7a3..ae07ccb245ced 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/hamlet2/HamletImpl.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.webapp.hamlet2;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
-import static org.apache.hadoop.thirdparty.com.google.common.base.Preconditions.*;
+import static org.apache.hadoop.util.Preconditions.*;
 import org.apache.hadoop.thirdparty.com.google.common.base.Splitter;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Iterables;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
index 73351813e7108..aafddd9fd7dfa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexedFileController.java
@@ -25,7 +25,7 @@
 import java.io.PrintStream;
 import java.io.Writer;
 import java.net.URL;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
@@ -263,7 +263,7 @@ public boolean isRollover(final FileContext fc,
       fInput = FileSystem.create(fs, checksumFile, LOG_FILE_UMASK);
       fInput.writeInt(nodeName.length());
       fInput.write(nodeName.getBytes(
-          Charset.forName("UTF-8")));
+          StandardCharsets.UTF_8));
       fInput.writeLong(0);
     } finally {
       IOUtils.closeQuietly(fInput);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
index 9502af469d1bc..5689ecf298f69 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryClientService.java
@@ -58,7 +58,7 @@
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.timeline.security.authorize.TimelinePolicyProvider;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
index d55cb0c1f5041..a55aa29332165 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/LeveldbTimelineStore.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.timeline;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.commons.collections.map.LRUMap;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -48,7 +48,7 @@
 import org.slf4j.LoggerFactory;
 import java.io.IOException;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.*;
 import java.util.Map.Entry;
 import java.util.concurrent.locks.ReentrantLock;
@@ -127,25 +127,25 @@ public class LeveldbTimelineStore extends AbstractService
   //call LevelDb recovery
   static final String BACKUP_EXT = ".backup-";
-  private static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] INDEXED_ENTRY_PREFIX = "i".getBytes(Charset.forName("UTF-8"));
+  private static final byte[] START_TIME_LOOKUP_PREFIX = "k".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] ENTITY_ENTRY_PREFIX = "e".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] INDEXED_ENTRY_PREFIX = "i".getBytes(StandardCharsets.UTF_8);
-  private static final byte[] EVENTS_COLUMN = "e".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] PRIMARY_FILTERS_COLUMN = "f".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] OTHER_INFO_COLUMN = "i".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] RELATED_ENTITIES_COLUMN = "r".getBytes(Charset.forName("UTF-8"));
+  private static final byte[] EVENTS_COLUMN = "e".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] PRIMARY_FILTERS_COLUMN = "f".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] OTHER_INFO_COLUMN = "i".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] RELATED_ENTITIES_COLUMN = "r".getBytes(StandardCharsets.UTF_8);
   private static final byte[] INVISIBLE_REVERSE_RELATED_ENTITIES_COLUMN =
-      "z".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] DOMAIN_ID_COLUMN = "d".getBytes(Charset.forName("UTF-8"));
+      "z".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] DOMAIN_ID_COLUMN = "d".getBytes(StandardCharsets.UTF_8);
-  private static final byte[] DOMAIN_ENTRY_PREFIX = "d".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] OWNER_LOOKUP_PREFIX = "o".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] DESCRIPTION_COLUMN = "d".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] OWNER_COLUMN = "o".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] READER_COLUMN = "r".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] WRITER_COLUMN = "w".getBytes(Charset.forName("UTF-8"));
-  private static final byte[] TIMESTAMP_COLUMN = "t".getBytes(Charset.forName("UTF-8"));
+  private static final byte[] DOMAIN_ENTRY_PREFIX = "d".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] OWNER_LOOKUP_PREFIX = "o".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] DESCRIPTION_COLUMN = "d".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] OWNER_COLUMN = "o".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] READER_COLUMN = "r".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] WRITER_COLUMN = "w".getBytes(StandardCharsets.UTF_8);
+  private static final byte[] TIMESTAMP_COLUMN = "t".getBytes(StandardCharsets.UTF_8);
   private static final byte[] EMPTY_BYTES = new byte[0];
@@ -456,7 +456,7 @@ private static TimelineEntity getEntity(String entityId, String entityType,
         }
       } else if (key[prefixlen] == DOMAIN_ID_COLUMN[0]) {
         byte[] v = iterator.peekNext().getValue();
-        String domainId = new String(v, Charset.forName("UTF-8"));
+        String domainId = new String(v, StandardCharsets.UTF_8);
         entity.setDomainId(domainId);
       } else {
         if (key[prefixlen] !=
@@ -839,7 +839,7 @@ private void put(TimelineEntity entity, TimelinePutResponse response,
       if (domainIdBytes == null) {
         domainId = TimelineDataManager.DEFAULT_DOMAIN_ID;
       } else {
-        domainId = new String(domainIdBytes, Charset.forName("UTF-8"));
+        domainId = new String(domainIdBytes, StandardCharsets.UTF_8);
       }
       if (!domainId.equals(entity.getDomainId())) {
         // in this case the entity will be put, but the relation will be
@@ -894,9 +894,9 @@ private void put(TimelineEntity entity, TimelinePutResponse response,
           return;
         }
       } else {
-        writeBatch.put(key, entity.getDomainId().getBytes(Charset.forName("UTF-8")));
+        writeBatch.put(key, entity.getDomainId().getBytes(StandardCharsets.UTF_8));
         writePrimaryFilterEntries(writeBatch, primaryFilters, key,
-            entity.getDomainId().getBytes(Charset.forName("UTF-8")));
+            entity.getDomainId().getBytes(StandardCharsets.UTF_8));
       }
       db.write(writeBatch);
     } catch (DBException de) {
@@ -928,7 +928,7 @@ private void put(TimelineEntity entity, TimelinePutResponse response,
           // This is the new entity, the domain should be the same
           byte[] key = createDomainIdKey(relatedEntity.getId(),
               relatedEntity.getType(), relatedEntityStartTime);
-          db.put(key, entity.getDomainId().getBytes(Charset.forName("UTF-8")));
+          db.put(key, entity.getDomainId().getBytes(StandardCharsets.UTF_8));
          db.put(createRelatedEntityKey(relatedEntity.getId(),
              relatedEntity.getType(), relatedEntityStartTime,
              entity.getEntityId(), entity.getEntityType()), EMPTY_BYTES);
@@ -1255,7 +1255,7 @@ private static byte[] createOtherInfoKey(String entityId, String entityType,
    * to the end of the array (for parsing other info keys).
    */
   private static String parseRemainingKey(byte[] b, int offset) {
-    return new String(b, offset, b.length - offset, Charset.forName("UTF-8"));
+    return new String(b, offset, b.length - offset, StandardCharsets.UTF_8);
   }
   /**
@@ -1629,9 +1629,9 @@ public void put(TimelineDomain domain) throws IOException {
           domain.getOwner(), domain.getId(), DESCRIPTION_COLUMN);
       if (domain.getDescription() != null) {
         writeBatch.put(domainEntryKey, domain.getDescription().
-            getBytes(Charset.forName("UTF-8")));
+            getBytes(StandardCharsets.UTF_8));
         writeBatch.put(ownerLookupEntryKey, domain.getDescription().
-            getBytes(Charset.forName("UTF-8")));
+            getBytes(StandardCharsets.UTF_8));
       } else {
         writeBatch.put(domainEntryKey, EMPTY_BYTES);
         writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
@@ -1642,17 +1642,17 @@ public void put(TimelineDomain domain) throws IOException {
       ownerLookupEntryKey = createOwnerLookupKey(
           domain.getOwner(), domain.getId(), OWNER_COLUMN);
       // Null check for owner is done before
-      writeBatch.put(domainEntryKey, domain.getOwner().getBytes(Charset.forName("UTF-8")));
-      writeBatch.put(ownerLookupEntryKey, domain.getOwner().getBytes(Charset.forName("UTF-8")));
+      writeBatch.put(domainEntryKey, domain.getOwner().getBytes(StandardCharsets.UTF_8));
+      writeBatch.put(ownerLookupEntryKey, domain.getOwner().getBytes(StandardCharsets.UTF_8));
       // Write readers
       domainEntryKey = createDomainEntryKey(domain.getId(), READER_COLUMN);
       ownerLookupEntryKey = createOwnerLookupKey(
           domain.getOwner(), domain.getId(), READER_COLUMN);
       if (domain.getReaders() != null && domain.getReaders().length() > 0) {
-        writeBatch.put(domainEntryKey, domain.getReaders().getBytes(Charset.forName("UTF-8")));
+        writeBatch.put(domainEntryKey, domain.getReaders().getBytes(StandardCharsets.UTF_8));
         writeBatch.put(ownerLookupEntryKey, domain.getReaders().
-            getBytes(Charset.forName("UTF-8")));
+            getBytes(StandardCharsets.UTF_8));
       } else {
         writeBatch.put(domainEntryKey, EMPTY_BYTES);
         writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
@@ -1663,9 +1663,9 @@ public void put(TimelineDomain domain) throws IOException {
       ownerLookupEntryKey = createOwnerLookupKey(
           domain.getOwner(), domain.getId(), WRITER_COLUMN);
       if (domain.getWriters() != null && domain.getWriters().length() > 0) {
-        writeBatch.put(domainEntryKey, domain.getWriters().getBytes(Charset.forName("UTF-8")));
+        writeBatch.put(domainEntryKey, domain.getWriters().getBytes(StandardCharsets.UTF_8));
         writeBatch.put(ownerLookupEntryKey, domain.getWriters().
-            getBytes(Charset.forName("UTF-8")));
+            getBytes(StandardCharsets.UTF_8));
       } else {
         writeBatch.put(domainEntryKey, EMPTY_BYTES);
         writeBatch.put(ownerLookupEntryKey, EMPTY_BYTES);
@@ -1802,13 +1802,13 @@ private static TimelineDomain getTimelineDomain(
         byte[] value = iterator.peekNext().getValue();
         if (value != null && value.length > 0) {
           if (key[prefix.length] == DESCRIPTION_COLUMN[0]) {
-            domain.setDescription(new String(value, Charset.forName("UTF-8")));
+            domain.setDescription(new String(value, StandardCharsets.UTF_8));
           } else if (key[prefix.length] == OWNER_COLUMN[0]) {
-            domain.setOwner(new String(value, Charset.forName("UTF-8")));
+            domain.setOwner(new String(value, StandardCharsets.UTF_8));
           } else if (key[prefix.length] == READER_COLUMN[0]) {
-            domain.setReaders(new String(value, Charset.forName("UTF-8")));
+            domain.setReaders(new String(value, StandardCharsets.UTF_8));
           } else if (key[prefix.length] == WRITER_COLUMN[0]) {
-            domain.setWriters(new String(value, Charset.forName("UTF-8")));
+            domain.setWriters(new String(value, StandardCharsets.UTF_8));
           } else if (key[prefix.length] == TIMESTAMP_COLUMN[0]) {
             domain.setCreatedTime(readReverseOrderedLong(value, 0));
             domain.setModifiedTime(readReverseOrderedLong(value, 8));
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index 7e71eab182f0c..86799bfdca58b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.timeline;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import java.io.IOException;
 import java.util.ArrayList;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
index ba130c61ba0bf..69a8388b239bb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/lib/ZKClient.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.lib;
 import java.io.IOException;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.List;
 import org.apache.zookeeper.CreateMode;
@@ -56,7 +56,7 @@ public ZKClient(String string) throws IOException {
   public void registerService(String path, String data)
       throws IOException, InterruptedException {
     try {
-      zkClient.create(path, data.getBytes(Charset.forName("UTF-8")),
+      zkClient.create(path, data.getBytes(StandardCharsets.UTF_8),
           ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
     } catch(KeeperException ke) {
       throw new IOException(ke);
@@ -110,7 +110,7 @@ public String getServiceData(String path) throws IOException,
     try {
       Stat stat = new Stat();
       byte[] byteData = zkClient.getData(path, false, stat);
-      data = new String(byteData, Charset.forName("UTF-8"));
+      data = new String(byteData, StandardCharsets.UTF_8);
     } catch(KeeperException ke) {
       throw new IOException(ke);
     }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
index 9542798b93a95..9a73fb308ce0d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMHeartbeatRequestHandler.java
@@ -33,7 +33,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * Extends Thread and provides an implementation that is used for processing the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
index 9f1870188ece6..7c1a1ee0995c9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ServerRMProxy.java
@@ -26,7 +26,7 @@
 import org.apache.hadoop.yarn.client.RMProxy;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -104,4 +104,4 @@ public void checkAllowedProtocols(Class protocol) {
         .checkArgument(protocol.isAssignableFrom(ServerRMProtocols.class),
             "ResourceManager does not support this protocol");
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
index e523ea1c5df35..e0912c8c811b8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/failover/FederationRMFailoverProxyProvider.java
@@ -43,7 +43,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * A FailoverProxyProvider implementation that uses the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
index c3da16bc0410c..8299602497826 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/policies/amrmproxy/LocalityMulticastAMRMProxyPolicy.java
@@ -51,7 +51,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * An implementation of the {@link FederationAMRMProxyPolicy} interface that
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java
index 04feb4a7f2ebc..ab365d8687495 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/records/impl/pb/SubClusterInfoPBImpl.java
@@ -27,7 +27,7 @@
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterInfo;
 import org.apache.hadoop.yarn.server.federation.store.records.SubClusterState;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.protobuf.TextFormat;
 /**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
index 4c8eae0116073..3f0ea6b8e099a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/uam/UnmanagedApplicationManager.java
@@ -69,7 +69,7 @@
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 /**
  * UnmanagedApplicationManager is used to register unmanaged application and
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java
index c4568be8011a7..f396c29a3396e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/LogWebServiceUtils.java
@@ -46,7 +46,7 @@
 import java.io.IOException;
 import java.io.OutputStream;
 import java.lang.reflect.UndeclaredThrowableException;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.HashSet;
 import java.util.Set;
@@ -120,7 +120,7 @@ private static StreamingOutput getStreamingOutput(
               .readAggregatedLogs(request, os);
           if (!findLogs) {
             os.write(("Can not find logs for container:" + containerIdStr)
-                .getBytes(Charset.forName("UTF-8")));
+                .getBytes(StandardCharsets.UTF_8));
           } else {
             if (printEmptyLocalContainerLog) {
               StringBuilder sb = new StringBuilder();
@@ -129,7 +129,7 @@ private static StreamingOutput getStreamingOutput(
                   + "\n");
               sb.append("LogContents:\n");
               sb.append(getNoRedirectWarning() + "\n");
-              os.write(sb.toString().getBytes(Charset.forName("UTF-8")));
+              os.write(sb.toString().getBytes(StandardCharsets.UTF_8));
             }
           }
         }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index e899215291b36..462c371aa7979 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -49,6 +49,7 @@
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.net.InetSocketAddress;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
@@ -1050,7 +1051,7 @@ public synchronized void updateYarnSysFS(Context ctx, String user,
     if (file.createNewFile()) {
       FileOutputStream output = new FileOutputStream(file);
       try {
-        output.write(spec.getBytes("UTF-8"));
+        output.write(spec.getBytes(StandardCharsets.UTF_8));
       } finally {
         output.close();
       }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
index c4d6918cf1486..50b3bcf54794f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
@@ -29,7 +29,7 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.net.URISyntaxException;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
@@ -60,20 +60,20 @@
 /**
  * Windows secure container executor (WSCE).
- * This class offers a secure container executor on Windows, similar to the 
- * LinuxContainerExecutor. As the NM does not run on a high privileged context, 
- * this class delegates elevated operations to the helper hadoopwintuilsvc, 
+ * This class offers a secure container executor on Windows, similar to the
+ * LinuxContainerExecutor. As the NM does not run on a high privileged context,
+ * this class delegates elevated operations to the helper hadoopwintuilsvc,
  * implemented by the winutils.exe running as a service.
  * JNI and LRPC is used to communicate with the privileged service.
  */
 public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
-  
+
   private static final Logger LOG = LoggerFactory
       .getLogger(WindowsSecureContainerExecutor.class);
-  
+
   public static final String LOCALIZER_PID_FORMAT = "STAR_LOCALIZER_%s";
-  
-  
+
+
   /**
    * This class is a container for the JNI Win32 native methods used by WSCE.
   */
@@ -94,8 +94,8 @@ private static class Native {
     /** Initialize the JNI method ID and class ID cache */
     private static native void initWsceNative();
-    
-    
+
+
     /**
      * This class contains methods used by the WindowsSecureContainerExecutor
      * file system operations.
@@ -110,93 +110,93 @@ public static void mkdir(Path dirName) throws IOException { } elevatedMkDirImpl(dirName.toString()); } - - private static native void elevatedMkDirImpl(String dirName) + + private static native void elevatedMkDirImpl(String dirName) throws IOException; - - public static void chown(Path fileName, String user, String group) + + public static void chown(Path fileName, String user, String group) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for chown"); } elevatedChownImpl(fileName.toString(), user, group); } - - private static native void elevatedChownImpl(String fileName, String user, + + private static native void elevatedChownImpl(String fileName, String user, String group) throws IOException; - - public static void move(Path src, Path dst, boolean replaceExisting) + + public static void move(Path src, Path dst, boolean replaceExisting) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for move"); } - elevatedCopyImpl(MOVE_FILE, src.toString(), dst.toString(), + elevatedCopyImpl(MOVE_FILE, src.toString(), dst.toString(), replaceExisting); } - - public static void copy(Path src, Path dst, boolean replaceExisting) + + public static void copy(Path src, Path dst, boolean replaceExisting) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for copy"); } - elevatedCopyImpl(COPY_FILE, src.toString(), dst.toString(), + elevatedCopyImpl(COPY_FILE, src.toString(), dst.toString(), replaceExisting); } - - private static native void elevatedCopyImpl(int operation, String src, + + private static native void elevatedCopyImpl(int operation, String src, String dst, boolean replaceExisting) throws IOException; - + public static void chmod(Path fileName, int mode) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for chmod"); } elevatedChmodImpl(fileName.toString(), mode); } - - private static native void elevatedChmodImpl(String path, int mode) + + private static native void elevatedChmodImpl(String path, int mode) throws IOException; - + public static void killTask(String containerName) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for killTask"); } elevatedKillTaskImpl(containerName); } - - private static native void elevatedKillTaskImpl(String containerName) + + private static native void elevatedKillTaskImpl(String containerName) throws IOException; - public static OutputStream create(Path f, boolean append) + public static OutputStream create(Path f, boolean append) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for create"); } - + long desiredAccess = Windows.GENERIC_WRITE; long shareMode = 0L; - long creationDisposition = append ? + long creationDisposition = append ? 
Windows.OPEN_ALWAYS : Windows.CREATE_ALWAYS; long flags = Windows.FILE_ATTRIBUTE_NORMAL; - + String fileName = f.toString(); fileName = fileName.replace('/', '\\'); - + long hFile = elevatedCreateImpl( fileName, desiredAccess, shareMode, creationDisposition, flags); return new FileOutputStream( WinutilsProcessStub.getFileDescriptorFromHandle(hFile)); } - - private static native long elevatedCreateImpl(String path, + + private static native long elevatedCreateImpl(String path, long desiredAccess, long shareMode, long creationDisposition, long flags) throws IOException; - - + + public static boolean deleteFile(Path path) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for deleteFile"); } - + return elevatedDeletePathImpl(path.toString(), false); } @@ -204,11 +204,11 @@ public static boolean deleteDirectory(Path path) throws IOException { if (!nativeLoaded) { throw new IOException("Native WSCE libraries are required for deleteDirectory"); } - + return elevatedDeletePathImpl(path.toString(), true); } - public native static boolean elevatedDeletePathImpl(String path, + public native static boolean elevatedDeletePathImpl(String path, boolean isDir) throws IOException; } @@ -217,33 +217,33 @@ public native static boolean elevatedDeletePathImpl(String path, * */ public static class WinutilsProcessStub extends Process { - + private final long hProcess; private final long hThread; private boolean disposed = false; - + private final InputStream stdErr; private final InputStream stdOut; private final OutputStream stdIn; - - public WinutilsProcessStub(long hProcess, long hThread, long hStdIn, + + public WinutilsProcessStub(long hProcess, long hThread, long hStdIn, long hStdOut, long hStdErr) { this.hProcess = hProcess; this.hThread = hThread; - + this.stdIn = new FileOutputStream(getFileDescriptorFromHandle(hStdIn)); this.stdOut = new FileInputStream(getFileDescriptorFromHandle(hStdOut)); this.stdErr = new FileInputStream(getFileDescriptorFromHandle(hStdErr)); } - + public static native FileDescriptor getFileDescriptorFromHandle(long handle); - + @Override public native void destroy(); - + @Override public native int exitValue(); - + @Override public InputStream getErrorStream() { return stdErr; @@ -263,7 +263,7 @@ public OutputStream getOutputStream() { public native void resume() throws NativeIOException; } - + public synchronized static WinutilsProcessStub createTaskAsUser( String cwd, String jobName, String user, String pidFile, String cmdLine) throws IOException { @@ -282,12 +282,12 @@ private static native WinutilsProcessStub createTaskAsUser0( } /** - * A shell script wrapper builder for WSCE. - * Overwrites the default behavior to remove the creation of the PID file in - * the script wrapper. WSCE creates the pid file as part of launching the + * A shell script wrapper builder for WSCE. + * Overwrites the default behavior to remove the creation of the PID file in + * the script wrapper. WSCE creates the pid file as part of launching the * task in winutils. 
*/ - private class WindowsSecureWrapperScriptBuilder + private class WindowsSecureWrapperScriptBuilder extends LocalWrapperScriptBuilder { public WindowsSecureWrapperScriptBuilder(Path containerWorkDir) { @@ -310,12 +310,12 @@ protected void writeLocalWrapperScript(Path launchDst, Path pidFile, PrintStream private static class ElevatedFileSystem extends DelegateToFileSystem { /** - * This overwrites certain RawLocalSystem operations to be performed by a + * This overwrites certain RawLocalSystem operations to be performed by a * privileged process. - * + * */ private static class ElevatedRawLocalFilesystem extends RawLocalFileSystem { - + @Override protected boolean mkOneDirWithMode(Path path, File p2f, FsPermission permission) throws IOException { @@ -339,26 +339,26 @@ protected boolean mkOneDirWithMode(Path path, File p2f, } return ret; } - + @Override - public void setPermission(Path p, FsPermission permission) + public void setPermission(Path p, FsPermission permission) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:setPermission: %s %s", p, permission)); } Native.Elevated.chmod(p, permission.toShort()); } - + @Override - public void setOwner(Path p, String username, String groupname) + public void setOwner(Path p, String username, String groupname) throws IOException { if (LOG.isDebugEnabled()) { - LOG.debug(String.format("EFS:setOwner: %s %s %s", + LOG.debug(String.format("EFS:setOwner: %s %s %s", p, username, groupname)); } Native.Elevated.chown(p, username, groupname); } - + @Override protected OutputStream createOutputStreamWithMode(Path f, boolean append, FsPermission permission) throws IOException { @@ -374,19 +374,19 @@ protected OutputStream createOutputStreamWithMode(Path f, boolean append, return os; } finally { if (!success) { - IOUtils.cleanup(LOG, os); + IOUtils.cleanupWithLogger(LOG, os); } } } - + @Override public boolean delete(Path p, boolean recursive) throws IOException { if (LOG.isDebugEnabled()) { LOG.debug(String.format("EFS:delete: %s %b", p, recursive)); } - - // The super delete uses the FileUtil.fullyDelete, - // but we cannot rely on that because we need to use the elevated + + // The super delete uses the FileUtil.fullyDelete, + // but we cannot rely on that because we need to use the elevated // operations to remove the files // File f = pathToFile(p); @@ -396,15 +396,15 @@ public boolean delete(Path p, boolean recursive) throws IOException { } else if (f.isFile()) { return Native.Elevated.deleteFile(p); - } + } else if (f.isDirectory()) { - + // This is a best-effort attempt. There are race conditions in that - // child files can be created/deleted after we snapped the list. + // child files can be created/deleted after we snapped the list. // No need to protect against that case. 
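The IOUtils change in the hunk above swaps the deprecated cleanup(...) helper for cleanupWithLogger, which takes an slf4j Logger and closes each stream, logging rather than propagating any failure to close. A small sketch of the open-then-clean-up-on-error pattern it supports, not part of the patch; the class and method names are hypothetical.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.io.IOUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/** Hypothetical helper showing the cleanupWithLogger pattern. */
public final class SafeStreamOpen {
  private static final Logger LOG = LoggerFactory.getLogger(SafeStreamOpen.class);

  public static OutputStream open(String path) throws IOException {
    OutputStream os = null;
    boolean success = false;
    try {
      os = new FileOutputStream(path);
      // ... further setup that may throw ...
      success = true;
      return os;
    } finally {
      if (!success) {
        // Closes quietly; any secondary failure is logged through the slf4j Logger.
        IOUtils.cleanupWithLogger(LOG, os);
      }
    }
  }
}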
File[] files = FileUtil.listFiles(f); int childCount = files.length; - + if (recursive) { for(File child:files) { if (delete(new Path(child.getPath()), recursive)) { @@ -414,15 +414,15 @@ else if (f.isDirectory()) { } if (childCount == 0) { return Native.Elevated.deleteDirectory(p); - } + } else { throw new IOException("Directory " + f.toString() + " is not empty"); } } else { - // This can happen under race conditions if an external agent + // This can happen under race conditions if an external agent // is messing with the file type between IFs - throw new IOException("Path " + f.toString() + + throw new IOException("Path " + f.toString() + " exists, but is neither a file nor a directory"); } } @@ -430,27 +430,27 @@ else if (f.isDirectory()) { protected ElevatedFileSystem() throws IOException, URISyntaxException { super(FsConstants.LOCAL_FS_URI, - new ElevatedRawLocalFilesystem(), + new ElevatedRawLocalFilesystem(), new Configuration(), FsConstants.LOCAL_FS_URI.getScheme(), false); } } - - private static class WintuilsProcessStubExecutor + + private static class WintuilsProcessStubExecutor implements Shell.CommandExecutor { private Native.WinutilsProcessStub processStub; private StringBuilder output = new StringBuilder(); private int exitCode; - + private enum State { INIT, RUNNING, COMPLETE }; - + private State state;; - + private final String cwd; private final String jobName; private final String userName; @@ -458,9 +458,9 @@ private enum State { private final String cmdLine; public WintuilsProcessStubExecutor( - String cwd, - String jobName, - String userName, + String cwd, + String jobName, + String userName, String pidFile, String cmdLine) { this.cwd = cwd; @@ -469,24 +469,24 @@ public WintuilsProcessStubExecutor( this.pidFile = pidFile; this.cmdLine = cmdLine; this.state = State.INIT; - } - + } + private void assertComplete() throws IOException { if (state != State.COMPLETE) { throw new IOException("Process is not complete"); } } - + public String getOutput () throws IOException { assertComplete(); return output.toString(); } - + public int getExitCode() throws IOException { assertComplete(); return exitCode; } - + public void validateResult() throws IOException { assertComplete(); if (0 != exitCode) { @@ -494,15 +494,15 @@ public void validateResult() throws IOException { throw new IOException("Processs exit code is:" + exitCode); } } - - private Thread startStreamReader(final InputStream stream) + + private Thread startStreamReader(final InputStream stream) throws IOException { Thread streamReaderThread = new Thread() { - + @Override public void run() { try (BufferedReader lines = new BufferedReader( - new InputStreamReader(stream, Charset.forName("UTF-8")))) { + new InputStreamReader(stream, StandardCharsets.UTF_8))) { char[] buf = new char[512]; int nRead; while ((nRead = lines.read(buf, 0, buf.length)) > 0) { @@ -527,7 +527,7 @@ public void execute() throws IOException { Thread stdOutReader = startStreamReader(processStub.getInputStream()); Thread stdErrReader = startStreamReader(processStub.getErrorStream()); - + try { processStub.resume(); processStub.waitFor(); @@ -537,7 +537,7 @@ public void execute() throws IOException { catch(InterruptedException ie) { throw new IOException(ie); } - + exitCode = processStub.exitValue(); state = State.COMPLETE; } @@ -551,15 +551,15 @@ public void close() { } private String nodeManagerGroup; - - /** + + /** * Permissions for user WSCE dirs. 
*/ - static final short DIR_PERM = (short)0750; - - public WindowsSecureContainerExecutor() + static final short DIR_PERM = (short)0750; + + public WindowsSecureContainerExecutor() throws IOException, URISyntaxException { - super(FileContext.getFileContext(new ElevatedFileSystem(), + super(FileContext.getFileContext(new ElevatedFileSystem(), new Configuration())); } @@ -569,26 +569,26 @@ public void setConf(Configuration conf) { nodeManagerGroup = conf.get( YarnConfiguration.NM_WINDOWS_SECURE_CONTAINER_GROUP); } - + @Override protected String[] getRunCommand(String command, String groupId, String userName, Path pidFile, Configuration conf) { File f = new File(command); if (LOG.isDebugEnabled()) { - LOG.debug(String.format("getRunCommand: %s exists:%b", + LOG.debug(String.format("getRunCommand: %s exists:%b", command, f.exists())); } return new String[] { Shell.getWinUtilsPath(), "task", "createAsUser", groupId, userName, pidFile.toString(), "cmd /c " + command }; } - + @Override protected LocalWrapperScriptBuilder getLocalWrapperScriptBuilder( String containerIdStr, Path containerWorkDir) { return new WindowsSecureWrapperScriptBuilder(containerWorkDir); } - + @Override protected void copyFile(Path src, Path dst, String owner) throws IOException { LOG.debug("copyFile: {} -> {} owner:{}", src, dst, owner); @@ -599,19 +599,19 @@ protected void copyFile(Path src, Path dst, String owner) throws IOException { @Override protected void createDir(Path dirPath, FsPermission perms, boolean createParent, String owner) throws IOException { - + // WSCE requires dirs to be 750, not 710 as DCE. // This is similar to how LCE creates dirs // perms = new FsPermission(DIR_PERM); LOG.debug("createDir: {} perm:{} owner:{}", dirPath, perms, owner); - + super.createDir(dirPath, perms, createParent, owner); lfs.setOwner(dirPath, owner, nodeManagerGroup); } @Override - protected void setScriptExecutable(Path script, String owner) + protected void setScriptExecutable(Path script, String owner) throws IOException { LOG.debug("setScriptExecutable: {} owner:{}", script, owner); super.setScriptExecutable(script, owner); @@ -619,7 +619,7 @@ protected void setScriptExecutable(Path script, String owner) } @Override - public Path localizeClasspathJar(Path jarPath, Path target, String owner) + public Path localizeClasspathJar(Path jarPath, Path target, String owner) throws IOException { LOG.debug("localizeClasspathJar: {} {} o:{}", jarPath, target, owner); createDir(target, new FsPermission(DIR_PERM), true, owner); @@ -721,10 +721,10 @@ protected CommandExecutor buildCommandExecutor(String wrapperScriptPath, File wordDir, Map environment) { return new WintuilsProcessStubExecutor( wordDir.toString(), - containerIdStr, userName, pidFile.toString(), + containerIdStr, userName, pidFile.toString(), "cmd /c " + wrapperScriptPath); } - + @Override protected void killContainer(String pid, Signal signal) throws IOException { Native.Elevated.killTask(pid); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java index 821524dd6619d..b0d66ca027d81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java @@ -81,7 +81,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * AMRMProxyService is a service that runs on each node manager that can be used diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java index cfc44f72afd5e..d9815c9950f5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AbstractRequestInterceptor.java @@ -21,7 +21,7 @@ import java.io.IOException; import java.util.Map; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.exceptions.YarnException; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java index 1eaaedccdb6c6..9cc7005e54995 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/FederationInterceptor.java @@ -96,7 +96,7 @@ import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** * Extends the AbstractRequestInterceptor and provides an implementation for diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java index e607c930da9be..794ef9d9a4326 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java @@ -80,7 +80,7 @@ import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.deletion.task.FileDeletionTask; import org.apache.hadoop.yarn.util.FSDownload; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; public class AuxServices extends AbstractService implements ServiceStateChangeListener, EventHandler { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java index 70b28d4fcaa79..c7bdd81a76e4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/SlidingWindowRetryPolicy.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.yarn.server.nodemanager.containermanager.container; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.yarn.api.records.ContainerRetryContext; import org.apache.hadoop.yarn.api.records.ContainerRetryPolicy; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java index 4aabff7fdad9a..63d9374764970 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerCleanup.java @@ -18,7 +18,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java index 2267cf50edef9..f5e987deee074 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsResourceCalculator.java @@ -35,7 +35,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.math.BigInteger; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.function.Function; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -316,7 +316,7 @@ private void processFile(File file, Function processLine) throws YarnException { // Read "procfsDir//stat" file - typically /proc//stat try (InputStreamReader fReader = new InputStreamReader( - new FileInputStream(file), Charset.forName("UTF-8"))) { + new FileInputStream(file), StandardCharsets.UTF_8)) { try (BufferedReader in = new BufferedReader(fReader)) { try { String str; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java index e95b9b7333342..e189d9563f0bd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/RuncContainerRuntime.java @@ -21,8 +21,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; @@ -135,7 +135,7 @@ @InterfaceStability.Unstable public class RuncContainerRuntime extends OCIContainerRuntime { - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( RuncContainerRuntime.class); @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java index da18509921e5b..785f035947fa2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/DockerImagesCommand.java @@ -17,7 +17,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.docker; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import 
org.apache.hadoop.util.Preconditions; /** * Encapsulates the docker images command and its command diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java index 792310360e5f4..070af4e5151fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/runc/ImageTagToManifestPlugin.java @@ -20,8 +20,8 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.runtime.runc; import org.apache.commons.io.IOUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; @@ -77,7 +77,7 @@ public class ImageTagToManifestPlugin extends AbstractService private String manifestDir; private String localImageTagToHashFile; - private static final Log LOG = LogFactory.getLog( + private static final Logger LOG = LoggerFactory.getLogger( ImageTagToManifestPlugin.class); private static final int SHA256_HASH_LENGTH = 64; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java index 8fef64c3c89d0..604a810ec9468 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ContainerLocalizer.java @@ -19,7 +19,7 @@ import static org.apache.hadoop.util.Shell.getAllShells; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 1dd67959ca36f..eceb7b25e48ad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.CGroupElasticMemoryController; import org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources.ResourceHandlerModule; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java index 1dafd07cf4cb9..def45dff7f8e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/fpga/FpgaDevice.java @@ -20,7 +20,7 @@ import java.io.Serializable; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; /** A class that represents an FPGA card. 
*/ public class FpgaDevice implements Serializable { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java index 2769788569864..d0acc9a0bb4bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java @@ -28,7 +28,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.ArrayList; @@ -214,7 +214,7 @@ private void printLocalLogFile(Block html, File logFile) { IOUtils.skipFully(logByteStream, start); InputStreamReader reader = - new InputStreamReader(logByteStream, Charset.forName("UTF-8")); + new InputStreamReader(logByteStream, StandardCharsets.UTF_8); int bufferSize = 65536; char[] cbuf = new char[bufferSize]; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerShellWebSocket.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerShellWebSocket.java index 138f9e0b02ec9..175ee09f8db88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerShellWebSocket.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerShellWebSocket.java @@ -20,7 +20,7 @@ import java.io.IOException; import java.net.URI; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.util.List; import java.util.Map; @@ -77,7 +77,7 @@ public void onText(Session session, String message) throws IOException { if (!message.equals("1{}")) { // Send keystroke to process input byte[] payload; - payload = message.getBytes(Charset.forName("UTF-8")); + payload = message.getBytes(StandardCharsets.UTF_8); if (payload != null) { pair.out.write(payload); pair.out.flush(); @@ -86,7 +86,7 @@ public void onText(Session session, String message) throws IOException { // Render process output int no = pair.in.available(); pair.in.read(buffer, 0, Math.min(no, buffer.length)); - String formatted = new String(buffer, Charset.forName("UTF-8")) + String formatted = new String(buffer, StandardCharsets.UTF_8) .replaceAll("\n", "\r\n"); session.getRemote().sendString(formatted); } @@ -142,7 +142,7 @@ public void onClose(Session session, int status, String reason) { try { LOG.info(session.getRemoteAddress().getHostString() + " closed!"); String exit = "exit\r\n"; - pair.out.write(exit.getBytes(Charset.forName("UTF-8"))); + pair.out.write(exit.getBytes(StandardCharsets.UTF_8)); pair.out.flush(); pair.in.close(); pair.out.close(); diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java index d485c55bc0e84..b3def5d2683d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java @@ -21,7 +21,7 @@ import java.io.FileInputStream; import java.io.IOException; import java.io.OutputStream; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.ArrayList; import java.util.Collection; @@ -490,7 +490,7 @@ public void write(OutputStream os) throws IOException, } sb.append(StringUtils.repeat("*", endOfFile.length() + 50) + "\n\n"); - os.write(sb.toString().getBytes(Charset.forName("UTF-8"))); + os.write(sb.toString().getBytes(StandardCharsets.UTF_8)); // If we have aggregated logs for this container, // output the aggregation logs as well. ApplicationId appId = containerId.getApplicationAttemptId() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index e66b89cee8a35..bb391234bed1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -46,13 +46,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.util.Sets; import java.io.File; import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.nio.file.attribute.FileTime; @@ -426,7 +426,7 @@ public void testCustomizedAuxServiceClassPath() throws Exception { Assert.assertTrue(meta.size() == 1); for(Entry i : meta.entrySet()) { auxName = i.getKey(); - String auxClassPath = Charsets.UTF_8.decode(i.getValue()).toString(); + String auxClassPath = StandardCharsets.UTF_8.decode(i.getValue()).toString(); defaultAuxClassPath = new HashSet(Arrays.asList(StringUtils .getTrimmedStrings(auxClassPath))); } @@ -478,7 +478,7 @@ public void testCustomizedAuxServiceClassPath() throws Exception { Set customizedAuxClassPath = null; for(Entry i : meta.entrySet()) { Assert.assertTrue(auxName.equals(i.getKey())); - String classPath = Charsets.UTF_8.decode(i.getValue()).toString(); + String classPath = StandardCharsets.UTF_8.decode(i.getValue()).toString(); customizedAuxClassPath = new HashSet(Arrays.asList(StringUtils .getTrimmedStrings(classPath))); 
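The charset changes in these hunks are behavior-preserving: StandardCharsets.UTF_8 is the same Charset that Charset.forName("UTF-8") (and Guava's Charsets.UTF_8) resolves to, but as a compile-time constant it needs no name lookup and cannot raise an UnsupportedCharsetException. A brief sketch of the two call shapes involved, not part of the patch; class and file names are illustrative.

import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.List;

public final class Utf8Examples {
  static void writeBanner(OutputStream os, String text) throws IOException {
    // Formerly text.getBytes(Charset.forName("UTF-8")); same bytes, no charset lookup.
    os.write(text.getBytes(StandardCharsets.UTF_8));
  }

  static List<String> readCommands(String file) throws IOException {
    // Formerly Files.readAllLines(path, Charset.forName("UTF-8")).
    return Files.readAllLines(Paths.get(file), StandardCharsets.UTF_8);
  }
}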
Assert.assertTrue(classPath.contains(testJar.getName())); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 8946d3b5c77d5..2b7b4ada89101 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -35,7 +35,6 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.nio.ByteBuffer; -import java.nio.charset.Charset; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; @@ -217,7 +216,7 @@ public void testSpecialCharSymlinks() throws IOException { //Capture output from prelaunch.out List output = Files.readAllLines(Paths.get(localLogDir.getAbsolutePath(), ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); assert(output.contains("hello")); symLinkFile = new File(tmpDir, badSymlink); @@ -548,7 +547,7 @@ public void testInvalidEnvSyntaxDiagnostics() throws IOException { } catch(ExitCodeException e){ //Capture diagnostics from prelaunch.stderr List error = Files.readAllLines(Paths.get(localLogDir.getAbsolutePath(), ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); diagnostics = StringUtils.join("\n", error); } Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java index 63c654463f0b0..33b8434c9a8cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestTrafficController.java @@ -36,7 +36,7 @@ import java.io.File; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.util.Arrays; import java.util.List; @@ -120,7 +120,7 @@ private void verifyTrafficControlOperation(PrivilegedOperation op, Assert.assertTrue(tcCmdsFile.exists()); List tcCmds = Files.readAllLines(tcCmdsFile.toPath(), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); //Verify that the number of commands is the same as expected and verify //that each command is the same, in sequence diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java index fbfa36be8a509..584092725d4ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/TestDockerContainerRuntime.java @@ -77,7 +77,7 @@ import java.io.FileWriter; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.nio.file.attribute.FileAttribute; @@ -1998,7 +1998,7 @@ private List getDockerCommandsForDockerStop( PrivilegedOperation.OperationType.RUN_DOCKER_CMD); String dockerCommandFile = op.getArguments().get(0); return Files.readAllLines(Paths.get(dockerCommandFile), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); } private List getDockerCommandsForSignal( @@ -2464,7 +2464,7 @@ public void testLaunchContainerWithDockerTokens() String dockerCommandFile = args.get(argsCounter++); List dockerCommands = Files - .readAllLines(Paths.get(dockerCommandFile), Charset.forName("UTF-8")); + .readAllLines(Paths.get(dockerCommandFile), StandardCharsets.UTF_8); int expected = 14; int counter = 0; @@ -2600,7 +2600,7 @@ private List readDockerCommands(int invocations) throws IOException, String dockerCommandFile = args.get((https) ? 
14 : 12); List dockerCommands = Files.readAllLines( - Paths.get(dockerCommandFile), Charset.forName("UTF-8")); + Paths.get(dockerCommandFile), StandardCharsets.UTF_8); return dockerCommands; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java index dcba179a28cc7..e5737d9246cec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/docker/TestDockerCommandExecutor.java @@ -37,7 +37,7 @@ import org.junit.Test; import java.io.IOException; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Paths; import java.util.ArrayList; @@ -408,7 +408,7 @@ private List getValidatedDockerCommands( String dockerCommandFile = op.getArguments().get(0); List dockerCommandFileContents = Files .readAllLines(Paths.get(dockerCommandFile), - Charset.forName("UTF-8")); + StandardCharsets.UTF_8); dockerCommands.addAll(dockerCommandFileContents); } return dockerCommands; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java index 28e06b72bc4b4..49c4e2cbb1862 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/gpu/TestGpuDeviceInformationParser.java @@ -26,12 +26,12 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; public class TestGpuDeviceInformationParser { - private static final String UTF_8 = "UTF-8"; private static final double DELTA = 1e-6; @Rule @@ -40,7 +40,7 @@ public class TestGpuDeviceInformationParser { @Test public void testParse() throws IOException, YarnException { File f = new File("src/test/resources/nvidia-smi-sample-output.xml"); - String s = FileUtils.readFileToString(f, UTF_8); + String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8); GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); GpuDeviceInformation info = parser.parseXml(s); @@ -54,7 +54,7 @@ public void testParse() throws IOException, YarnException { @Test public void testParseExcerpt() throws IOException, YarnException { File f = new File("src/test/resources/nvidia-smi-output-excerpt.xml"); - String s = FileUtils.readFileToString(f, UTF_8); + String s = 
FileUtils.readFileToString(f, StandardCharsets.UTF_8); GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); GpuDeviceInformation info = parser.parseXml(s); @@ -69,7 +69,7 @@ public void testParseExcerpt() throws IOException, YarnException { public void testParseConsecutivelyWithSameParser() throws IOException, YarnException { File f = new File("src/test/resources/nvidia-smi-sample-output.xml"); - String s = FileUtils.readFileToString(f, UTF_8); + String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8); for (int i = 0; i < 3; i++) { GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); @@ -99,7 +99,7 @@ public void testParseInvalidRootElement() throws YarnException { @Test public void testParseMissingTags() throws IOException, YarnException { File f = new File("src/test/resources/nvidia-smi-output-missing-tags.xml"); - String s = FileUtils.readFileToString(f, UTF_8); + String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8); GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); GpuDeviceInformation info = parser.parseXml(s); @@ -119,7 +119,7 @@ public void testParseMissingTags() throws IOException, YarnException { @Test public void testParseMissingInnerTags() throws IOException, YarnException { File f =new File("src/test/resources/nvidia-smi-output-missing-tags2.xml"); - String s = FileUtils.readFileToString(f, UTF_8); + String s = FileUtils.readFileToString(f, StandardCharsets.UTF_8); GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); GpuDeviceInformation info = parser.parseXml(s); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java index 1b3a49433fd20..4d81940198372 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAuditLogger.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; -import java.io.UnsupportedEncodingException; import java.net.InetAddress; +import java.nio.charset.StandardCharsets; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -160,12 +160,8 @@ private static void appendCallerContext(StringBuilder sb, CallerContext callerCo } if (signature != null) { - try { - String sigStr = new String(signature, "UTF-8"); - add(Keys.CALLERSIGNATURE, sigStr, sb); - } catch (UnsupportedEncodingException e) { - // ignore this signature - } + String sigStr = new String(signature, StandardCharsets.UTF_8); + add(Keys.CALLERSIGNATURE, sigStr, sb); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java index 84d49cd25b36d..ce9af99854a7e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java +++ 
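The RMAuditLogger hunk above can drop its try/catch because the String(byte[], Charset) constructor, unlike String(byte[], String charsetName), declares no checked exception; the UnsupportedEncodingException branch was unreachable for "UTF-8" in any case. A side-by-side sketch, not part of the patch, with illustrative method names.

import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;

public final class SignatureDecode {
  // Old shape: the compiler requires a catch for an exception that never occurs for "UTF-8".
  static String decodeOld(byte[] signature) {
    try {
      return new String(signature, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      return null; // unreachable in practice
    }
  }

  // New shape: the Charset overload declares no checked exception.
  static String decodeNew(byte[] signature) {
    return new String(signature, StandardCharsets.UTF_8);
  }
}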
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMInfo.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.yarn.server.resourcemanager; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; @@ -31,7 +31,7 @@ * JMX bean for RM info. */ public class RMInfo implements RMInfoMXBean { - private static final Log LOG = LogFactory.getLog(RMNMInfo.class); + private static final Logger LOG = LoggerFactory.getLogger(RMNMInfo.class); private ResourceManager resourceManager; private ObjectName rmStatusBeanName; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 55c8ffb3c0f39..2075b1b4cadc9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -144,7 +144,7 @@ import java.net.InetSocketAddress; import java.net.URI; import java.net.URL; -import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; import java.security.PrivilegedExceptionAction; import java.security.SecureRandom; import java.util.ArrayList; @@ -415,7 +415,7 @@ public ZKCuratorManager createAndStartZKManager(Configuration String defaultFencingAuth = zkRootNodeUsername + ":" + zkRootNodePassword; byte[] defaultFencingAuthData = - defaultFencingAuth.getBytes(Charset.forName("UTF-8")); + defaultFencingAuth.getBytes(StandardCharsets.UTF_8); String scheme = new DigestAuthenticationProvider().getScheme(); AuthInfo authInfo = new AuthInfo(scheme, defaultFencingAuthData); authInfos.add(authInfo); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java index 2ea44d2aa67fb..756c0b7d27cf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/invariants/MetricsInvariantChecker.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.yarn.server.resourcemanager.monitor.invariants; -import org.apache.hadoop.thirdparty.com.google.common.base.Charsets; import org.apache.hadoop.thirdparty.com.google.common.io.Files; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.AbstractMetric; @@ -39,6 +38,7 @@ import javax.script.SimpleBindings; import 
java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -113,7 +113,7 @@ public void init(Configuration config, RMContext rmContext, StringBuilder sb = new StringBuilder(); try { List tempInv = - Files.readLines(new File(invariantFile), Charsets.UTF_8); + Files.readLines(new File(invariantFile), StandardCharsets.UTF_8); boolean first = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java index 92b36147b7216..14509d10fe54b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java @@ -19,7 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index a00e8c894729e..ddf4ba6a61b7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -91,7 +91,7 @@ import org.apache.hadoop.yarn.util.resource.Resources; import org.apache.hadoop.classification.VisibleForTesting; -import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions; +import org.apache.hadoop.util.Preconditions; import org.apache.hadoop.thirdparty.com.google.common.collect.ConcurrentHashMultiset; /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 9d9deddffb049..07f80b58b11e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ 
@@ -163,7 +163,7 @@
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture;
 
 import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.QUEUE_MAPPING;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 0b06feae18170..e85eedebff36d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.SettableFuture;
 import org.slf4j.Logger;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
index a31434b3adc6c..ff58948bbfb01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/JAXBContextResolver.java
@@ -29,8 +29,8 @@
 import javax.ws.rs.ext.Provider;
 import javax.xml.bind.JAXBContext;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UserInfo;
@@ -41,8 +41,8 @@
 @Provider
 public class JAXBContextResolver implements ContextResolver<JAXBContext> {
 
-  private static final Log LOG =
-      LogFactory.getLog(JAXBContextResolver.class.getName());
+  private static final Logger LOG =
+      LoggerFactory.getLogger(JAXBContextResolver.class.getName());
 
   private final Map typesContextMap;
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
index 0c8f742fae246..1799735ce3493 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesAppsModification.java
@@ -30,6 +30,7 @@
 import java.io.StringReader;
 import java.io.StringWriter;
 import java.net.URI;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -925,7 +926,7 @@ public void testAppSubmit(String acceptMedia, String contentMedia)
     Text key = new Text("secret1");
     assertTrue("Secrets missing from credentials object", cs
         .getAllSecretKeys().contains(key));
-    assertEquals("mysecret", new String(cs.getSecretKey(key), "UTF-8"));
+    assertEquals("mysecret", new String(cs.getSecretKey(key), StandardCharsets.UTF_8));
 
     // Check LogAggregationContext
     ApplicationSubmissionContext asc = app.getApplicationSubmissionContext();
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLogInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLogInfo.java
index bccf4b844069c..3d6898a518240 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLogInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timeline-pluginstorage/src/test/java/org/apache/hadoop/yarn/server/timeline/TestLogInfo.java
@@ -20,6 +20,7 @@
 import com.fasterxml.jackson.core.JsonGenerator;
 import com.fasterxml.jackson.core.util.MinimalPrettyPrinter;
 import com.fasterxml.jackson.databind.ObjectMapper;
+import java.nio.charset.StandardCharsets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileContext;
@@ -40,7 +41,6 @@
 import java.io.IOException;
 import java.io.OutputStream;
-import java.nio.charset.Charset;
 import java.util.EnumSet;
 
 import static org.junit.Assert.assertEquals;
@@ -217,7 +217,7 @@ private void writeBrokenFile(Path logPath) throws IOException {
     try {
       String broken = "{ broken { [[]} broken";
       out = PluginStoreTestUtils.createLogFile(logPath, fs);
-      out.write(broken.getBytes(Charset.forName("UTF-8")));
+      out.write(broken.getBytes(StandardCharsets.UTF_8));
       out.close();
       out = null;
     } finally {
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml
index 2f1c96072caad..3337d1551b503 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/pom.xml
@@ -126,6 +126,10 @@
         <groupId>commons-beanutils</groupId>
         <artifactId>commons-beanutils</artifactId>
       </exclusion>
+      <exclusion>
+        <groupId>log4j</groupId>
+        <artifactId>log4j</artifactId>
+      </exclusion>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/test/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/test/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTestUtils.java
index 8e5cf93d587e1..e004cdb592f54 100755
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/test/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTestUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-documentstore/src/test/java/org/apache/hadoop/yarn/server/timelineservice/documentstore/DocumentStoreTestUtils.java
@@ -51,7 +51,7 @@ public static List<TimelineEntity> bakeYarnAppTimelineEntities()
       throws IOException {
     String jsonStr = IOUtils.toString(
         DocumentStoreTestUtils.class.getClassLoader().getResourceAsStream(
-            "documents/test-timeline-entities-doc.json"), "UTF-8");
+            "documents/test-timeline-entities-doc.json"), StandardCharsets.UTF_8);
     return JsonUtils.fromJson(jsonStr,
         new TypeReference<List<TimelineEntity>>() {});
   }
@@ -60,7 +60,7 @@ public static TimelineEntityDocument bakeTimelineEntityDoc()
       throws IOException {
     String jsonStr = IOUtils.toString(
         DocumentStoreTestUtils.class.getClassLoader().getResourceAsStream(
-            "documents/timeline-app-doc.json"), "UTF-8");
+            "documents/timeline-app-doc.json"), StandardCharsets.UTF_8);
     return JsonUtils.fromJson(jsonStr,
         new TypeReference<TimelineEntityDocument>() {});
   }
@@ -68,7 +68,7 @@ public static TimelineEntityDocument bakeTimelineEntityDoc()
   public static FlowActivityDocument bakeFlowActivityDoc() throws IOException {
     String jsonStr = IOUtils.toString(
         DocumentStoreTestUtils.class.getClassLoader().getResourceAsStream(
-            "documents/flowactivity-doc.json"), "UTF-8");
+            "documents/flowactivity-doc.json"), StandardCharsets.UTF_8);
     return JsonUtils.fromJson(jsonStr,
         new TypeReference<FlowActivityDocument>() {});
   }
@@ -76,7 +76,7 @@ public static FlowActivityDocument bakeFlowActivityDoc() throws IOException {
   public static FlowRunDocument bakeFlowRunDoc() throws IOException {
     String jsonStr = IOUtils.toString(
         DocumentStoreTestUtils.class.getClassLoader().getResourceAsStream(
-            "documents/flowrun-doc.json"), "UTF-8");
+            "documents/flowrun-doc.json"), StandardCharsets.UTF_8);
     return JsonUtils.fromJson(jsonStr, new TypeReference<FlowRunDocument>(){});
   }
 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
index 84b1aa2967e6a..f0473d5bf89e1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/pom.xml
@@ -207,6 +207,14 @@
         <groupId>org.eclipse.jetty</groupId>
         <artifactId>jetty-http</artifactId>
       </exclusion>
+      <exclusion>
+        <groupId>org.checkerframework</groupId>
+        <artifactId>checker-qual</artifactId>
+      </exclusion>
+      <exclusion>
+        <groupId>org.javassist</groupId>
+        <artifactId>javassist</artifactId>
+      </exclusion>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
index 6b492c3553e99..f9a063a8c5738 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/ApplicationEntityReader.java
@@ -58,7 +58,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.common.TimelineStorageUtils;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 
 /**
  * Timeline entity reader for application entities that are stored in the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
index 59526f43d2003..a71001df35953 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/EntityTypeReader.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.yarn.server.timelineservice.storage.reader;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.client.Connection;
 import org.apache.hadoop.hbase.client.Result;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
index 04810ac6b2b4d..baff86c2a08f4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowActivityEntityReader.java
@@ -46,7 +46,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTableRW;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 
 /**
  * Timeline entity reader for flow activity entities that are stored in the
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
index 5b2e642634dd4..dfe3775f26eb6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/FlowRunEntityReader.java
@@ -57,7 +57,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowRunTableRW;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 
 /**
  * Timeline entity reader for flow run entities that are stored in the flow run
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
index 8a701e5ffdf7f..a09b4493ffe9e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/GenericEntityReader.java
@@ -63,7 +63,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.entity.EntityTableRW;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 
 /**
  * Timeline entity reader for generic entities that are stored in the entity
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
index 08651e158c458..e769d61cc0588 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-client/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/reader/SubApplicationEntityReader.java
@@ -56,7 +56,7 @@
 import org.apache.hadoop.yarn.server.timelineservice.storage.subapplication.SubApplicationTableRW;
 import org.apache.hadoop.yarn.webapp.BadRequestException;
 
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 
 class SubApplicationEntityReader extends GenericEntityReader {
   private static final SubApplicationTableRW SUB_APPLICATION_TABLE =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
index 044586cde290b..37507c75176e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-server/hadoop-yarn-server-timelineservice-hbase-server-2/pom.xml
@@ -156,6 +156,14 @@
         <groupId>org.eclipse.jetty</groupId>
         <artifactId>jetty-http</artifactId>
       </exclusion>
+      <exclusion>
+        <groupId>org.checkerframework</groupId>
+        <artifactId>checker-qual</artifactId>
+      </exclusion>
+      <exclusion>
+        <groupId>org.javassist</groupId>
+        <artifactId>javassist</artifactId>
+      </exclusion>
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
index bb89b7967ad58..a9ae2b5734409 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/collector/AppLevelTimelineCollector.java
@@ -30,7 +30,7 @@
 import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
 import org.apache.hadoop.classification.VisibleForTesting;
-import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
+import org.apache.hadoop.util.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
index 4e6d22c37a5f4..89979ea3c039a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineReaderImpl.java
@@ -23,7 +23,7 @@
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStreamReader;
-import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
 import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashSet;
@@ -174,7 +174,7 @@ private String getFlowRunPath(String userId, String clusterId,
         APP_FLOW_MAPPING_FILE);
     try (BufferedReader reader =
              new BufferedReader(new InputStreamReader(
-                 fs.open(appFlowMappingFilePath), Charset.forName("UTF-8")));
+                 fs.open(appFlowMappingFilePath), StandardCharsets.UTF_8));
          CSVParser parser = new CSVParser(reader, csvFormat)) {
       for (CSVRecord record : parser.getRecords()) {
         if (record.size() < 4) {
@@ -299,7 +299,7 @@ public int compare(Long l1, Long l2) {
       }
       try (BufferedReader reader = new BufferedReader(
           new InputStreamReader(fs.open(entityFile),
-              Charset.forName("UTF-8")))) {
+              StandardCharsets.UTF_8))) {
         TimelineEntity entity = readEntityFromFile(reader);
         if (!entity.getType().equals(entityType)) {
           continue;
@@ -399,7 +399,7 @@ public TimelineEntity getEntity(TimelineReaderContext context,
     try (BufferedReader reader =
              new BufferedReader(new InputStreamReader(
-                 fs.open(entityFilePath), Charset.forName("UTF-8")))) {
+                 fs.open(entityFilePath), StandardCharsets.UTF_8))) {
       TimelineEntity entity = readEntityFromFile(reader);
       return createEntityToBeReturned(
           entity, dataToRetrieve.getFieldsToRetrieve());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
index d5c70a0607ebb..d6edf50133375 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/FileSystemTimelineWriterImpl.java
@@ -20,6 +20,7 @@
 import java.io.File;
 import java.io.IOException;
+import java.nio.charset.StandardCharsets;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -138,7 +139,7 @@ private synchronized void writeInternal(String clusterId, String userId,
       byte[] record = new StringBuilder()
           .append(TimelineUtils.dumpTimelineRecordtoJSON(entity))
-          .append("\n").toString().getBytes("UTF-8");
+          .append("\n").toString().getBytes(StandardCharsets.UTF_8);
       writeFileWithRetries(filePath, record);
     } catch (Exception ioe) {
       LOG.warn("Interrupted operation:" + ioe.getMessage());
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
index c0187ea08d367..8a3666f11265d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java
@@ -30,6 +30,7 @@
 import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLEncoder;
+import java.nio.charset.StandardCharsets;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.Enumeration;
@@ -283,7 +284,7 @@ private void proxyLink(final HttpServletRequest req,
       StringBuilder sb = new StringBuilder();
       BufferedReader reader =
           new BufferedReader(
-              new InputStreamReader(req.getInputStream(), "UTF-8"));
+              new InputStreamReader(req.getInputStream(), StandardCharsets.UTF_8));
       String line;
       while ((line = reader.readLine()) != null) {
         sb.append(line);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
index 6c8993f6e80b7..acfd21eb3f85c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
@@ -38,6 +38,7 @@
 import java.net.URI;
 import java.net.URL;
 import java.net.SocketTimeoutException;
+import java.nio.charset.StandardCharsets;
 import java.util.Collections;
 import java.util.Enumeration;
 import java.util.List;
@@ -510,7 +511,7 @@ private String readInputStream(InputStream input) throws Exception {
     while ((read = input.read(buffer)) >= 0) {
       data.write(buffer, 0, read);
     }
-    return new String(data.toByteArray(), "UTF-8");
+    return new String(data.toByteArray(), StandardCharsets.UTF_8);
   }
 
   private boolean isResponseCookiePresent(HttpURLConnection proxyConn,