diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedRunnable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedRunnable.java new file mode 100644 index 000000000000..bc1ab873fb12 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedRunnable.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import java.io.IOException; + +/** + * This presents a block of code with a possibility of throwing an IOException. + */ +@FunctionalInterface +public interface CheckedRunnable<E extends IOException> { + void run() throws E; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedSupplier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedSupplier.java new file mode 100644 index 000000000000..1a2bd48e7ba0 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedSupplier.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import java.io.IOException; + +/** + * Similar to {@link java.util.function.Supplier}, this class presents a block + * of code generating a value with a possibility of throwing an IOException. + */ +@FunctionalInterface +public interface CheckedSupplier<T, E extends IOException> { + T get() throws E; +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java new file mode 100644 index 000000000000..48dfc7a533b0 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java @@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.  See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership.  The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.  You may obtain a copy of the License at + * + *      http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.util; + +import org.apache.hadoop.metrics2.lib.MutableRate; + +import java.io.IOException; + +/** + * Encloses helpers to deal with metrics. + */ +public final class MetricUtil { + private MetricUtil() { + } + + public static <T, E extends IOException> T captureLatencyNs( + MutableRate metric, + CheckedSupplier<T, E> block) throws E { + long start = Time.monotonicNowNanos(); + try { + return block.get(); + } finally { + metric.add(Time.monotonicNowNanos() - start); + } + } + + public static void captureLatencyNs( + MutableRate metric, + CheckedRunnable<IOException> block) throws IOException { + long start = Time.monotonicNowNanos(); + try { + block.run(); + } finally { + metric.add(Time.monotonicNowNanos() - start); + } + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 93d5f714222d..98acf63746cc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -162,7 +162,7 @@ private void fetchBlocks() throws IOException { OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volume.getName()) .setBucketName(bucket.getName()).setKeyName(keyName) - .setRefreshPipeline(true).setSortDatanodesInPipeline(true) + .setSortDatanodesInPipeline(true) .setLatestVersionLocation(true).build(); keyInfo = ozoneManagerClient.lookupKey(keyArgs); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index a588859f1cf3..876ed2ade13d 100644 --- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1242,7 +1242,6 @@ public OzoneInputStream getKey( .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .build(); @@ -1266,7 +1265,6 @@ public OzoneInputStream getKey( .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .build(); @@ -1412,7 +1410,6 @@ public OzoneKeyDetails getKeyDetails( .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .build(); @@ -1644,7 +1641,6 @@ public OzoneFileStatus getOzoneFileStatus(String volumeName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .build(); @@ -1702,7 +1698,6 @@ private OzoneInputStream getInputStreamWithRetryFunction( .setVolumeName(omKeyInfo.getVolumeName()) .setBucketName(omKeyInfo.getBucketName()) .setKeyName(omKeyInfo.getKeyName()) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .build(); @@ -1747,7 +1742,6 @@ private OmKeyArgs prepareOmKeyArgs(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(topologyAwareReadEnabled) .setLatestVersionLocation(getLatestVersionLocation) .build(); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 65a2f67883c9..53e64a6199d9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -277,6 +277,7 @@ public static boolean isReadOnly( // write to OM DB. And therefore it doesn't need a OMClientRequest. // Although indirectly the Ranger sync service task could invoke write // operation SetRangerServiceVersion. + case GetKeyInfo: return true; case CreateVolume: case SetVolumeProperty: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyInfoWithVolumeContext.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyInfoWithVolumeContext.java new file mode 100644 index 000000000000..217a543d90ee --- /dev/null +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyInfoWithVolumeContext.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.om.helpers; + +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; + +import java.io.IOException; +import java.util.Optional; + +/** + * Encloses a {@link OmKeyInfo} and optionally a volume context. + */ +public class KeyInfoWithVolumeContext { + /** + * Volume arguments. + */ + private final Optional<OmVolumeArgs> volumeArgs; + + /** + * To be used for client-side operations involving KMS like getDEK(). + */ + private final Optional<String> userPrincipal; + + private final OmKeyInfo keyInfo; + + public KeyInfoWithVolumeContext(OmVolumeArgs volumeArgs, + String userPrincipal, + OmKeyInfo keyInfo) { + this.volumeArgs = Optional.ofNullable(volumeArgs); + this.userPrincipal = Optional.ofNullable(userPrincipal); + this.keyInfo = keyInfo; + } + + public static KeyInfoWithVolumeContext fromProtobuf( + GetKeyInfoResponse proto) throws IOException { + return newBuilder() + .setVolumeArgs(proto.hasVolumeInfo() ? + OmVolumeArgs.getFromProtobuf(proto.getVolumeInfo()) : null) + .setUserPrincipal(proto.getUserPrincipal()) + .setKeyInfo(OmKeyInfo.getFromProtobuf(proto.getKeyInfo())) + .build(); + } + + public GetKeyInfoResponse toProtobuf(int clientVersion) { + GetKeyInfoResponse.Builder builder = GetKeyInfoResponse.newBuilder(); + volumeArgs.ifPresent(v -> builder.setVolumeInfo(v.getProtobuf())); + userPrincipal.ifPresent(builder::setUserPrincipal); + builder.setKeyInfo(keyInfo.getProtobuf(clientVersion)); + return builder.build(); + } + + public OmKeyInfo getKeyInfo() { + return keyInfo; + } + + public Optional<OmVolumeArgs> getVolumeArgs() { + return volumeArgs; + } + + public Optional<String> getUserPrincipal() { + return userPrincipal; + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for KeyInfoWithVolumeContext. 
+ */ + public static class Builder { + private OmVolumeArgs volumeArgs; + private String userPrincipal; + private OmKeyInfo keyInfo; + + public Builder setVolumeArgs(OmVolumeArgs volumeArgs) { + this.volumeArgs = volumeArgs; + return this; + } + + public Builder setUserPrincipal(String userPrincipal) { + this.userPrincipal = userPrincipal; + return this; + } + + public Builder setKeyInfo(OmKeyInfo keyInfo) { + this.keyInfo = keyInfo; + return this; + } + + public KeyInfoWithVolumeContext build() { + return new KeyInfoWithVolumeContext(volumeArgs, userPrincipal, keyInfo); + } + } +} diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index 9e0ba115fc82..fead19f64517 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -21,6 +21,8 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.Auditable; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.jetbrains.annotations.NotNull; import java.util.ArrayList; import java.util.HashMap; @@ -43,21 +45,22 @@ public final class OmKeyArgs implements Auditable { private final String multipartUploadID; private final int multipartUploadPartNumber; private Map metadata; - private boolean refreshPipeline; private boolean sortDatanodesInPipeline; private List acls; private boolean latestVersionLocation; private boolean recursive; private boolean headOp; + private boolean forceUpdateContainerCacheFromSCM; @SuppressWarnings("parameternumber") private OmKeyArgs(String volumeName, String bucketName, String keyName, long dataSize, ReplicationConfig replicationConfig, List locationInfoList, boolean isMultipart, String uploadID, int partNumber, - Map metadataMap, 
boolean refreshPipeline, + Map metadataMap, List acls, boolean sortDatanode, - boolean latestVersionLocation, boolean recursive, boolean headOp) { + boolean latestVersionLocation, boolean recursive, boolean headOp, + boolean forceUpdateContainerCacheFromSCM) { this.volumeName = volumeName; this.bucketName = bucketName; this.keyName = keyName; @@ -68,12 +71,12 @@ private OmKeyArgs(String volumeName, String bucketName, String keyName, this.multipartUploadID = uploadID; this.multipartUploadPartNumber = partNumber; this.metadata = metadataMap; - this.refreshPipeline = refreshPipeline; this.acls = acls; this.sortDatanodesInPipeline = sortDatanode; this.latestVersionLocation = latestVersionLocation; this.recursive = recursive; this.headOp = headOp; + this.forceUpdateContainerCacheFromSCM = forceUpdateContainerCacheFromSCM; } public boolean getIsMultipartKey() { @@ -132,10 +135,6 @@ public List getLocationInfoList() { return locationInfoList; } - public boolean getRefreshPipeline() { - return refreshPipeline; - } - public boolean getSortDatanodes() { return sortDatanodesInPipeline; } @@ -152,6 +151,10 @@ public boolean isHeadOp() { return headOp; } + public boolean isForceUpdateContainerCacheFromSCM() { + return forceUpdateContainerCacheFromSCM; + } + @Override public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); @@ -185,11 +188,26 @@ public OmKeyArgs.Builder toBuilder() { .setMultipartUploadID(multipartUploadID) .setMultipartUploadPartNumber(multipartUploadPartNumber) .addAllMetadata(metadata) - .setRefreshPipeline(refreshPipeline) .setSortDatanodesInPipeline(sortDatanodesInPipeline) .setHeadOp(headOp) .setLatestVersionLocation(latestVersionLocation) - .setAcls(acls); + .setAcls(acls) + .setForceUpdateContainerCacheFromSCM(forceUpdateContainerCacheFromSCM); + } + + @NotNull + public KeyArgs toProtobuf() { + return KeyArgs.newBuilder() + .setVolumeName(getVolumeName()) + .setBucketName(getBucketName()) + .setKeyName(getKeyName()) + .setDataSize(getDataSize()) + 
.setSortDatanodes(getSortDatanodes()) + .setLatestVersionLocation(getLatestVersionLocation()) + .setHeadOp(isHeadOp()) + .setForceUpdateContainerCacheFromSCM( + isForceUpdateContainerCacheFromSCM()) + .build(); } /** @@ -206,12 +224,12 @@ public static class Builder { private String multipartUploadID; private int multipartUploadPartNumber; private Map metadata = new HashMap<>(); - private boolean refreshPipeline; private boolean sortDatanodesInPipeline; private boolean latestVersionLocation; private List acls; private boolean recursive; private boolean headOp; + private boolean forceUpdateContainerCacheFromSCM; public Builder setVolumeName(String volume) { this.volumeName = volume; @@ -273,11 +291,6 @@ public Builder addAllMetadata(Map metadatamap) { return this; } - public Builder setRefreshPipeline(boolean refresh) { - this.refreshPipeline = refresh; - return this; - } - public Builder setSortDatanodesInPipeline(boolean sort) { this.sortDatanodesInPipeline = sort; return this; @@ -298,12 +311,18 @@ public Builder setHeadOp(boolean isHeadOp) { return this; } + public Builder setForceUpdateContainerCacheFromSCM(boolean value) { + this.forceUpdateContainerCacheFromSCM = value; + return this; + } + public OmKeyArgs build() { return new OmKeyArgs(volumeName, bucketName, keyName, dataSize, replicationConfig, locationInfoList, isMultipartKey, multipartUploadID, - multipartUploadPartNumber, metadata, refreshPipeline, acls, - sortDatanodesInPipeline, latestVersionLocation, recursive, headOp); + multipartUploadPartNumber, metadata, acls, + sortDatanodesInPipeline, latestVersionLocation, recursive, headOp, + forceUpdateContainerCacheFromSCM); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index aa69da3ca690..40f515adc448 100644 --- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmDeleteKeys; @@ -272,6 +273,18 @@ default OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ OmKeyInfo lookupKey(OmKeyArgs args) throws IOException; + /** + * Lookup for the container of an existing key. + * + * @param args the args of the key. + * @param assumeS3Context if true OM will automatically lookup the S3 + * volume context and includes in the response. + * @return KeyInfoWithVolumeContext includes info that client uses to talk + * to containers and S3 volume context info if assumeS3Context is set. + */ + KeyInfoWithVolumeContext getKeyInfo(OmKeyArgs args, boolean assumeS3Context) + throws IOException; + /** * Rename an existing key within a bucket. * @param args the args of the key. 
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index e8b12a3389ef..7a248169cd44 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.DBUpdates; import org.apache.hadoop.ozone.om.helpers.DeleteTenantState; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -101,6 +102,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3SecretResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetS3VolumeContextRequest; @@ -774,20 +777,10 @@ public void commitKey(OmKeyArgs args, long clientId) } - @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { LookupKeyRequest.Builder req = LookupKeyRequest.newBuilder(); - KeyArgs keyArgs = 
KeyArgs.newBuilder() - .setVolumeName(args.getVolumeName()) - .setBucketName(args.getBucketName()) - .setKeyName(args.getKeyName()) - .setDataSize(args.getDataSize()) - .setSortDatanodes(args.getSortDatanodes()) - .setLatestVersionLocation(args.getLatestVersionLocation()) - .setHeadOp(args.isHeadOp()) - .build(); - req.setKeyArgs(keyArgs); + req.setKeyArgs(args.toProtobuf()); OMRequest omRequest = createOMRequest(Type.LookupKey) .setLookupKeyRequest(req) @@ -799,6 +792,23 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { return OmKeyInfo.getFromProtobuf(resp.getKeyInfo()); } + @Override + public KeyInfoWithVolumeContext getKeyInfo(OmKeyArgs args, + boolean assumeS3Context) + throws IOException { + GetKeyInfoRequest.Builder req = GetKeyInfoRequest.newBuilder(); + req.setKeyArgs(args.toProtobuf()); + req.setAssumeS3Context(assumeS3Context); + + OMRequest omRequest = createOMRequest(Type.GetKeyInfo) + .setGetKeyInfoRequest(req) + .build(); + + GetKeyInfoResponse resp = + handleError(submitRequest(omRequest)).getGetKeyInfoResponse(); + return KeyInfoWithVolumeContext.fromProtobuf(resp); + } + @Override @Deprecated public void renameKeys(OmRenameKeys omRenameKeys) throws IOException { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index 6b003122adf3..48d152e8da1c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -89,7 +89,6 @@ public Map createKeys(int numOfKeys, int keySize) .setVolumeName(bucket.getVolumeName()) .setBucketName(bucket.getName()) .setKeyName(key) - .setRefreshPipeline(true) .build(); OmKeyInfo location = cluster.getOzoneManager() .lookupKey(arg); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index 3374fa3969ae..8aa27c29e493 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -132,7 +132,6 @@ public void testBCSID() throws Exception { RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setKeyName("ratis") - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); List keyLocationInfos = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java index 816aae70e0ba..d14dda2ef208 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java @@ -145,7 +145,6 @@ public void testBlockWritesWithFlushAndClose() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); waitForContainerClose(key); @@ -179,7 +178,6 @@ public void testBlockWritesCloseConsistency() throws Exception { .setBucketName(bucketName) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); waitForContainerClose(key); @@ -214,7 +212,6 @@ public void testMultiBlockWrites() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setKeyName(keyName) - 
.setRefreshPipeline(true) .build(); waitForContainerClose(key); @@ -275,7 +272,6 @@ public void testMultiBlockWrites2() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); waitForContainerClose(key); @@ -320,7 +316,6 @@ public void testMultiBlockWrites3() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); waitForContainerClose(key); @@ -383,7 +378,6 @@ public void testBlockWriteViaRatis() throws Exception { setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); @@ -418,7 +412,6 @@ public void testBlockWrites() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); waitForContainerClose(key); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index ea881b03439d..664b71e0b286 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -781,7 +781,6 @@ private void validateData(String key, int locationCount, String payload) { .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(key) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = null; try { diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index e5c18d5e4f93..6fa5324af606 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -196,7 +196,6 @@ public void testBlockWritesWithDnFailures() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); @@ -339,7 +338,6 @@ public void testWriteSmallFile() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); @@ -401,7 +399,6 @@ public void testContainerExclusionWithClosedContainerException() .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); @@ -466,7 +463,6 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); @@ -530,7 +526,6 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { .setBucketName(bucketName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = 
cluster.getOzoneManager().lookupKey(keyArgs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 29793b21de4f..7244970ead9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -217,7 +217,6 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index f932dc8e7b58..d6e550116ee6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -181,7 +181,6 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); Assert.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); @@ -234,7 +233,6 @@ public void testMultiBlockWritesWithIntermittentDnFailures() RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) .setKeyName(keyName) - 
.setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); Assert.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 3480738d8fe3..563e628f94c7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -382,7 +382,6 @@ private boolean verifyRatisReplication(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); HddsProtos.ReplicationType replicationType = HddsProtos.ReplicationType.valueOf(type.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index d3729fd1131d..a6566141d797 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -855,7 +855,6 @@ private void verifyReplication(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = ozoneManager.lookupKey(keyArgs); for (OmKeyLocationInfo info: @@ -1350,7 +1349,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); 
builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); + .setKeyName(keyName); OmKeyInfo keyInfo = ozoneManager.lookupKey(builder.build()); List locationInfoList = @@ -1620,7 +1619,7 @@ public void testGetKeyDetails() throws IOException { // First, confirm the key info from the client matches the info in OM. OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); + .setKeyName(keyName); OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()). getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0); long containerID = keyInfo.getContainerID(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 362a218af263..94bfaefcc014 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -110,7 +110,7 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { // equal to getClosestNode. 
OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); + .setKeyName(keyName); // read key with topology aware read enabled try (OzoneInputStream is = bucket.readKey(keyName)) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 791a2267eac8..aeb6ff3de85f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -178,7 +178,7 @@ public void testPutKeyAndGetKeyThreeNodes() // First, confirm the key info from the client matches the info in OM. OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); + .setKeyName(keyName); OmKeyLocationInfo keyInfo = ozoneManager.lookupKey(builder.build()). 
getKeyLocationVersions().get(0).getBlocksLatestVersionOnly().get(0); long containerID = keyInfo.getContainerID(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 1e61d5ee8f80..82af642e5b5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -315,7 +315,6 @@ private boolean verifyRatisReplication(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); HddsProtos.ReplicationType replicationType = HddsProtos.ReplicationType.valueOf(type.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index d12a90e930b0..854ea997fdd6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -201,7 +201,6 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setKeyName(keyName).setDataSize(0) .setReplicationConfig(repConfig) - .setRefreshPipeline(true) .build(); List omKeyLocationInfoGroupList = om.lookupKey(keyArgs).getKeyLocationVersions(); @@ -312,7 +311,6 @@ public void testContainerStatisticsAfterDelete() throws 
Exception { .setReplicationConfig( RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) - .setRefreshPipeline(true) .build(); List omKeyLocationInfoGroupList = om.lookupKey(keyArgs).getKeyLocationVersions(); @@ -508,7 +506,6 @@ public void testBlockDeleteCommandParallelProcess() throws Exception { .setReplicationConfig( RatisReplicationConfig .getInstance(HddsProtos.ReplicationFactor.THREE)) - .setRefreshPipeline(true) .build(); writeClient.deleteKey(keyArgs); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index c1d4fa3e8274..6c5e897472a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -120,7 +120,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { new OmKeyArgs.Builder().setVolumeName("test").setBucketName("test") .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setDataSize(1024) - .setKeyName(keyName).setRefreshPipeline(true).build(); + .setKeyName(keyName).build(); OmKeyLocationInfo omKeyLocationInfo = cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); @@ -178,7 +178,6 @@ public void testCloseContainerViaStandAlone() .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setDataSize(1024) .setKeyName("standalone") - .setRefreshPipeline(true) .build(); OmKeyLocationInfo omKeyLocationInfo = @@ -236,7 +235,7 @@ public void testCloseContainerViaRatis() throws IOException, setBucketName("test") 
.setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) .setDataSize(1024) - .setKeyName("ratis").setRefreshPipeline(true).build(); + .setKeyName("ratis").build(); OmKeyLocationInfo omKeyLocationInfo = cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() @@ -302,7 +301,6 @@ public void testQuasiCloseTransitionViaRatis() .setReplicationConfig(RatisReplicationConfig.getInstance(ONE)) .setDataSize(1024) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyLocationInfo omKeyLocationInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index 494e482c86b4..feb0adbaddfe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -109,7 +109,6 @@ public void test() throws Exception { .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setDataSize(1024) .setKeyName("test") - .setRefreshPipeline(true) .build(); OmKeyLocationInfo omKeyLocationInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index a8aaf04fbdea..e39fe7ffd5f6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -280,7 +280,6 @@ private ContainerID getContainerID(String keyName) throws IOException { .setBucketName(bucketName) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyLocationInfo omKeyLocationInfo = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java index 03aa39f91bbd..0075982eb9bb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scanner/TestDataScanner.java @@ -210,7 +210,6 @@ private boolean verifyRatisReplication(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); HddsProtos.ReplicationType replicationType = HddsProtos.ReplicationType.valueOf(type.toString()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index f3ad2a722211..12bba86b13eb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -123,7 +123,6 @@ public void testContainerReportKeyWrite() throws Exception { StandaloneReplicationConfig .getInstance(HddsProtos.ReplicationFactor.ONE)) .setDataSize(keySize) - .setRefreshPipeline(true) .build(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index cc6bceb5cbf8..0e23f941bd66 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -146,7 +146,7 @@ public void testUniqueTrxnIndexOnOMRestart() throws Exception { .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true).build()); + .build()); long keyTrxnIndex = OmUtils.getTxIdFromObjectId( omKeyInfo.getObjectID()); Assert.assertEquals(3, keyTrxnIndex); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 4df9ac695b00..e41b15af78b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -106,7 +106,6 @@ public void testAllocateCommit() throws Exception { .setBucketName(bucketName) .setKeyName(keyName) .setDataSize(1000) - .setRefreshPipeline(true) .setAcls(new ArrayList<>()) .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)) .build(); @@ -186,7 +185,6 @@ public void testReadLatestVersion() throws Exception { .setBucketName(bucketName) .setKeyName(keyName) .setDataSize(1000) - .setRefreshPipeline(true) .build(); String dataString = RandomStringUtils.randomAlphabetic(100); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java index 
fdf3c1982020..c4d29b991a88 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java @@ -182,7 +182,7 @@ public void testPutKey() throws Exception { .setReplicationConfig(RatisReplicationConfig.getInstance( HddsProtos.ReplicationFactor.ONE)) .setKeyName(keyName) - .setRefreshPipeline(true).build(); + .build(); final OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); final List keyLocationInfos = keyInfo.getKeyLocationVersions().get(0).getBlocksLatestVersionOnly(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java index b36a68191bfc..9b03f69614d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java @@ -100,7 +100,7 @@ private void writeNumBytes(int numBytes) throws Exception { OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); builder.setVolumeName(volumeName).setBucketName(bucketName) - .setKeyName(keyName).setRefreshPipeline(true); + .setKeyName(keyName); OzoneKeyDetails keyDetails = bucket.getKey(keyName); Assertions.assertEquals(keyName, keyDetails.getName()); diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 88501120bf05..76f224b7d7ad 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -123,6 +123,7 @@ enum Type { SetRangerServiceVersion = 107; RangerBGSync 
= 109; EchoRPC = 110; + GetKeyInfo = 111; } message OMRequest { @@ -228,6 +229,7 @@ message OMRequest { optional SetRangerServiceVersionRequest SetRangerServiceVersionRequest = 107; optional RangerBGSyncRequest RangerBGSyncRequest = 109; optional EchoRPCRequest EchoRPCRequest = 110; + optional GetKeyInfoRequest GetKeyInfoRequest = 111; } message OMResponse { @@ -326,6 +328,7 @@ message OMResponse { optional SetRangerServiceVersionResponse SetRangerServiceVersionResponse = 107; optional RangerBGSyncResponse RangerBGSyncResponse = 109; optional EchoRPCResponse EchoRPCResponse = 110; + optional GetKeyInfoResponse GetKeyInfoResponse = 111; } enum Status { @@ -867,6 +870,8 @@ message KeyArgs { // When it is a head operation which is to check whether key exist optional bool headOp = 18; optional hadoop.hdds.ECReplicationConfig ecReplicationConfig = 19; + // Force OM to update container cache location from SCL + optional bool forceUpdateContainerCacheFromSCM = 20; } message KeyLocation { @@ -1053,6 +1058,17 @@ message LookupKeyResponse { optional uint64 openVersion = 4; } +message GetKeyInfoRequest { + required KeyArgs keyArgs = 1; + optional bool assumeS3Context = 2; +} + +message GetKeyInfoResponse { + optional KeyInfo keyInfo = 1; + optional VolumeInfo volumeInfo = 2; + optional string UserPrincipal = 3; +} + message RenameKeysRequest { required RenameKeysArgs renameKeysArgs = 1; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 8bcffa60d24f..e28179d4be65 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -64,6 +64,17 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl { */ OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException; + /** + * Return info of an 
existing key to client side to access to data on + * datanodes. + * @param args the args of the key provided by client. + * @param clientAddress a hint to key manager, order the datanode in returned + * pipeline by distance between client and datanode. + * @return a OmKeyInfo instance client uses to talk to container. + * @throws IOException + */ + OmKeyInfo getKeyInfo(OmKeyArgs args, String clientAddress) throws IOException; + /** * Returns a list of keys represented by {@link OmKeyInfo} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index cd6a0db4e8ae..81ffe464486a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -51,6 +51,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.storage.BlockLocationInfo; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.db.CodecRegistry; import org.apache.hadoop.hdds.utils.db.RDBStore; @@ -128,6 +129,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.SCM_GET_PIPELINE_EXCEPTION; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.VOLUME_NOT_FOUND; +import static org.apache.hadoop.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.util.Time.monotonicNow; @@ -321,10 +323,36 @@ public EncryptedKeyVersion run() 
throws IOException { public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException { Preconditions.checkNotNull(args); + + OmKeyInfo value = captureLatencyNs(metrics.getLookupReadKeyInfoLatencyNs(), + () -> readKeyInfo(args)); + + // If operation is head, do not perform any additional steps based on flags. + // As head operation does not need any of those details. + if (!args.isHeadOp()) { + + // add block token for read. + captureLatencyNs(metrics.getLookupGenerateBlockTokenLatencyNs(), + () -> addBlockToken4Read(value)); + + // Refresh container pipeline info from SCM + // based on OmKeyArgs.refreshPipeline flag + // value won't be null as the check is done inside try/catch block. + captureLatencyNs(metrics.getLookupRefreshLocationLatencyNs(), + () -> refresh(value)); + + if (args.getSortDatanodes()) { + sortDatanodes(clientAddress, value); + } + } + + return value; + } + + private OmKeyInfo readKeyInfo(OmKeyArgs args) throws IOException { String volumeName = args.getVolumeName(); String bucketName = args.getBucketName(); String keyName = args.getKeyName(); - long start = Time.monotonicNowNanos(); metadataManager.getLock().acquireReadLock(BUCKET_LOCK, volumeName, bucketName); @@ -355,44 +383,17 @@ public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) metadataManager.getLock().releaseReadLock(BUCKET_LOCK, volumeName, bucketName); } - metrics.addLookupReadKeyInfoLatency(Time.monotonicNowNanos() - start); if (value == null) { if (LOG.isDebugEnabled()) { LOG.debug("volume:{} bucket:{} Key:{} not found", volumeName, - bucketName, keyName); + bucketName, keyName); } throw new OMException("Key:" + keyName + " not found", KEY_NOT_FOUND); } - - if (args.getLatestVersionLocation()) { slimLocationVersion(value); } - - // If operation is head, do not perform any additional steps based on flags. - // As head operation does not need any of those details. - if (!args.isHeadOp()) { - - // add block token for read. 
- start = Time.monotonicNowNanos(); - addBlockToken4Read(value); - metrics.addLookupGenerateBlockTokenLatency( - Time.monotonicNowNanos() - start); - - // Refresh container pipeline info from SCM - // based on OmKeyArgs.refreshPipeline flag - // value won't be null as the check is done inside try/catch block. - start = Time.monotonicNowNanos(); - refresh(value); - metrics.addLookupRefreshLocationLatency(Time.monotonicNowNanos() - start); - - if (args.getSortDatanodes()) { - sortDatanodes(clientAddress, value); - } - - } - return value; } @@ -1932,4 +1933,59 @@ public boolean isBucketFSOptimized(String volName, String buckName) } return false; } + + @Override + public OmKeyInfo getKeyInfo(OmKeyArgs args, String clientAddress) + throws IOException { + Preconditions.checkNotNull(args); + + OmKeyInfo value = captureLatencyNs( + metrics.getGetKeyInfoReadKeyInfoLatencyNs(), + () -> readKeyInfo(args)); + + // If operation is head, do not perform any additional steps based on flags. + // As head operation does not need any of those details. + if (!args.isHeadOp()) { + + // add block token for read. + captureLatencyNs(metrics.getGetKeyInfoGenerateBlockTokenLatencyNs(), + () -> addBlockToken4Read(value)); + + // get container pipeline info from cache. 
+ captureLatencyNs(metrics.getGetKeyInfoRefreshLocationLatencyNs(), + () -> refreshPipelineFromCache(value, + args.isForceUpdateContainerCacheFromSCM())); + + if (args.getSortDatanodes()) { + sortDatanodes(clientAddress, value); + } + } + return value; + } + + protected void refreshPipelineFromCache(OmKeyInfo keyInfo, + boolean forceRefresh) + throws IOException { + Set containerIds = keyInfo.getKeyLocationVersions().stream() + .flatMap(v -> v.getLocationList().stream()) + .map(BlockLocationInfo::getContainerID) + .collect(Collectors.toSet()); + + Map containerLocations = + scmClient.getContainerLocations(containerIds, forceRefresh); + + for (OmKeyLocationInfoGroup key : keyInfo.getKeyLocationVersions()) { + for (List omKeyLocationInfoList : + key.getLocationLists()) { + for (OmKeyLocationInfo omKeyLocationInfo : omKeyLocationInfoList) { + Pipeline pipeline = containerLocations.get( + omKeyLocationInfo.getContainerID()); + if (pipeline != null && + !pipeline.equals(omKeyLocationInfo.getPipeline())) { + omKeyLocationInfo.setPipeline(pipeline); + } + } + } + } + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index b0ad24f5febc..fbf6fe27c373 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -83,6 +83,7 @@ public class OMMetrics { private @Metric MutableCounterLong numSetAcl; private @Metric MutableCounterLong numGetAcl; private @Metric MutableCounterLong numRemoveAcl; + private @Metric MutableCounterLong numGetKeyInfo; // Failure Metrics private @Metric MutableCounterLong numVolumeCreateFails; @@ -150,6 +151,7 @@ public class OMMetrics { private @Metric MutableCounterLong numCreateFileFails; private @Metric MutableCounterLong numLookupFileFails; private @Metric MutableCounterLong 
numListStatusFails; + private @Metric MutableCounterLong getNumGetKeyInfoFails; // Metrics for total amount of data written private @Metric MutableCounterLong totalDataCommitted; @@ -721,6 +723,15 @@ public void incNumRemoveAcl() { numRemoveAcl.incr(); } + public void incNumGetKeyInfo() { + numGetKeyInfo.incr(); + numKeyOps.incr(); + } + + public void incNumGetKeyInfoFails() { + getNumGetKeyInfoFails.incr(); + } + @VisibleForTesting public long getNumVolumeCreates() { return numVolumeCreates.value(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java index e86264a85e4f..870d4c1d5c7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java @@ -58,6 +58,25 @@ public static void unregister() { @Metric(about = "resolveBucketLink latency nanoseconds") private MutableRate lookupResolveBucketLatencyNs; + + @Metric(about = "Overall getKeyInfo in nanoseconds") + private MutableRate getKeyInfoLatencyNs; + + @Metric(about = "Read key info from db in getKeyInfo") + private MutableRate getKeyInfoReadKeyInfoLatencyNs; + + @Metric(about = "Block token generation latency in getKeyInfo") + private MutableRate getKeyInfoGenerateBlockTokenLatencyNs; + + @Metric(about = "Refresh location latency in getKeyInfo") + private MutableRate getKeyInfoRefreshLocationLatencyNs; + + @Metric(about = "ACLs check in getKeyInfo") + private MutableRate getKeyInfoAclCheckLatencyNs; + + @Metric(about = "resolveBucketLink latency in getKeyInfo") + private MutableRate getKeyInfoResolveBucketLatencyNs; + @Metric(about = "s3VolumeInfo latency nanoseconds") private MutableRate s3VolumeContextLatencyNs; @@ -66,27 +85,52 @@ public void addLookupLatency(long latencyInNs) { lookupLatencyNs.add(latencyInNs); } - 
public void addLookupGenerateBlockTokenLatency(long latencyInNs) { - lookupGenerateBlockTokenLatencyNs.add(latencyInNs); + public MutableRate getLookupRefreshLocationLatencyNs() { + return lookupRefreshLocationLatencyNs; } - public void addLookupRefreshLocationLatency(long latencyInNs) { - lookupRefreshLocationLatencyNs.add(latencyInNs); + + public MutableRate getLookupGenerateBlockTokenLatencyNs() { + return lookupGenerateBlockTokenLatencyNs; } - public void addLookupAclCheckLatency(long latencyInNs) { - lookupAclCheckLatencyNs.add(latencyInNs); + public MutableRate getLookupReadKeyInfoLatencyNs() { + return lookupReadKeyInfoLatencyNs; } - public void addLookupReadKeyInfoLatency(long latencyInNs) { - lookupReadKeyInfoLatencyNs.add(latencyInNs); + public MutableRate getLookupAclCheckLatencyNs() { + return lookupAclCheckLatencyNs; } public void addS3VolumeContextLatencyNs(long latencyInNs) { s3VolumeContextLatencyNs.add(latencyInNs); } - public void addLookupResolveBucketLatencyNs(long latencyInNs) { - lookupResolveBucketLatencyNs.add(latencyInNs); + public MutableRate getLookupResolveBucketLatencyNs() { + return lookupResolveBucketLatencyNs; + } + + public void addGetKeyInfoLatencyNs(long value) { + getKeyInfoLatencyNs.add(value); + } + + public MutableRate getGetKeyInfoAclCheckLatencyNs() { + return getKeyInfoAclCheckLatencyNs; + } + + public MutableRate getGetKeyInfoGenerateBlockTokenLatencyNs() { + return getKeyInfoGenerateBlockTokenLatencyNs; + } + + public MutableRate getGetKeyInfoReadKeyInfoLatencyNs() { + return getKeyInfoReadKeyInfoLatencyNs; + } + + public MutableRate getGetKeyInfoRefreshLocationLatencyNs() { + return getKeyInfoRefreshLocationLatencyNs; + } + + public MutableRate getGetKeyInfoResolveBucketLatencyNs() { + return getKeyInfoResolveBucketLatencyNs; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 
479a8c962730..ec6aa4a3a411 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -74,6 +74,7 @@ import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneManagerVersion; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.service.OMRangerBGSyncService; import org.apache.hadoop.ozone.util.OzoneNetUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -270,6 +271,7 @@ import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_REQUEST; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.PERMISSION_DENIED; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.TOKEN_ERROR_OTHER; +import static org.apache.hadoop.util.MetricUtil.captureLatencyNs; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.VOLUME_LOCK; import static org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus.LEADER_AND_READY; @@ -2815,25 +2817,25 @@ public OmBucketInfo getBucketInfo(String volume, String bucket) @Override public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { long start = Time.monotonicNowNanos(); - ResolvedBucket bucket = resolveBucketLink(args); - perfMetrics.addLookupResolveBucketLatencyNs( - Time.monotonicNowNanos() - start); + ResolvedBucket bucket = captureLatencyNs( + perfMetrics.getLookupResolveBucketLatencyNs(), + () -> resolveBucketLink(args)); if (isAclEnabled) { - long startAcl = Time.monotonicNowNanos(); - checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, - bucket.realVolume(), bucket.realBucket(), args.getKeyName()); - perfMetrics.addLookupAclCheckLatency(Time.monotonicNowNanos() 
- startAcl); + captureLatencyNs(perfMetrics.getLookupAclCheckLatencyNs(), + () -> checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, + bucket.realVolume(), bucket.realBucket(), args.getKeyName()) + ); } boolean auditSuccess = true; Map auditMap = bucket.audit(args.toAuditMap()); - args = bucket.update(args); + OmKeyArgs resolvedArgs = bucket.update(args); try { metrics.incNumKeyLookups(); - return keyManager.lookupKey(args, getClientAddress()); + return keyManager.lookupKey(resolvedArgs, getClientAddress()); } catch (Exception ex) { metrics.incNumKeyLookupFails(); auditSuccess = false; @@ -2850,6 +2852,67 @@ public OmKeyInfo lookupKey(OmKeyArgs args) throws IOException { } } + @Override + public KeyInfoWithVolumeContext getKeyInfo(final OmKeyArgs args, + boolean assumeS3Context) + throws IOException { + long start = Time.monotonicNowNanos(); + + java.util.Optional s3VolumeContext = + java.util.Optional.empty(); + + final OmKeyArgs resolvedVolumeArgs; + if (assumeS3Context) { + S3VolumeContext context = getS3VolumeContext(); + s3VolumeContext = java.util.Optional.of(context); + resolvedVolumeArgs = args.toBuilder() + .setVolumeName(context.getOmVolumeArgs().getVolume()) + .build(); + } else { + resolvedVolumeArgs = args; + } + + final ResolvedBucket bucket = captureLatencyNs( + perfMetrics.getGetKeyInfoResolveBucketLatencyNs(), + () -> resolveBucketLink(resolvedVolumeArgs)); + + if (isAclEnabled) { + captureLatencyNs(perfMetrics.getGetKeyInfoAclCheckLatencyNs(), () -> + checkAcls(ResourceType.KEY, StoreType.OZONE, ACLType.READ, + bucket.realVolume(), bucket.realBucket(), args.getKeyName()) + ); + } + + boolean auditSuccess = true; + OmKeyArgs resolvedArgs = bucket.update(args); + + try { + metrics.incNumGetKeyInfo(); + OmKeyInfo keyInfo = + keyManager.getKeyInfo(resolvedArgs, getClientAddress()); + KeyInfoWithVolumeContext.Builder builder = KeyInfoWithVolumeContext + .newBuilder() + .setKeyInfo(keyInfo); + s3VolumeContext.ifPresent(context -> { + 
builder.setVolumeArgs(context.getOmVolumeArgs()); + builder.setUserPrincipal(context.getUserPrincipal()); + }); + return builder.build(); + } catch (Exception ex) { + metrics.incNumGetKeyInfoFails(); + auditSuccess = false; + AUDIT.logReadFailure(buildAuditMessageForFailure(OMAction.READ_KEY, + bucket.audit(resolvedVolumeArgs.toAuditMap()), ex)); + throw ex; + } finally { + if (auditSuccess) { + AUDIT.logReadSuccess(buildAuditMessageForSuccess(OMAction.READ_KEY, + bucket.audit(resolvedVolumeArgs.toAuditMap()))); + } + perfMetrics.addGetKeyInfoLatencyNs(Time.monotonicNowNanos() - start); + } + } + @Override public List listKeys(String volumeName, String bucketName, String startKey, String keyPrefix, int maxKeys) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java index 2869d133d377..2c49bb968c72 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzonePrefixPathImpl.java @@ -56,7 +56,6 @@ public OzonePrefixPathImpl(String volumeName, String bucketName, .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyPrefix) - .setRefreshPipeline(false) .setHeadOp(true) .build(); try { @@ -158,7 +157,6 @@ List getNextListOfKeys(String prevKey) throws .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyPrefix) - .setRefreshPipeline(false) .setHeadOp(true) .build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 7f20d1664006..e4e74b006abd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.DBUpdates; +import org.apache.hadoop.ozone.om.helpers.KeyInfoWithVolumeContext; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -70,6 +71,8 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.FinalizeUpgradeProgressResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetFileStatusResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetKeyInfoResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoBucketResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.InfoVolumeRequest; @@ -277,6 +280,10 @@ public OMResponse handleReadRequest(OMRequest request) { request.getTenantListUserRequest()); responseBuilder.setTenantListUserResponse(listUserResponse); break; + case GetKeyInfo: + responseBuilder.setGetKeyInfoResponse( + getKeyInfo(request.getGetKeyInfoRequest(), request.getVersion())); + break; default: responseBuilder.setSuccess(false); responseBuilder.setMessage("Unrecognized Command Type: " + cmdType); @@ -499,6 +506,25 @@ private LookupKeyResponse lookupKey(LookupKeyRequest request, return resp.build(); } + private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request, + int clientVersion) throws IOException { + KeyArgs keyArgs = request.getKeyArgs(); + 
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(keyArgs.getVolumeName()) + .setBucketName(keyArgs.getBucketName()) + .setKeyName(keyArgs.getKeyName()) + .setLatestVersionLocation(keyArgs.getLatestVersionLocation()) + .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) + .setHeadOp(keyArgs.getHeadOp()) + .setForceUpdateContainerCacheFromSCM( + keyArgs.getForceUpdateContainerCacheFromSCM()) + .build(); + KeyInfoWithVolumeContext keyInfo = impl.getKeyInfo(omKeyArgs, + request.getAssumeS3Context()); + + return keyInfo.toProtobuf(clientVersion); + } + @RequestFeatureValidator( conditions = ValidationCondition.OLDER_CLIENT_REQUESTS, processingPhase = RequestProcessingPhase.POST_PROCESS, @@ -875,7 +901,6 @@ private GetFileStatusResponse getOzoneFileStatus( .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) - .setRefreshPipeline(true) .build(); GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder(); @@ -987,7 +1012,6 @@ private LookupFileResponse lookupFile(LookupFileRequest request, .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) - .setRefreshPipeline(true) .setSortDatanodesInPipeline(keyArgs.getSortDatanodes()) .setLatestVersionLocation(keyArgs.getLatestVersionLocation()) .build(); @@ -1062,7 +1086,6 @@ private ListStatusResponse listStatus( .setVolumeName(keyArgs.getVolumeName()) .setBucketName(keyArgs.getBucketName()) .setKeyName(keyArgs.getKeyName()) - .setRefreshPipeline(true) .setLatestVersionLocation(keyArgs.getLatestVersionLocation()) .setHeadOp(keyArgs.getHeadOp()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index 748519ef2d04..d418950bd5b6 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import com.google.common.base.Optional; @@ -78,10 +79,12 @@ import org.junit.Test; import org.mockito.Mockito; +import static com.google.common.collect.Sets.newHashSet; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static java.util.Comparator.comparing; import static java.util.stream.Collectors.toList; +import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -345,6 +348,100 @@ private void abortMultipart( RandomUtils.nextInt())); } + @Test + @SuppressWarnings ({"unchecked", "varargs"}) + public void testGetKeyInfo() throws IOException { + final DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); + final DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails(); + final DatanodeDetails dn3 = MockDatanodeDetails.randomDatanodeDetails(); + final DatanodeDetails dn4 = MockDatanodeDetails.randomDatanodeDetails(); + final long containerID = 1L; + Set containerIDs = newHashSet(containerID); + + final Pipeline pipeline1 = Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setState(Pipeline.PipelineState.OPEN) + .setLeaderId(dn1.getUuid()) + .setNodes(Arrays.asList(dn1, dn2, dn3)) + .build(); + + final Pipeline pipeline2 = Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setState(Pipeline.PipelineState.OPEN) + .setLeaderId(dn1.getUuid()) + .setNodes(Arrays.asList(dn2, dn3, dn4)) + .build(); + + ContainerInfo ci = 
Mockito.mock(ContainerInfo.class); + when(ci.getContainerID()).thenReturn(1L); + + // Setup SCM containerClient so that 1st call returns pipeline1 and + // 2nd call returns pipeline2. + when(containerClient.getContainerWithPipelineBatch(containerIDs)) + .thenReturn( + singletonList(new ContainerWithPipeline(ci, pipeline1)), + singletonList(new ContainerWithPipeline(ci, pipeline2))); + + insertVolume("volumeOne"); + + insertBucket("volumeOne", "bucketOne"); + + BlockID blockID1 = new BlockID(containerID, 1L); + insertKey(null, "volumeOne", "bucketOne", "keyOne", blockID1); + BlockID blockID2 = new BlockID(containerID, 2L); + insertKey(null, "volumeOne", "bucketOne", "keyTwo", blockID2); + + // 1st call to get key1. + OmKeyArgs keyArgs = new Builder() + .setVolumeName("volumeOne") + .setBucketName("bucketOne") + .setKeyName("keyOne") + .build(); + OmKeyInfo keyInfo = keyManager.getKeyInfo(keyArgs, "test"); + final OmKeyLocationInfo blockLocation1 = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0); + Assert.assertEquals(blockID1, blockLocation1.getBlockID()); + Assert.assertEquals(pipeline1, blockLocation1.getPipeline()); + // Ensure SCM is called. + verify(containerClient, times(1)) + .getContainerWithPipelineBatch(containerIDs); + + // A subsequent call to key2 in the same container should result in no SCM calls. + keyArgs = new Builder() + .setVolumeName("volumeOne") + .setBucketName("bucketOne") + .setKeyName("keyTwo") + .build(); + OmKeyInfo keyInfo2 = keyManager.getKeyInfo(keyArgs, "test"); + OmKeyLocationInfo blockLocation2 = keyInfo2 + .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0); + Assert.assertEquals(blockID2, blockLocation2.getBlockID()); + Assert.assertEquals(pipeline1, blockLocation2.getPipeline()); + // Ensure SCM is not called. + verify(containerClient, times(1)) + .getContainerWithPipelineBatch(containerIDs); + + // Yet another call with forceCacheUpdate should trigger a call to SCM.
+ keyArgs = new Builder() + .setVolumeName("volumeOne") + .setBucketName("bucketOne") + .setKeyName("keyTwo") + .setForceUpdateContainerCacheFromSCM(true) + .build(); + keyInfo2 = keyManager.getKeyInfo(keyArgs, "test"); + blockLocation2 = keyInfo2 + .getLatestVersionLocations().getBlocksLatestVersionOnly().get(0); + Assert.assertEquals(blockID2, blockLocation2.getBlockID()); + Assert.assertEquals(pipeline2, blockLocation2.getPipeline()); + // Ensure SCM is called. + verify(containerClient, times(2)) + .getContainerWithPipelineBatch(containerIDs); + } + @Test public void testLookupFileWithDnFailure() throws IOException { final DatanodeDetails dnOne = MockDatanodeDetails.randomDatanodeDetails(); @@ -384,41 +481,12 @@ public void testLookupFileWithDnFailure() throws IOException { when(containerClient.getContainerWithPipelineBatch(containerIDs)) .thenReturn(cps); - final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() - .setVolume("volumeOne") - .setAdminName("admin") - .setOwnerName("admin") - .build(); - OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); - - final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() - .setVolumeName("volumeOne") - .setBucketName("bucketOne") - .build(); - OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); + insertVolume("volumeOne"); - final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() - .setBlockID(new BlockID(1L, 1L)) - .setPipeline(pipelineOne) - .setOffset(0) - .setLength(256000) - .build(); + insertBucket("volumeOne", "bucketOne"); - final OmKeyInfo keyInfo = new OmKeyInfo.Builder() - .setVolumeName("volumeOne") - .setBucketName("bucketOne") - .setKeyName("keyOne") - .setOmKeyLocationInfos(singletonList( - new OmKeyLocationInfoGroup(0, - singletonList(keyLocationInfo)))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(256000) - .setReplicationConfig( - RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) - .setAcls(Collections.emptyList()) 
- .build(); - OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); + insertKey(pipelineOne, "volumeOne", "bucketOne", "keyOne", + new BlockID(1L, 1L)); final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder() .setVolumeName("volumeOne") @@ -444,6 +512,51 @@ public void testLookupFileWithDnFailure() throws IOException { .getNodes().contains(dnSix)); } + private void insertKey(Pipeline pipeline, String volumeName, + String bucketName, String keyName, + BlockID blockID) throws IOException { + final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() + .setBlockID(blockID) + .setPipeline(pipeline) + .setOffset(0) + .setLength(256000) + .build(); + + final OmKeyInfo keyInfo = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setOmKeyLocationInfos(singletonList( + new OmKeyLocationInfoGroup(0, + singletonList(keyLocationInfo)))) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setDataSize(256000) + .setReplicationConfig( + RatisReplicationConfig.getInstance(ReplicationFactor.THREE)) + .setAcls(Collections.emptyList()) + .build(); + OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); + } + + private void insertBucket(String volumeName, String bucketName) + throws IOException { + final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .build(); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); + } + + private void insertVolume(String volumeName) throws IOException { + final OmVolumeArgs volumeArgs = OmVolumeArgs.newBuilder() + .setVolume(volumeName) + .setAdminName("admin") + .setOwnerName("admin") + .build(); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); + } + @Test public void listStatus() throws Exception { String volume = "vol"; diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java 
b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java index 8c121679738e..adc064b241bb 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java @@ -210,7 +210,7 @@ public static FileChecksum getFileChecksumWithCombineMode(OzoneVolume volume, } OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volume.getName()) .setBucketName(bucket.getName()).setKeyName(keyName) - .setRefreshPipeline(true).setSortDatanodesInPipeline(true) + .setSortDatanodesInPipeline(true) .setLatestVersionLocation(true).build(); OmKeyInfo keyInfo = rpcClient.getOzoneManagerClient().lookupKey(keyArgs); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index 87e44c9eb9e7..82d4bbd98060 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -97,7 +97,6 @@ protected void execute(OzoneClient client, OzoneAddress address) .setVolumeName(volumeName) .setBucketName(bucketName) .setKeyName(keyName) - .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); // querying the keyLocations.The OM is queried to get containerID and