Merged
@@ -39,6 +39,7 @@
import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -279,6 +280,7 @@ public void testFileSystem() throws Exception {
testRenameDestinationParentDoesntExist();
testRenameToParentDir();
testSeekOnFileLength();
testAllocateMoreThanOneBlock();
testDeleteRoot();

testRecursiveDelete();
@@ -662,6 +664,38 @@ public void testSeekOnFileLength() throws IOException {
}
}

public void testAllocateMoreThanOneBlock() throws IOException {
Path file = new Path("/file");
String str = "TestOzoneFileSystemV1.testSeekOnFileLength";
byte[] strBytes = str.getBytes();
long numBlockAllocationsOrg =
cluster.getOzoneManager().getMetrics().getNumBlockAllocates();

try (FSDataOutputStream out1 = fs.create(file, FsPermission.getDefault(),
true, 8, (short) 3, 1, null)) {
for (int i = 0; i < 100000; i++) {
out1.write(strBytes);
}
}

try (FSDataInputStream stream = fs.open(file)) {
FileStatus fileStatus = fs.getFileStatus(file);
long blkSize = fileStatus.getBlockSize();
long fileLength = fileStatus.getLen();
Assert.assertTrue("Block allocation should happen",
fileLength > blkSize);

long newNumBlockAllocations =
cluster.getOzoneManager().getMetrics().getNumBlockAllocates();

Assert.assertTrue("Block allocation should happen",
(newNumBlockAllocations > numBlockAllocationsOrg));

stream.seek(fileLength);
assertEquals(-1, stream.read());
}
}
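
The test above forces multi-block allocation by volume alone: 100,000 writes of the 43-byte string come to roughly 4.3 MB, which the assertion expects to exceed the block size the mini cluster is configured with (that configuration sits outside this diff). A minimal standalone sketch of the arithmetic behind the fileLength > blkSize assertion, with the block size assumed purely for illustration:

// Standalone sketch; the 2 MB block size is an assumption, the real value
// comes from the MiniOzoneCluster configuration used by these tests.
public class BlockCountSketch {
  public static void main(String[] args) {
    long bytesPerWrite = "TestOzoneFileSystemV1.testSeekOnFileLength".getBytes().length; // 43
    long fileLength = bytesPerWrite * 100_000L;            // ~4.3 MB written by the test
    long blkSize = 2L * 1024 * 1024;                       // assumed block size
    long minBlocks = (fileLength + blkSize - 1) / blkSize; // ceiling division
    System.out.println(fileLength + " bytes need at least " + minBlocks + " blocks");
  }
}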

public void testDeleteRoot() throws IOException {
Path dir = new Path("/dir");
fs.mkdirs(dir);
@@ -396,6 +396,9 @@ public void testFileSystem() throws Exception {
testSeekOnFileLength();
deleteRootDir();

testAllocateMoreThanOneBlock();
deleteRootDir();

testFileDelete();
deleteRootDir();

@@ -38,6 +38,7 @@
import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
@@ -138,6 +139,9 @@ public static OMClientRequest createClientRequest(OMRequest omRequest) {
case SetBucketProperty:
return new OMBucketSetPropertyRequest(omRequest);
case AllocateBlock:
if (omLayoutVersionV1) {
return new OMAllocateBlockRequestV1(omRequest);
}
return new OMAllocateBlockRequest(omRequest);
case CreateKey:
return new OMKeyCreateRequest(omRequest);
@@ -141,6 +141,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
pathInfoV1.getLeafNodeName());
dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
omMetadataManager, dbFileKey, keyName);
if (dbFileInfo != null) {
ozoneManager.getKeyManager().refresh(dbFileInfo);
}
}

// check if the file or directory already existed in OM
@@ -37,6 +37,7 @@
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
@@ -52,6 +53,8 @@

import javax.annotation.Nonnull;

import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;

/**
* Base class for file requests.
@@ -545,6 +548,7 @@ public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
throws IOException {

OmKeyInfo dbOmKeyInfo;
OmKeyInfo omKeyInfoCopy = null;
if (openFileTable) {
dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
} else {
@@ -556,9 +560,10 @@
// For example, if the user-given key path is '/a/b/c/d/e/file1', then the DB
// keyName field stores only the leaf node name, which is 'file1'.
if (dbOmKeyInfo != null) {
dbOmKeyInfo.setKeyName(keyName);
omKeyInfoCopy = dbOmKeyInfo.copyObject();
omKeyInfoCopy.setKeyName(keyName);
}
return dbOmKeyInfo;
return omKeyInfoCopy;
}
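
The copyObject() here is presumably a defensive copy: the instance handed back by the file table (or its cache) may be shared with other readers, so the rewrite of keyName should stay local to this request rather than leak into the stored entry. A toy, self-contained illustration of that hazard, using a plain map and an invented KeyInfo class instead of the real OmKeyInfo:

// Toy stand-in for OmKeyInfo and the file-table cache; not Ozone code.
import java.util.HashMap;
import java.util.Map;

public class DefensiveCopySketch {
  static class KeyInfo {
    String keyName;
    KeyInfo(String name) { this.keyName = name; }
    KeyInfo copyObject() { return new KeyInfo(keyName); }
  }

  public static void main(String[] args) {
    Map<String, KeyInfo> cache = new HashMap<>();
    cache.put("1003/file1", new KeyInfo("file1"));

    KeyInfo copy = cache.get("1003/file1").copyObject();
    copy.keyName = "a/b/c/file1";                         // rewrite stays local
    System.out.println(cache.get("1003/file1").keyName);  // still "file1"

    cache.get("1003/file1").keyName = "a/b/c/file1";      // in-place mutation...
    System.out.println(cache.get("1003/file1").keyName);  // ...pollutes the cached entry
  }
}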

/**
@@ -851,4 +856,72 @@ private static boolean checkSubFileExists(OmKeyInfo omKeyInfo,
public static boolean isImmediateChild(long parentId, long ancestorId) {
return parentId == ancestorId;
}


/**
* Check whether a directory with the same name exists; if it does, throw an error.
*
* @param keyName key name
* @param ozoneManager Ozone Manager
* @param reachedLastPathComponent true if the path component is a fileName
* @throws IOException if a directory with the same name exists
*/
private static void checkDirectoryAlreadyExists(String keyName,
OzoneManager ozoneManager, boolean reachedLastPathComponent)
throws IOException {
// Reached last component, which would be a file. Returns its parentID.
Contributor:

Minor: the comment says "Returns its parentID", but in the code we just check whether the last component was reached with file system paths enabled and, if so, throw an exception.

Also, since the current V1 layout always assumes file system paths are enabled, do we need the && ozoneManager.getEnableFileSystemPaths() check?

And these three lines could live in the calling method, since this is not a utility method called by different classes.

Contributor Author:

Agreed, done the changes!

if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
throw new OMException("Can not create file: " + keyName +
" as there is already directory in the given path", NOT_A_FILE);
}
}
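
Following up on the review thread above, a sketch of how the check might look once it is folded into getParentID() and the getEnableFileSystemPaths() guard is dropped (V1 always assumes file system paths). This is only an illustration of the suggestion, not necessarily the code that was merged; it reuses the identifiers from the loop below:

// Inside the getParentID() loop, after a directory entry is found:
if (omDirectoryInfo != null) {
  if (!pathComponents.hasNext()) {
    // Last component resolved to an existing directory, but a file was expected.
    throw new OMException("Can not create file: " + keyName
        + " as there is already directory in the given path", NOT_A_FILE);
  }
  lastKnownParentId = omDirectoryInfo.getObjectID();
}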

/**
* Get parent id for the user given path.
*
* @param bucketId bucket id
* @param pathComponents file path elements
* @param keyName user given key name
* @param ozoneManager ozone manager
* @return lastKnownParentID
* @throws IOException DB failure or parent does not exist in the DirectoryTable
*/
public static long getParentID(long bucketId, Iterator<Path> pathComponents,
String keyName, OzoneManager ozoneManager) throws IOException {

long lastKnownParentId = bucketId;

// If no sub-dirs then bucketID is the root/parent.
if (!pathComponents.hasNext()) {
return bucketId;
}

OmDirectoryInfo omDirectoryInfo;
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
while (pathComponents.hasNext()) {
String nodeName = pathComponents.next().toString();
boolean reachedLastPathComponent = !pathComponents.hasNext();
String dbNodeName =
omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);

omDirectoryInfo = omMetadataManager.
getDirectoryTable().get(dbNodeName);
if (omDirectoryInfo != null) {
checkDirectoryAlreadyExists(keyName, ozoneManager,
reachedLastPathComponent);
lastKnownParentId = omDirectoryInfo.getObjectID();
} else {
// One of the sub-dirs doesn't exist in the DB. The immediate parent must
// exist for committing the key, otherwise the operation will fail.
if (!reachedLastPathComponent) {
throw new OMException("Failed to commit key, as parent directory of "
+ keyName + " entry is not found in DirectoryTable",
KEY_NOT_FOUND);
}
break;
}
}

return lastKnownParentId;
}
}
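
To make the path resolution above concrete, here is a minimal, self-contained model of the walk: a plain Map stands in for the directory table, keys follow the "<parentId>/<name>" shape produced by getOzonePathKey, and the object IDs are invented for the example.

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
import java.util.Map;

// Simplified model of OMFileRequest.getParentID(); no OM tables or OMException,
// just the traversal logic.
public class ParentIdWalkSketch {
  static long getParentId(long bucketId, String keyName, Map<String, Long> dirTable) {
    long lastKnownParentId = bucketId;
    Iterator<Path> components = Paths.get(keyName).iterator();
    while (components.hasNext()) {
      String name = components.next().toString();
      boolean reachedLast = !components.hasNext();
      Long dirId = dirTable.get(lastKnownParentId + "/" + name);
      if (dirId == null) {
        if (!reachedLast) {
          throw new IllegalStateException("parent of " + keyName + " not found");
        }
        break; // last component is the file itself; its parent is already known
      }
      lastKnownParentId = dirId;
    }
    return lastKnownParentId;
  }

  public static void main(String[] args) {
    Map<String, Long> dirTable = Map.of("512/a", 1001L, "1001/b", 1002L, "1002/c", 1003L);
    System.out.println(getParentId(512L, "a/b/c/file1", dirTable)); // prints 1003
  }
}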
@@ -186,9 +186,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// Here we don't acquire bucket/volume lock because for a single client
// allocateBlock is called in serial fashion.

openKeyName = omMetadataManager.getOpenKey(volumeName, bucketName,
keyName, clientID);
openKeyInfo = omMetadataManager.getOpenKeyTable().get(openKeyName);
openKeyName = getOpenKeyName(volumeName, bucketName, keyName, clientID,
ozoneManager);
openKeyInfo = getOpenKeyInfo(omMetadataManager, openKeyName, keyName);
if (openKeyInfo == null) {
throw new OMException("Open Key not found " + openKeyName,
KEY_NOT_FOUND);
@@ -216,17 +216,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());

// Add to cache.
omMetadataManager.getOpenKeyTable().addCacheEntry(
new CacheKey<>(openKeyName),
new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex));

addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
openKeyInfo);
omBucketInfo.incrUsedBytes(preAllocatedSpace);

omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
.setKeyLocation(blockLocation).build());
omClientResponse = new OMAllocateBlockResponse(omResponse.build(),
openKeyInfo, clientID, omVolumeArgs, omBucketInfo.copyObject());

omClientResponse = getOmClientResponse(clientID, omResponse,
Contributor:

We don't update omVolumeArgs, but we still read it and pass it to the response, which is not required.
Not related to your patch, BTW; we can fix this on master and then bring it to the branch.

openKeyInfo, omVolumeArgs, omBucketInfo.copyObject());
LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}",
volumeName, bucketName, openKeyName);
} catch (IOException ex) {
@@ -250,4 +248,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,

return omClientResponse;
}

protected OmKeyInfo getOpenKeyInfo(OMMetadataManager omMetadataManager,
String openKeyName, String keyName) throws IOException {
return omMetadataManager.getOpenKeyTable().get(openKeyName);
}

protected String getOpenKeyName(String volumeName, String bucketName,
String keyName, long clientID, OzoneManager ozoneManager)
throws IOException {
return ozoneManager.getMetadataManager().getOpenKey(volumeName, bucketName,
keyName, clientID);
}

protected void addOpenTableCacheEntry(long trxnLogIndex,
OMMetadataManager omMetadataManager, String openKeyName,
OmKeyInfo openKeyInfo) {
omMetadataManager.getOpenKeyTable().addCacheEntry(
new CacheKey<>(openKeyName),
new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex));
}

protected OMClientResponse getOmClientResponse(long clientID,
OMResponse.Builder omResponse, OmKeyInfo openKeyInfo,
OmVolumeArgs omVolumeArgs, OmBucketInfo omBucketInfo) {
return new OMAllocateBlockResponse(omResponse.build(),
openKeyInfo, clientID, omVolumeArgs, omBucketInfo);
}
}
@@ -0,0 +1,91 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.ozone.om.request.key;

import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.OMAllocateBlockResponseV1;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
import org.jetbrains.annotations.NotNull;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;

/**
* Handles allocate block request layout version V1.
*/
public class OMAllocateBlockRequestV1 extends OMAllocateBlockRequest {

private static final Logger LOG =
LoggerFactory.getLogger(OMAllocateBlockRequestV1.class);

public OMAllocateBlockRequestV1(OMRequest omRequest) {
super(omRequest);
}

protected OmKeyInfo getOpenKeyInfo(OMMetadataManager omMetadataManager,
String openKeyName, String keyName) throws IOException {
String fileName = OzoneFSUtils.getFileName(keyName);
return OMFileRequest.getOmKeyInfoFromFileTable(true,
omMetadataManager, openKeyName, fileName);
}

protected String getOpenKeyName(String volumeName, String bucketName,
String keyName, long clientID, OzoneManager ozoneManager)
throws IOException {
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
OmBucketInfo omBucketInfo =
omMetadataManager.getBucketTable().get(bucketKey);
long bucketId = omBucketInfo.getObjectID();
String fileName = OzoneFSUtils.getFileName(keyName);
Iterator<Path> pathComponents = Paths.get(keyName).iterator();
long parentID = OMFileRequest.getParentID(bucketId, pathComponents,
keyName, ozoneManager);
return omMetadataManager.getOpenFileName(parentID, fileName,
clientID);
}

protected void addOpenTableCacheEntry(long trxnLogIndex,
OMMetadataManager omMetadataManager, String openKeyName,
OmKeyInfo openKeyInfo) {
String fileName = openKeyInfo.getFileName();
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName,
openKeyInfo, fileName, trxnLogIndex);
}

@NotNull
protected OMClientResponse getOmClientResponse(long clientID,
OMResponse.Builder omResponse, OmKeyInfo openKeyInfo,
OmVolumeArgs omVolumeArgs, OmBucketInfo omBucketInfo) {
return new OMAllocateBlockResponseV1(omResponse.build(),
openKeyInfo, clientID, omVolumeArgs, omBucketInfo);
}
}
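
The net effect of these overrides is that the open key is addressed by parent object ID and file name rather than by the full path. A rough, standalone comparison of the two shapes; the exact separators are assumptions, since the real strings come from OMMetadataManager.getOpenKey and getOpenFileName:

// Standalone sketch; key formats are illustrative assumptions only.
public class OpenKeyNameSketch {
  public static void main(String[] args) {
    long clientId = 12345L;

    // V0 (OMAllocateBlockRequest): the open key carries the full key path.
    String v0 = "/" + "vol1" + "/" + "bucket1" + "/" + "a/b/c/file1" + "/" + clientId;

    // V1 (OMAllocateBlockRequestV1): the parent directory "c" is first resolved
    // to its object ID (1003 here, via OMFileRequest.getParentID), and the open
    // key carries only the leaf file name.
    String v1 = 1003L + "/" + "file1" + "/" + clientId;

    System.out.println(v0); // /vol1/bucket1/a/b/c/file1/12345
    System.out.println(v1); // 1003/file1/12345
  }
}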