Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
27 commits
Select commit Hold shift + click to select a range
42847f6
HDDS-9041. Intermittent Delete root failed.
Aug 10, 2023
d681d6d
HDDS-9041. Intermittent Delete root failed.
Aug 10, 2023
cffc869
HDDS-9041. Intermittent Delete root failed.
Aug 10, 2023
b97ffa3
HDDS-9041. Intermittent Delete root failed.
Aug 16, 2023
da7ed46
HDDS-9041. Intermittent Delete root failed.
Aug 16, 2023
bccbebd
HDDS-9041. Intermittent Delete root failed.
Aug 16, 2023
16828b0
HDDS-9041. Intermittent Delete root failed.
Aug 16, 2023
452d1c8
HDDS-9041. Intermittent Delete root failed.
Aug 16, 2023
06884b0
HDDS-9041. Intermittent Delete root failed.
Aug 17, 2023
6af6988
HDDS-9041. Intermittent Delete root failed.
Aug 17, 2023
47b2708
HDDS-9041. Intermittent Delete root failed.
Aug 17, 2023
fbe53ce
HDDS-9041. Intermittent Delete root failed.
Aug 17, 2023
0bf588c
HDDS-9041. Intermittent Delete root failed.
Aug 17, 2023
8a7d35b
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
4718abf
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
f655c9e
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
160edf4
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
07c48da
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
e722c01
HDDS-9041. Intermittent Delete root failed.
Aug 18, 2023
2a7f04c
HDDS-9041. Intermittent Delete root failed.
Aug 21, 2023
af06a7f
HDDS-9041. Intermittent Delete root failed.
Aug 21, 2023
cbe02e8
Merge branch 'apache:master' into HDDS-9041
devmadhuu Aug 21, 2023
60a4eab
HDDS-9041. Intermittent Delete root failed.
Aug 21, 2023
0994c55
Merge remote-tracking branch 'origin/master' into HDDS-9041
Aug 21, 2023
c455972
Merge branch 'HDDS-9041' of github.com:devmadhuu/ozone into HDDS-9041
Aug 21, 2023
1ce9c3e
HDDS-9041. Intermittent Delete root failed.
Aug 21, 2023
4ffb500
HDDS-9041. Intermittent Delete root failed.
Aug 21, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -227,11 +227,9 @@ public static void teardown() {
@After
public void cleanup() {
try {
FileStatus[] fileStatuses = fs.listStatus(ROOT);
for (FileStatus fileStatus : fileStatuses) {
fs.delete(fileStatus.getPath(), true);
}
} catch (IOException ex) {
deleteRootDir();
} catch (IOException | InterruptedException ex) {
LOG.error("Failed to cleanup files.", ex);
fail("Failed to cleanup files.");
}
}
Expand Down Expand Up @@ -797,21 +795,25 @@ public void testListStatusOnKeyNameContainDelimiter() throws Exception {
*
* @throws IOException DB failure
*/
// NOTE(review): diff rendering — the first signature is the pre-change one
// (IOException only), the second is the post-change one (adds
// InterruptedException, presumably for a retry/wait added elsewhere in the
// PR — TODO confirm against the full file). Only one signature exists in
// the real source.
protected void deleteRootDir() throws IOException {
protected void deleteRootDir() throws IOException, InterruptedException {
// Snapshot the current children of the bucket root.
FileStatus[] fileStatuses = fs.listStatus(ROOT);

// Nothing to delete.
if (fileStatuses == null) {
return;
}
// Recursively delete every child, then re-list to verify the root is
// actually empty — guards against the intermittent "Delete root failed"
// this PR (HDDS-9041) addresses.
deleteRootRecursively(fileStatuses);
fileStatuses = fs.listStatus(ROOT);
if (fileStatuses != null) {
Assert.assertEquals(
"Delete root failed!", 0, fileStatuses.length);
}
}

// Deletes each given child of ROOT recursively via FileSystem#delete.
// NOTE(review): diff rendering — the lines after the blank line below
// (re-list of ROOT and the assert) are the REMOVED tail of the old
// deleteRootDir, not part of this helper's new body; the post-change
// helper is just the for-loop.
private static void deleteRootRecursively(FileStatus[] fileStatuses)
throws IOException {
for (FileStatus fStatus : fileStatuses) {
// true => recursive delete of directories and their contents.
fs.delete(fStatus.getPath(), true);
}

// --- pre-change lines (removed by the PR) ---
fileStatuses = fs.listStatus(ROOT);
if (fileStatuses != null) {
Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
}
}

/**
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -85,12 +85,6 @@ public TestOzoneFileSystemWithFSO(boolean setDefaultFs,
// NOTE(review): diff rendering — the try/catch below is the pre-change body
// being REMOVED by the PR; after the change this override only calls
// super.cleanup(), which itself now invokes deleteRootDir(), so the
// duplicate per-subclass cleanup is gone.
@Override
public void cleanup() {
super.cleanup();
// --- pre-change lines (removed by the PR) ---
try {
deleteRootDir();
} catch (IOException e) {
LOG.info("Failed to cleanup DB tables.", e);
fail("Failed to cleanup DB tables." + e.getMessage());
}
}

private static final Logger LOG =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,7 @@
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_ALREADY_EXISTS;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
Expand All @@ -80,7 +81,8 @@
* Tests to verify Object store with prefix enabled cases.
*/
public class TestObjectStoreWithFSO {

private static final Path ROOT =
new Path(OZONE_URI_DELIMITER);
private static MiniOzoneCluster cluster = null;
private static OzoneConfiguration conf;
private static String clusterId;
Expand Down Expand Up @@ -138,22 +140,25 @@ public void tearDown() throws Exception {
*
* @throws IOException DB failure
*/
// NOTE(review): diff rendering with both versions interleaved. Pre-change:
// private method building a local Path("/"). Post-change: protected method
// using the new static ROOT constant, and it now re-lists the root after
// deletion to assert it is empty (the HDDS-9041 fix).
private void deleteRootDir() throws IOException {
// --- pre-change lines (removed by the PR) ---
Path root = new Path("/");
FileStatus[] fileStatuses = fs.listStatus(root);
// --- post-change lines (added by the PR) ---
protected void deleteRootDir() throws IOException {
FileStatus[] fileStatuses = fs.listStatus(ROOT);

// Nothing under the root; done.
if (fileStatuses == null) {
return;
}
// Delete all children, then verify the root is really empty.
deleteRootRecursively(fileStatuses);
fileStatuses = fs.listStatus(ROOT);
if (fileStatuses != null) {
Assert.assertEquals(
"Delete root failed!", 0, fileStatuses.length);
}
}

// Deletes each given FileStatus path recursively.
// NOTE(review): diff rendering — the re-list of `root` and the assert after
// the blank line are the REMOVED old tail of deleteRootDir (they reference
// the old local `root`, which does not exist in this helper's scope); the
// post-change helper body is only the for-loop.
private static void deleteRootRecursively(FileStatus[] fileStatuses)
throws IOException {
for (FileStatus fStatus : fileStatuses) {
// true => recursive delete.
fs.delete(fStatus.getPath(), true);
}

// --- pre-change lines (removed by the PR) ---
fileStatuses = fs.listStatus(root);
if (fileStatuses != null) {
Assert.assertEquals("Delete root failed!", 0, fileStatuses.length);
}
}

@Test
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@
import java.util.NoSuchElementException;
import java.util.Collection;
import java.util.Collections;
import java.util.stream.Collectors;


import static org.apache.hadoop.ozone.om.exceptions.OMException.
Expand Down Expand Up @@ -214,11 +215,12 @@ public Collection<OzoneFileStatus> listStatusFSO(OmKeyArgs args,
HeapEntry entry = heapIterator.next();
OzoneFileStatus status = entry.getStatus(prefixKey,
scmBlockSize, volumeName, bucketName, replication);
map.put(entry.key, status);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

So, this is done with the assumption that the items from the cache iterator always come before those from the RocksDB iterator?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes @duongkame as per MinHeapIterator implementation.

map.putIfAbsent(entry.key, status);
}
}

return map.values();
return map.values().stream().filter(e -> e != null).collect(
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Will the status ever be null? If it can be, will putting the null check before putting it in the map a better idea?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@duongkame thanks for the review. Earlier, a null value from CacheIter (meaning the key was deleted) was not added to the map and was getting lost. We should allow it: when keyInfo is null, getStatus() returns null. This is needed to honor deleted keys in the cache.

Collectors.toList());
}

private String getDbKey(String key, OmKeyArgs args,
Expand Down Expand Up @@ -285,7 +287,7 @@ private static class HeapEntry implements Comparable<HeapEntry> {
private final Object value;

HeapEntry(EntryType entryType, String key, Object value) {
Preconditions.checkArgument(
Preconditions.checkArgument(value == null ||
value instanceof OmDirectoryInfo ||
value instanceof OmKeyInfo);
this.entryType = entryType;
Expand Down Expand Up @@ -322,6 +324,9 @@ public OzoneFileStatus getStatus(
String bucketName,
ReplicationConfig bucketReplication
) {
if (value == null) {
return null;
}
OmKeyInfo keyInfo;
if (entryType.isDir()) {
Preconditions.checkArgument(value instanceof OmDirectoryInfo);
Expand Down Expand Up @@ -455,10 +460,6 @@ private void getCacheValues() {
cacheIter.next();
String cacheKey = entry.getKey().getCacheKey();
Value cacheOmInfo = entry.getValue().getCacheValue();
// cacheOmKeyInfo is null if an entry is deleted in cache
if (cacheOmInfo == null) {
continue;
}

// Copy cache value to local copy and work on it
if (cacheOmInfo instanceof CopyObject) {
Expand Down