Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,8 @@ public boolean test(HddsVolume vol) {
long volumeCapacity = usage.getCapacity();
long free = usage.getAvailable();
long committed = vol.getCommittedBytes();
long available = free - committed;
long importContainerCommitBytes = vol.getContainerImportCommittedBytes();
long available = free - committed - importContainerCommitBytes;
long volumeFreeSpaceToSpare =
new VolumeUsage.MinFreeSpaceCalculator(vol.getConf()).get(volumeCapacity);
boolean hasEnoughSpace = VolumeUsage.hasVolumeEnoughSpace(free, committed,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -84,6 +84,8 @@ public class HddsVolume extends StorageVolume {

private final AtomicLong committedBytes = new AtomicLong(); // till Open containers become full

private final AtomicLong containerImportCommittedBytes = new AtomicLong();
Copy link
Contributor

@ChenSammi ChenSammi Feb 28, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please reuse the committedBytes, as there is no difference from the HddsVolume point of view.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done


// Mentions the type of volume
private final VolumeType type = VolumeType.DATA_VOLUME;
// The dedicated DbVolume that the db instance of this HddsVolume resides.
Expand Down Expand Up @@ -305,6 +307,14 @@ public long getCommittedBytes() {
return committedBytes.get();
}

/**
 * Atomically adjusts the number of bytes reserved on this volume for
 * in-flight container imports. Callers reserve space before a download
 * starts and release it with a negative {@code delta} when the import
 * finishes or fails.
 *
 * @param delta bytes to add to the reservation (may be negative)
 * @return the updated reservation total after applying {@code delta}
 */
public long incImportContainerCommitBytes(long delta) {
  final long updated = containerImportCommittedBytes.addAndGet(delta);
  return updated;
}

/**
 * Returns the number of bytes currently reserved on this volume for
 * ongoing container imports.
 *
 * @return the current import-reservation total in bytes
 */
public long getContainerImportCommittedBytes() {
  return this.containerImportCommittedBytes.get();
}

/**
 * Sets the dedicated DbVolume on which the db instance of this
 * HddsVolume resides.
 *
 * @param dbVolume the DbVolume to associate with this volume
 */
public void setDbVolume(DbVolume dbVolume) {
this.dbVolume = dbVolume;
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -172,4 +172,7 @@ protected TarContainerPacker getPacker(CopyContainerCompression compression) {
return new TarContainerPacker(compression);
}

/**
 * Exposes the default container size so callers (e.g. replication
 * handlers) can reserve matching space on a target volume before an
 * import.
 *
 * @return the default container size; presumably in bytes — confirm
 *         against how {@code containerSize} is configured
 */
public long getDefaultContainerSize() {
return containerSize;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,9 @@
import java.nio.file.Path;
import java.util.List;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.replication.AbstractReplicationTask.Status;
Expand All @@ -44,6 +46,7 @@ public class DownloadAndImportReplicator implements ContainerReplicator {
private final ContainerDownloader downloader;
private final ContainerImporter containerImporter;
private final ContainerSet containerSet;
private final long containerSize;

public DownloadAndImportReplicator(
ConfigurationSource conf, ContainerSet containerSet,
Expand All @@ -53,6 +56,9 @@ public DownloadAndImportReplicator(
this.containerSet = containerSet;
this.downloader = downloader;
this.containerImporter = containerImporter;
containerSize = (long) conf.getStorageSize(
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
}

@Override
Expand All @@ -70,9 +76,16 @@ public void replicate(ReplicationTask task) {

LOG.info("Starting replication of container {} from {} using {}",
containerID, sourceDatanodes, compression);
HddsVolume targetVolume = null;

try {
HddsVolume targetVolume = containerImporter.chooseNextVolume();
targetVolume = containerImporter.chooseNextVolume();
targetVolume.incImportContainerCommitBytes(containerSize * 2);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Reserved space should only be the size of the container, not double the size.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is done the same way in chooseNextVolume. We are keeping it the same so that there is no impact.

if (targetVolume.getCurrentUsage().getAvailable() - targetVolume.getCommittedBytes()
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please try to use AvailableSpaceFilter to detect whether volume is full.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Done

- targetVolume.getContainerImportCommittedBytes() <= 0) {
task.setStatus(Status.FAILED);
return;
}
// Wait for the download. This thread pool is limiting the parallel
// downloads, so it's ok to block here and wait for the full download.
Path tarFilePath =
Expand All @@ -95,6 +108,10 @@ public void replicate(ReplicationTask task) {
} catch (IOException e) {
LOG.error("Container {} replication was unsuccessful.", containerID, e);
task.setStatus(Status.FAILED);
} finally {
if (targetVolume != null) {
targetVolume.incImportContainerCommitBytes(-containerSize * 2);
}
}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.util.DiskChecker;
import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller;
import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver;
import org.slf4j.Logger;
Expand All @@ -50,7 +51,7 @@ class SendContainerRequestHandler
private long containerId = -1;
private long nextOffset;
private OutputStream output;
private HddsVolume volume;
private HddsVolume volume = null;
private Path path;
private CopyContainerCompression compression;
private final ZeroCopyMessageMarshaller<SendContainerRequest> marshaller;
Expand Down Expand Up @@ -85,6 +86,14 @@ public void onNext(SendContainerRequest req) {
if (containerId == -1) {
containerId = req.getContainerID();
volume = importer.chooseNextVolume();
volume.incImportContainerCommitBytes(importer.getDefaultContainerSize() * 2);
if (volume.getCurrentUsage().getAvailable() - volume.getCommittedBytes()
- volume.getContainerImportCommittedBytes() <= 0) {
volume.incImportContainerCommitBytes(-importer.getDefaultContainerSize() * 2);
volume = null;
throw new DiskChecker.DiskOutOfSpaceException("No more available volumes");
}

Path dir = ContainerImporter.getUntarDirectory(volume);
Files.createDirectories(dir);
path = dir.resolve(ContainerUtils.getContainerTarName(containerId));
Expand All @@ -110,32 +119,44 @@ public void onNext(SendContainerRequest req) {

@Override
public void onError(Throwable t) {
LOG.warn("Error receiving container {} at {}", containerId, nextOffset, t);
closeOutput();
deleteTarball();
responseObserver.onError(t);
try {
LOG.warn("Error receiving container {} at {}", containerId, nextOffset, t);
closeOutput();
deleteTarball();
responseObserver.onError(t);
} finally {
if (volume != null) {
volume.incImportContainerCommitBytes(-importer.getDefaultContainerSize() * 2);
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Some failure case doesn't require this deduction.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Which failure case? If the volume is not assigned, I mark it as null so that on error it will not be decremented. If the volume is assigned, it means space is reserved for it.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I see.

}
}
}

@Override
public void onCompleted() {
if (output == null) {
LOG.warn("Received container without any parts");
return;
}

LOG.info("Container {} is downloaded with size {}, starting to import.",
containerId, nextOffset);
closeOutput();

try {
importer.importContainer(containerId, path, volume, compression);
LOG.info("Container {} is replicated successfully", containerId);
responseObserver.onNext(SendContainerResponse.newBuilder().build());
responseObserver.onCompleted();
} catch (Throwable t) {
LOG.warn("Failed to import container {}", containerId, t);
deleteTarball();
responseObserver.onError(t);
if (output == null) {
LOG.warn("Received container without any parts");
return;
}

LOG.info("Container {} is downloaded with size {}, starting to import.",
containerId, nextOffset);
closeOutput();

try {
importer.importContainer(containerId, path, volume, compression);
LOG.info("Container {} is replicated successfully", containerId);
responseObserver.onNext(SendContainerResponse.newBuilder().build());
responseObserver.onCompleted();
} catch (Throwable t) {
LOG.warn("Failed to import container {}", containerId, t);
deleteTarball();
responseObserver.onError(t);
}
} finally {
if (volume != null) {
volume.incImportContainerCommitBytes(-importer.getDefaultContainerSize() * 2);
}
}
}

Expand Down