Fix volume undo for unfetched data #5608

Merged
merged 10 commits on Aug 9, 2021
1 change: 1 addition & 0 deletions CHANGELOG.unreleased.md
@@ -22,6 +22,7 @@ For upgrade instructions, please check the [migration guide](MIGRATIONS.released
- Fix that active segment and node id were not shown in status bar when being in a non-hybrid annotation. [#5638](https://github.com/scalableminds/webknossos/pull/5638)
- Fix that "Compute Mesh File" button was enabled in scenarios where it should not be supported (e.g., when no segmentation layer exists). [#5648](https://github.com/scalableminds/webknossos/pull/5648)
- Fixed a bug where an authentication error was shown when viewing the meshes tab while not logged in. [#5647](https://github.com/scalableminds/webknossos/pull/5647)
- Fixed that undoing volume annotations could overwrite the backend data of not-yet-loaded magnifications with empty data. [#5608](https://github.com/scalableminds/webknossos/pull/5608)

### Removed
-
frontend/javascripts/oxalis/model/actions/volumetracing_actions.js
@@ -21,10 +21,12 @@ export type CopySegmentationLayerAction = {
type: "COPY_SEGMENTATION_LAYER",
source: "previousLayer" | "nextLayer",
};
export type MaybeBucketLoadedPromise = null | Promise<BucketDataArray>;
export type AddBucketToUndoAction = {
type: "ADD_BUCKET_TO_UNDO",
zoomedBucketAddress: Vector4,
bucketData: BucketDataArray,
maybeBucketLoadedPromise: MaybeBucketLoadedPromise,
};
type UpdateDirectionAction = { type: "UPDATE_DIRECTION", centroid: Vector3 };
type ResetContourAction = { type: "RESET_CONTOUR" };
@@ -141,7 +143,13 @@ export const setContourTracingModeAction = (mode: ContourMode): SetContourTracin
export const addBucketToUndoAction = (
zoomedBucketAddress: Vector4,
bucketData: BucketDataArray,
): AddBucketToUndoAction => ({ type: "ADD_BUCKET_TO_UNDO", zoomedBucketAddress, bucketData });
maybeBucketLoadedPromise: MaybeBucketLoadedPromise,
): AddBucketToUndoAction => ({
type: "ADD_BUCKET_TO_UNDO",
zoomedBucketAddress,
bucketData,
maybeBucketLoadedPromise,
});

export const inferSegmentationInViewportAction = (
position: Vector3,
28 changes: 19 additions & 9 deletions frontend/javascripts/oxalis/model/bucket_data_handling/bucket.js
@@ -240,15 +240,15 @@ export class DataBucket {
return { isVoxelOutside, neighbourBucketAddress, adjustedVoxel };
};

getCopyOfData(): BucketDataArray {
const bucketData = this.getOrCreateData();
getCopyOfData(): { dataClone: BucketDataArray, triggeredBucketFetch: boolean } {
const { data: bucketData, triggeredBucketFetch } = this.getOrCreateData();
const TypedArrayClass = getConstructorForElementClass(this.elementClass)[0];
const dataClone = new TypedArrayClass(bucketData);
return dataClone;
return { dataClone, triggeredBucketFetch };
}

label(labelFunc: BucketDataArray => void) {
const bucketData = this.getOrCreateData();
const { data: bucketData } = this.getOrCreateData();
this.markAndAddBucketForUndo();
labelFunc(bucketData);
this.throttledTriggerLabeled();
@@ -258,7 +258,16 @@
this.dirty = true;
if (!bucketsAlreadyInUndoState.has(this)) {
bucketsAlreadyInUndoState.add(this);
Store.dispatch(addBucketToUndoAction(this.zoomedAddress, this.getCopyOfData()));
let maybeBucketLoadedPromise = null;
const { dataClone, triggeredBucketFetch } = this.getCopyOfData();
if (triggeredBucketFetch) {
maybeBucketLoadedPromise = new Promise((resolve, _reject) => {
this.once("bucketLoaded", resolve);
});
}
Store.dispatch(
addBucketToUndoAction(this.zoomedAddress, dataClone, maybeBucketLoadedPromise),
);
}
}

@@ -294,16 +303,17 @@
this.accessed = false;
}

getOrCreateData(): BucketDataArray {
getOrCreateData(): { data: BucketDataArray, triggeredBucketFetch: boolean } {
let triggeredBucketFetch = false;
if (this.data == null) {
const [TypedArrayClass, channelCount] = getConstructorForElementClass(this.elementClass);
this.data = new TypedArrayClass(channelCount * Constants.BUCKET_SIZE);
if (!this.isMissing()) {
triggeredBucketFetch = true;
this.temporalBucketManager.addBucket(this);
}
}

return this.getData();
return { data: this.getData(), triggeredBucketFetch };
}

pull(): void {
@@ -346,7 +356,7 @@
} else {
this.data = data;
}
this.trigger("bucketLoaded");
this.trigger("bucketLoaded", data);
this.state = BucketStateEnum.LOADED;
break;
default:
frontend/javascripts/oxalis/model/bucket_data_handling/data_cube.js
@@ -412,7 +412,7 @@ class DataCube {
);
}
const seedVoxelIndex = this.getVoxelIndex(seedVoxel, zoomStep);
const sourceCellId = seedBucket.getOrCreateData()[seedVoxelIndex];
const sourceCellId = seedBucket.getOrCreateData().data[seedVoxelIndex];
if (sourceCellId === cellId) {
return null;
}
@@ -428,7 +428,7 @@
) {
continue;
}
const bucketData = currentBucket.getOrCreateData();
const { data: bucketData } = currentBucket.getOrCreateData();
const initialVoxelIndex = this.getVoxelIndexByVoxelOffset(initialVoxelInBucket);
if (bucketData[initialVoxelIndex] !== sourceCellId) {
// Ignoring neighbour buckets whose cellId at the initial voxel does not match the source cell id.
78 changes: 73 additions & 5 deletions frontend/javascripts/oxalis/model/sagas/save_saga.js
@@ -21,6 +21,7 @@ import {
type AddBucketToUndoAction,
type FinishAnnotationStrokeAction,
type ImportVolumeTracingAction,
type MaybeBucketLoadedPromise,
} from "oxalis/model/actions/volumetracing_actions";
import {
_all,
@@ -75,7 +76,12 @@ import { enforceSkeletonTracing } from "../accessors/skeletontracing_accessor";

const byteArrayToLz4Array = createWorker(compressLz4Block);

type UndoBucket = { zoomedBucketAddress: Vector4, data: Uint8Array };
type UndoBucket = {
zoomedBucketAddress: Vector4,
data: Uint8Array,
backendData?: Uint8Array,
maybeBucketLoadedPromise: MaybeBucketLoadedPromise,
};
type VolumeAnnotationBatch = Array<UndoBucket>;
type SkeletonUndoState = { type: "skeleton", data: SkeletonTracing };
type VolumeUndoState = { type: "volume", data: VolumeAnnotationBatch };
@@ -133,12 +139,13 @@ export function* collectUndoStates(): Saga<void> {
}
previousAction = skeletonUserAction;
} else if (addBucketToUndoAction) {
const { zoomedBucketAddress, bucketData } = addBucketToUndoAction;
const { zoomedBucketAddress, bucketData, maybeBucketLoadedPromise } = addBucketToUndoAction;
pendingCompressions.push(
yield* fork(
compressBucketAndAddToUndoBatch,
zoomedBucketAddress,
bucketData,
maybeBucketLoadedPromise,
currentVolumeAnnotationBatch,
),
);
@@ -207,6 +214,7 @@ function* getSkeletonTracingToUndoState(
function* compressBucketAndAddToUndoBatch(
zoomedBucketAddress: Vector4,
bucketData: BucketDataArray,
maybeBucketLoadedPromise: MaybeBucketLoadedPromise,
undoBatch: VolumeAnnotationBatch,
): Saga<void> {
const bucketDataAsByteArray = new Uint8Array(
@@ -216,7 +224,26 @@
);
const compressedBucketData = yield* call(byteArrayToLz4Array, bucketDataAsByteArray, true);
if (compressedBucketData != null) {
undoBatch.push({ zoomedBucketAddress, data: compressedBucketData });
const volumeUndoPart: UndoBucket = {
zoomedBucketAddress,
data: compressedBucketData,
maybeBucketLoadedPromise,
};
if (maybeBucketLoadedPromise != null) {
maybeBucketLoadedPromise.then(async backendBucketData => {
// Once the backend data is fetched, do not directly merge it with the already saved undo data
// as this operation is only needed when the volume action is undone. Additionally, merging is more
// expensive than saving the backend data. Thus, the data is only merged upon an undo action, i.e., when it is actually needed.
const backendDataAsByteArray = new Uint8Array(
backendBucketData.buffer,
backendBucketData.byteOffset,
backendBucketData.byteLength,
);
const compressedBackendData = await byteArrayToLz4Array(backendDataAsByteArray, true);
volumeUndoPart.backendData = compressedBackendData;
Member commented:

I'm struggling to wrap my head around all of this, but wouldn't it be possible to do the merging that is done in line 348 and line 367 directly here instead? In my head, this would greatly simplify applyAndGetRevertingVolumeBatch, because no special treatment would need to be implemented there (except for maybe waiting for the promise to be resolved). But it could very well be that I'm overlooking something. Let me know if it's easier to discuss this in a call.

@MichaelBuessemeyer (Contributor, Author) commented on Jul 15, 2021:

> In my head, this would greatly simplify the applyAndGetRevertingVolumeBatch, because no special treatment would need to be implemented, there (except maybe waiting for the promise to be resolved). But it could very well be that I'm overlooking something. Let me know if it's easier to discuss this in a call.

Indeed, it is possible to do the merging once the backend data has arrived. But we decided against this to defer the more intensive computation. If the backend data were merged directly once it arrives, the following would be done for each bucket whose backend data is still being fetched when the user draws over it:

Once the backend data is fetched:

  • decompress the current "revert"-bucket data,
  • merge it with the backend data,
  • compress the result again

And if the user then undoes the action:

  • decompress the bucket data
  • apply the decompressed data

The way the code works now is more complicated but should be (not measured) more efficient in normal use cases, because we assume that only a few brush strokes are undone by the user and therefore only a few buckets actually need to be merged with the backend data:

Once the backend data is fetched:

  • compress the backend data

And only if the undo action is triggered, do the following:

  • decompress the backend data and the "normal revert"-bucket data
  • merge them
  • apply the merged data

As you can see, the current implementation saves some steps in case the volume action is not undone.

Does this explain the design decision to you, and do you think it is justified?

Member replied:

Thanks a lot for the thorough explanation! It does make sense now that I understand the reasoning :) Could you add a comment summarizing this rationale above line 248?

});
}
undoBatch.push(volumeUndoPart);
}
}
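
To summarize the deferred-merge strategy from the code comment and the review discussion above, here is a minimal sketch. It is an assumed simplification, not the actual saga code: `compress` and `decompress` stand in for the LZ4 worker calls, and `applyToCube` stands in for writing the reverted bucket back via the cube. The backend data is only compressed when it arrives; the comparatively expensive decompress-and-merge work is postponed until the user actually undoes the stroke.

```js
// Minimal sketch of the deferred-merge strategy (assumed simplification, not the real saga).

function mergeDataWithBackendDataInPlace(originalData, backendData) {
  // Locally annotated voxels (non-zero) win; everything else falls back to the fetched backend value.
  for (let i = 0; i < originalData.length; ++i) {
    originalData[i] = originalData[i] || backendData[i];
  }
}

// Cheap path: runs once per fetched bucket, even if the brush stroke is never undone.
async function onBackendBucketLoaded(volumeUndoPart, backendBucketData, compress) {
  volumeUndoPart.backendData = await compress(backendBucketData);
}

// Expensive path: runs only for buckets of strokes that are actually undone.
async function undoBucket(volumeUndoPart, decompress, applyToCube) {
  const revertData = await decompress(volumeUndoPart.data);
  if (volumeUndoPart.backendData != null) {
    const backendData = await decompress(volumeUndoPart.backendData);
    mergeDataWithBackendDataInPlace(revertData, backendData);
  }
  applyToCube(volumeUndoPart.zoomedBucketAddress, revertData);
}
```

The trade-off is that applyAndGetRevertingVolumeBatch has to handle both the case where the backend data is already compressed and the case where the promise is still pending, which is exactly the special treatment debated in the thread above.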

@@ -274,6 +301,15 @@
}
}

function mergeDataWithBackendDataInPlace(
originalData: BucketDataArray,
backendData: BucketDataArray,
) {
for (let i = 0; i < originalData.length; ++i) {
originalData[i] = originalData[i] || backendData[i];
}
}
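
As a concrete illustration of the merge semantics of mergeDataWithBackendDataInPlace above (assuming that the value 0 marks a voxel that was not labeled locally), consider these hypothetical example arrays:

```js
// Hypothetical example values, only for illustration.
const localUndoData = new Uint8Array([0, 2, 0, 3]);
const backendData = new Uint8Array([7, 7, 7, 7]);
mergeDataWithBackendDataInPlace(localUndoData, backendData);
// localUndoData is now [7, 2, 7, 3]: locally drawn voxels keep their value,
// every unlabeled voxel takes the value fetched from the backend.
```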

function* applyAndGetRevertingVolumeBatch(
volumeAnnotationBatch: VolumeAnnotationBatch,
): Saga<VolumeUndoState> {
@@ -283,19 +319,51 @@
}
const { cube } = segmentationLayer;
const allCompressedBucketsOfCurrentState: VolumeAnnotationBatch = [];
for (const { zoomedBucketAddress, data: compressedBucketData } of volumeAnnotationBatch) {
for (const volumeUndoBucket of volumeAnnotationBatch) {
const {
zoomedBucketAddress,
data: compressedBucketData,
backendData: compressedBackendData,
} = volumeUndoBucket;
let { maybeBucketLoadedPromise } = volumeUndoBucket;
const bucket = cube.getOrCreateBucket(zoomedBucketAddress);
if (bucket.type === "null") {
continue;
}
const bucketData = bucket.getData();
if (compressedBackendData != null) {
// If the backend data for the bucket has been fetched in the meantime,
// we can first merge the data with the current data and then add this to the undo batch.
const decompressedBackendData = yield* call(
byteArrayToLz4Array,
compressedBackendData,
false,
);
if (decompressedBackendData) {
mergeDataWithBackendDataInPlace(bucketData, decompressedBackendData);
}
maybeBucketLoadedPromise = null;
}
yield* call(
compressBucketAndAddToUndoBatch,
zoomedBucketAddress,
bucketData,
maybeBucketLoadedPromise,
allCompressedBucketsOfCurrentState,
);
const decompressedBucketData = yield* call(byteArrayToLz4Array, compressedBucketData, false);
let decompressedBucketData = null;
if (compressedBackendData != null) {
let decompressedBackendData;
[decompressedBucketData, decompressedBackendData] = yield _all([
_call(byteArrayToLz4Array, compressedBucketData, false),
_call(byteArrayToLz4Array, compressedBackendData, false),
]);
if (decompressedBucketData && decompressedBackendData) {
mergeDataWithBackendDataInPlace(decompressedBucketData, decompressedBackendData);
}
} else {
decompressedBucketData = yield* call(byteArrayToLz4Array, compressedBucketData, false);
}
if (decompressedBucketData) {
// Set the new bucket data to add the bucket directly to the pushqueue.
cube.setBucketData(zoomedBucketAddress, decompressedBucketData);
@@ -332,7 +332,7 @@ export function applyVoxelMap(
if (bucket.type === "null") {
continue;
}
const data = bucket.getOrCreateData();
const { data } = bucket.getOrCreateData();
for (let firstDim = 0; firstDim < constants.BUCKET_WIDTH; firstDim++) {
for (let secondDim = 0; secondDim < constants.BUCKET_WIDTH; secondDim++) {
if (voxelMap[firstDim * constants.BUCKET_WIDTH + secondDim] === 1) {
@@ -472,7 +472,7 @@ test("A labeledVoxelMap should be applied correctly", t => {
expectedBucketData[addr] = 1;
});
applyVoxelMap(labeledVoxelsMap, cube, 1, get3DAddress, 1, 2, true);
const labeledBucketData = bucket.getOrCreateData();
const { data: labeledBucketData } = bucket.getOrCreateData();
for (let firstDim = 0; firstDim < Constants.BUCKET_WIDTH; firstDim++) {
for (let secondDim = 0; secondDim < Constants.BUCKET_WIDTH; secondDim++) {
const addr = cube.getVoxelIndex([firstDim, secondDim, 5], 0);