
Commit b03cae1
Merge pull request #3122 from amicic/copy_with_RB
Read Barrier aware ref array GC copy helper
dmitripivkine authored Oct 15, 2018
2 parents 7b95285 + 6878df8 commit b03cae1
Showing 3 changed files with 68 additions and 23 deletions.
12 changes: 1 addition & 11 deletions runtime/gc_include/ObjectAccessBarrierAPI.hpp
@@ -324,17 +324,7 @@ class MM_ObjectAccessBarrierAPI
 		bool copyLockword = true;
 
 		if (OBJECT_HEADER_SHAPE_POINTERS == J9CLASS_SHAPE(objectClass)) {
-			if (j9gc_modron_readbar_none != _readBarrierType) {
-				copyLockword = false;
-				if (j9gc_modron_readbar_evacuate == _readBarrierType) {
-					/* TODO implement HW barriers */
-					currentThread->javaVM->memoryManagerFunctions->j9gc_objaccess_cloneIndexableObject(currentThread, (J9IndexableObject*)original, (J9IndexableObject*)copy);
-				} else {
-					currentThread->javaVM->memoryManagerFunctions->j9gc_objaccess_cloneIndexableObject(currentThread, (J9IndexableObject*)original, (J9IndexableObject*)copy);
-				}
-			} else {
-				VM_ArrayCopyHelpers::referenceArrayCopy(currentThread, original, 0, copy, 0, size);
-			}
+			VM_ArrayCopyHelpers::referenceArrayCopy(currentThread, original, 0, copy, 0, size);
 		} else {
 			VM_ArrayCopyHelpers::primitiveArrayCopy(currentThread, original, 0, copy, 0, size, (((J9ROMArrayClass*)objectClass->romClass)->arrayShape & 0x0000FFFF));
 		}
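Note on the deletion above: both arms of the removed inner if/else called the same j9gc_objaccess_cloneIndexableObject helper (the j9gc_modron_readbar_evacuate arm only carried a TODO for hardware barriers), so the read-barrier special case collapses into the plain referenceArrayCopy call, and read-barrier awareness moves into the copy helpers added in StandardAccessBarrier.cpp below. A minimal sketch of the pattern those helpers follow — barrier calls named as in this diff, slot arrays simplified; illustrative only, not OpenJ9 source:

	/* Illustrative only: read barrier on every source slot while copying,
	 * then a single batch store barrier on the destination object. */
	for (I_32 i = 0; i < lengthInSlots; i++) {
		preObjectRead(vmThread, (J9Object *)srcObject, &srcSlots[i]);	/* per-slot read barrier */
		destSlots[i] = srcSlots[i];
	}
	preBatchObjectStoreImpl(vmThread, (J9Object *)destObject);	/* one write barrier for the whole batch */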
72 changes: 61 additions & 11 deletions runtime/gc_modron_standard/StandardAccessBarrier.cpp
@@ -603,6 +603,43 @@ MM_StandardAccessBarrier::getJNICriticalRegionCount(MM_GCExtensions *extensions)
 }
 
 #if defined(J9VM_GC_ARRAYLETS)
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+I_32
+MM_StandardAccessBarrier::doCopyContiguousBackwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
+{
+	srcIndex += lengthInSlots;
+	destIndex += lengthInSlots;
+
+	fj9object_t *srcSlot = (fj9object_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(fj9object_t));
+	fj9object_t *destSlot = (fj9object_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(fj9object_t));
+	fj9object_t *srcEndSlot = srcSlot - lengthInSlots;
+
+	while (srcSlot-- > srcEndSlot) {
+		preObjectRead(vmThread, (J9Object *)srcObject, srcSlot);
+
+		*--destSlot = *srcSlot;
+	}
+
+	return ARRAY_COPY_SUCCESSFUL;
+}
+
+I_32
+MM_StandardAccessBarrier::doCopyContiguousForwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots)
+{
+	fj9object_t *srcSlot = (fj9object_t *)indexableEffectiveAddress(vmThread, srcObject, srcIndex, sizeof(fj9object_t));
+	fj9object_t *destSlot = (fj9object_t *)indexableEffectiveAddress(vmThread, destObject, destIndex, sizeof(fj9object_t));
+	fj9object_t *srcEndSlot = srcSlot + lengthInSlots;
+
+	while (srcSlot < srcEndSlot) {
+		preObjectRead(vmThread, (J9Object *)srcObject, srcSlot);
+
+		*destSlot++ = *srcSlot++;
+	}
+
+	return ARRAY_COPY_SUCCESSFUL;
+}
+#endif /* OMR_GC_CONCURRENT_SCAVENGER */
+
 /**
  * Finds opportunities for doing the copy without or partially executing writeBarrier.
  * @return ARRAY_COPY_SUCCESSFUL if copy was successful, ARRAY_COPY_NOT_DONE no copy is done
@@ -623,17 +660,24 @@ MM_StandardAccessBarrier::backwardReferenceArrayCopyIndex(J9VMThread *vmThread,
 		Assert_MM_true(destObject == srcObject);
 		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(destObject));
 
-		if (!_extensions->isConcurrentScavengerEnabled()) {
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+		if (_extensions->isConcurrentScavengerInProgress()) {
+			/* During active CS cycle, we need a RB for every slot being copied.
+			 * For WB same rules apply - just need the final batch barrier.
+			 */
+			retValue = doCopyContiguousBackwardWithReadBarrier(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
+		} else
+#endif /* OMR_GC_CONCURRENT_SCAVENGER */
+		{
 			retValue = doCopyContiguousBackward(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
-			Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);
-
-			preBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
 		}
+		Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);
+
+		preBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
 	}
 	return retValue;
 }
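The backward variant exists for in-place copies — backwardReferenceArrayCopyIndex asserts destObject == srcObject — where a destination range that starts above an overlapping source range would clobber unread source slots if copied front-to-back. A standalone illustration of the same top-down pointer walk, with plain ints standing in for object slots and the read barrier reduced to a comment (demo code, not OpenJ9 source):

	#include <cstdio>

	/* Same loop shape as doCopyContiguousBackwardWithReadBarrier, minus the GC barriers. */
	static void copyBackward(int *base, int srcIndex, int destIndex, int lengthInSlots)
	{
		int *srcSlot = base + srcIndex + lengthInSlots;
		int *destSlot = base + destIndex + lengthInSlots;
		int *srcEndSlot = srcSlot - lengthInSlots;

		while (srcSlot-- > srcEndSlot) {
			/* the real helper calls preObjectRead(vmThread, srcObject, srcSlot) here */
			*--destSlot = *srcSlot;
		}
	}

	int main()
	{
		int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
		copyBackward(a, 0, 2, 5);	/* in-place overlapping copy, dest above src */
		for (int v : a) {
			printf("%d ", v);	/* prints: 0 1 0 1 2 3 4 7 */
		}
		printf("\n");
		return 0;
	}

A forward copy of the same ranges would re-read already-overwritten slots and produce 0 1 0 1 0 1 0 7, losing elements 2 through 4.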


/**
* Finds opportunities for doing the copy without or partially executing writeBarrier.
* @return ARRAY_COPY_SUCCESSFUL if copy was successful, ARRAY_COPY_NOT_DONE no copy is done
@@ -654,14 +698,20 @@ MM_StandardAccessBarrier::forwardReferenceArrayCopyIndex(J9VMThread *vmThread, J
 		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(destObject));
 		Assert_MM_true(_extensions->indexableObjectModel.isInlineContiguousArraylet(srcObject));
 
-		if (!_extensions->isConcurrentScavengerEnabled()) {
-			/* todo: for Concurrent Scavenger, create a helper that will invoke load barrier on each source object slot,
-			 * but only one batchstore barrier on the destination object */
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+		if (_extensions->isConcurrentScavengerInProgress()) {
+			/* During active CS cycle, we need a RB for every slot being copied.
+			 * For WB same rules apply - just need the final batch barrier.
+			 */
+			retValue = doCopyContiguousForwardWithReadBarrier(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
+		} else
+#endif /* OMR_GC_CONCURRENT_SCAVENGER */
+		{
 			retValue = doCopyContiguousForward(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
-			Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);
-
-			preBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
 		}
+
+		Assert_MM_true(retValue == ARRAY_COPY_SUCCESSFUL);
+		preBatchObjectStoreImpl(vmThread, (J9Object *)destObject);
 	}
 	return retValue;
 }
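How the VM picks between the two entry points is not part of this diff; since the backward path asserts destObject == srcObject, presumably it is selected, memmove-style, only for same-array copies whose destination starts inside the source range. A hypothetical sketch of that dispatch (the condition and call shape are assumptions, not code from this commit):

	/* Hypothetical caller-side dispatch; names from this diff, overlap rule assumed. */
	if ((srcObject == destObject)
		&& (destIndex > srcIndex)
		&& (destIndex < srcIndex + lengthInSlots)
	) {
		retValue = backwardReferenceArrayCopyIndex(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
	} else {
		retValue = forwardReferenceArrayCopyIndex(vmThread, srcObject, destObject, srcIndex, destIndex, lengthInSlots);
	}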
7 changes: 6 additions & 1 deletion runtime/gc_modron_standard/StandardAccessBarrier.hpp
@@ -53,7 +53,12 @@ class MM_StandardAccessBarrier : public MM_ObjectAccessBarrier
 #endif /* OMR_GC_REALTIME */
 	void postObjectStoreImpl(J9VMThread *vmThread, J9Object *dstObject, J9Object *srcObject);
 	void preBatchObjectStoreImpl(J9VMThread *vmThread, J9Object *dstObject);
-
+
+#if defined(OMR_GC_CONCURRENT_SCAVENGER)
+	I_32 doCopyContiguousBackwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots);
+	I_32 doCopyContiguousForwardWithReadBarrier(J9VMThread *vmThread, J9IndexableObject *srcObject, J9IndexableObject *destObject, I_32 srcIndex, I_32 destIndex, I_32 lengthInSlots);
+#endif /* OMR_GC_CONCURRENT_SCAVENGER */
+
 protected:
 	virtual bool initialize(MM_EnvironmentBase *env);
 	virtual void tearDown(MM_EnvironmentBase *env);
