57 changes: 27 additions & 30 deletions src/coreclr/jit/importer.cpp
@@ -11379,38 +11379,35 @@ void Compiler::impImportBlock(BasicBlock* block)
baseTmp = impGetSpillTmpBase(block);
}

/* Spill all stack entries into temps */
unsigned level, tempNum;
// Spill all stack entries into temps

JITDUMP("\nSpilling stack entries into temps\n");
for (level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
for (unsigned level = 0, tempNum = baseTmp; level < verCurrentState.esStackDepth; level++, tempNum++)
{
GenTree* tree = verCurrentState.esStack[level].val;

/* VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
the other. This should merge to a byref in unverifiable code.
However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
successor would be imported assuming there was a TYP_I_IMPL on
the stack. Thus the value would not get GC-tracked. Hence,
change the temp to TYP_BYREF and reimport the successors.
Note: We should only allow this in unverifiable code.
*/
if (tree->gtType == TYP_BYREF && lvaTable[tempNum].lvType == TYP_I_IMPL)
// VC generates code where it pushes a byref from one branch, and an int (ldc.i4 0) from
// the other. This should merge to a byref in unverifiable code.
// However, if the branch which leaves the TYP_I_IMPL on the stack is imported first, the
// successor would be imported assuming there was a TYP_I_IMPL on
// the stack. Thus the value would not get GC-tracked. Hence,
// change the temp to TYP_BYREF and reimport the clique.
LclVarDsc* tempDsc = lvaGetDesc(tempNum);
if (tree->TypeIs(TYP_BYREF) && (tempDsc->TypeGet() == TYP_I_IMPL))
{
lvaTable[tempNum].lvType = TYP_BYREF;
impReimportMarkSuccessors(block);
markImport = true;
tempDsc->lvType = TYP_BYREF;
reimportSpillClique = true;
}

#ifdef TARGET_64BIT
if (genActualType(tree->gtType) == TYP_I_IMPL && lvaTable[tempNum].lvType == TYP_INT)
if ((genActualType(tree) == TYP_I_IMPL) && (tempDsc->TypeGet() == TYP_INT))
{
// Some other block in the spill clique set this to "int", but now we have "native int".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_I_IMPL;
reimportSpillClique = true;
tempDsc->lvType = TYP_I_IMPL;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_I_IMPL)
else if ((genActualType(tree) == TYP_INT) && (tempDsc->TypeGet() == TYP_I_IMPL))
{
// Spill clique has decided this should be "native int", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique.
@@ -11425,14 +11422,14 @@ void Compiler::impImportBlock(BasicBlock* block)
// imported already, we need to change the type of the local and reimport the spill clique.
// If the 'byref' side has imported, we insert a cast from int to 'native int' to match
// the 'byref' size.
if (genActualType(tree->gtType) == TYP_BYREF && lvaTable[tempNum].lvType == TYP_INT)
if ((genActualType(tree) == TYP_BYREF) && (tempDsc->TypeGet() == TYP_INT))
{
// Some other block in the spill clique set this to "int", but now we have "byref".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_BYREF;
reimportSpillClique = true;
tempDsc->lvType = TYP_BYREF;
reimportSpillClique = true;
}
else if (genActualType(tree->gtType) == TYP_INT && lvaTable[tempNum].lvType == TYP_BYREF)
else if ((genActualType(tree) == TYP_INT) && (tempDsc->TypeGet() == TYP_BYREF))
{
// Spill clique has decided this should be "byref", but this block only pushes an "int".
// Insert a sign-extension to "native int" so we match the clique size.
@@ -11441,14 +11438,14 @@ void Compiler::impImportBlock(BasicBlock* block)

#endif // TARGET_64BIT

if (tree->gtType == TYP_DOUBLE && lvaTable[tempNum].lvType == TYP_FLOAT)
if (tree->TypeIs(TYP_DOUBLE) && (tempDsc->lvType == TYP_FLOAT))
{
// Some other block in the spill clique set this to "float", but now we have "double".
// Change the type and go back to re-import any blocks that used the wrong type.
lvaTable[tempNum].lvType = TYP_DOUBLE;
reimportSpillClique = true;
tempDsc->lvType = TYP_DOUBLE;
reimportSpillClique = true;
}
else if (tree->gtType == TYP_FLOAT && lvaTable[tempNum].lvType == TYP_DOUBLE)
else if (tree->TypeIs(TYP_FLOAT) && (tempDsc->TypeGet() == TYP_DOUBLE))
{
// Spill clique has decided this should be "double", but this block only pushes a "float".
// Insert a cast to "double" so we match the clique.
@@ -11459,11 +11456,11 @@ void Compiler::impImportBlock(BasicBlock* block)
are spilling to the temps already used by a previous block),
we need to spill addStmt */

if (addStmt != nullptr && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum))
if ((addStmt != nullptr) && !newTemps && gtHasRef(addStmt->GetRootNode(), tempNum))
{
GenTree* addTree = addStmt->GetRootNode();

if (addTree->gtOper == GT_JTRUE)
if (addTree->OperIs(GT_JTRUE))
{
GenTree* relOp = addTree->AsOp()->gtOp1;
assert(relOp->OperIsCompare());
@@ -11488,7 +11485,7 @@ void Compiler::impImportBlock(BasicBlock* block)
}
else
{
assert(addTree->gtOper == GT_SWITCH && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));
assert(addTree->OperIs(GT_SWITCH) && genActualTypeIsIntOrI(addTree->AsOp()->gtOp1->TypeGet()));

unsigned temp = lvaGrabTemp(true DEBUGARG("spill addStmt SWITCH"));
impStoreTemp(temp, addTree->AsOp()->gtOp1, level);
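The importer.cpp hunks above all apply one merge rule for temps shared by a spill clique: when predecessors leave compatible but differently sized types on the stack, the wider type wins. Either the temp is retyped and the clique re-imported, or this block's narrower value receives a widening cast before the spill. A rough standalone sketch of that rule follows, using hypothetical StackType/MergeAction enums in place of the JIT's var_types and LclVarDsc machinery; it is an illustration, not the JIT's actual API.

// Hypothetical, self-contained restatement of the spill-temp merge rule above;
// the real code works on var_types and updates LclVarDsc::lvType,
// reimportSpillClique, and the cast-insertion paths inline.
#include <cstdio>

enum class StackType { Int, NativeInt, ByRef, Float, Double };
enum class MergeAction { None, WidenTempAndReimport, CastPushedValue };

static MergeAction MergeSpillTempType(StackType pushed, StackType temp)
{
    // This block pushes the wider type, but another block in the clique has
    // already typed the temp more narrowly: widen the temp and re-import.
    if ((pushed == StackType::ByRef     && temp == StackType::NativeInt) ||
        (pushed == StackType::NativeInt && temp == StackType::Int)       || // 64-bit only
        (pushed == StackType::ByRef     && temp == StackType::Int)       || // 64-bit only
        (pushed == StackType::Double    && temp == StackType::Float))
    {
        return MergeAction::WidenTempAndReimport;
    }

    // The clique already settled on the wider type and this block pushes the
    // narrower one: widen this block's value with a cast before spilling it.
    if ((pushed == StackType::Int   && temp == StackType::NativeInt) || // sign-extend
        (pushed == StackType::Int   && temp == StackType::ByRef)     || // sign-extend
        (pushed == StackType::Float && temp == StackType::Double))      // cast to double
    {
        return MergeAction::CastPushedValue;
    }

    return MergeAction::None; // types already agree
}

int main()
{
    // One branch pushed a byref while another already spilled an int (ldc.i4 0):
    // the temp is widened to byref and the spill clique is re-imported.
    bool reimport = MergeSpillTempType(StackType::ByRef, StackType::Int) ==
                    MergeAction::WidenTempAndReimport;
    std::printf("re-import clique: %s\n", reimport ? "yes" : "no");
    return 0;
}

Replacing impReimportMarkSuccessors plus markImport with the single reimportSpillClique flag in the byref case makes that path consistent with the other widenings in this function, which already used the flag.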
1 change: 0 additions & 1 deletion src/coreclr/jit/lclvars.cpp
@@ -4146,7 +4146,6 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt,

// Check that the LCL_VAR node has the same type as the underlying variable, save a few mismatches we allow.
assert(tree->TypeIs(varDsc->TypeGet(), genActualType(varDsc)) ||
(tree->TypeIs(TYP_I_IMPL) && (varDsc->TypeGet() == TYP_BYREF)) || // Created for spill clique import.
(tree->TypeIs(TYP_BYREF) && (varDsc->TypeGet() == TYP_I_IMPL)) || // Created by inliner substitution.
(tree->TypeIs(TYP_INT) && (varDsc->TypeGet() == TYP_LONG))); // Created by "optNarrowTree".
}
10 changes: 7 additions & 3 deletions src/coreclr/jit/morphblock.cpp
@@ -1183,8 +1183,7 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField()
addrSpillTemp = m_comp->lvaGrabTemp(true DEBUGARG("BlockOp address local"));

LclVarDsc* addrSpillDsc = m_comp->lvaGetDesc(addrSpillTemp);
addrSpillDsc->lvType = addrSpill->TypeIs(TYP_REF) ? TYP_REF : TYP_BYREF; // TODO-ASG: zero-diff quirk, delete.
addrSpillStore = m_comp->gtNewTempStore(addrSpillTemp, addrSpill);
addrSpillStore = m_comp->gtNewTempStore(addrSpillTemp, addrSpill);
}

auto grabAddr = [=, &result](unsigned offs) {
@@ -1227,7 +1226,12 @@ GenTree* MorphCopyBlockHelper::CopyFieldByField()
// handling.
GenTreeIntCon* fldOffsetNode = m_comp->gtNewIconNode(fullOffs, TYP_I_IMPL);
fldOffsetNode->gtFieldSeq = addrBaseOffsFldSeq;
addrClone = m_comp->gtNewOperNode(GT_ADD, TYP_BYREF, addrClone, fldOffsetNode);
addrClone = m_comp->gtNewOperNode(GT_ADD, varTypeIsGC(addrClone) ? TYP_BYREF : TYP_I_IMPL, addrClone,
fldOffsetNode);
// Avoid constant prop propagating each field access with a large
// constant address. TODO-Cleanup: We should tune constant prop to
// have better heuristics around this.
addrClone->gtFlags |= GTF_DONT_CSE;
}

return addrClone;
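The morphblock.cpp change builds each field address as base plus offset, types the ADD as a GC byref only when the base address is itself GC-tracked, and marks the node GTF_DONT_CSE so that, per the PR's own comment, constant propagation does not materialize a large absolute address at every field access. Below is a toy sketch of that typing decision, with hypothetical AddrNode/NodeType types standing in for GenTree and var_types; it only models the decision, not the IR.

// Toy model (hypothetical types) of the per-field address construction above.
#include <cstdint>
#include <cstdio>

enum class NodeType { NativeInt, ByRef };

struct AddrNode
{
    NodeType type;
    uint64_t value;   // stands in for the address expression
    bool     dontCse; // models GTF_DONT_CSE
};

static AddrNode MakeFieldAddr(const AddrNode& base, uint64_t fieldOffset)
{
    AddrNode add;
    // Only a GC-tracked base yields a byref sum; a raw constant or native-int
    // base stays native int, so the GC never tracks an unmanaged address.
    add.type    = (base.type == NodeType::ByRef) ? NodeType::ByRef : NodeType::NativeInt;
    add.value   = base.value + fieldOffset;
    // Mirrors the GTF_DONT_CSE flag set in the hunk above: keep constant prop
    // from folding the full absolute address into each field access separately.
    add.dontCse = true;
    return add;
}

int main()
{
    AddrNode rawBase{NodeType::NativeInt, 0x7ffe12340000ull, false};
    AddrNode fld = MakeFieldAddr(rawBase, 0x18);
    std::printf("type=%s addr=0x%llx dontCse=%d\n",
                fld.type == NodeType::ByRef ? "byref" : "native int",
                (unsigned long long)fld.value, fld.dontCse ? 1 : 0);
    return 0;
}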