diff --git a/src/coreclr/src/jit/codegenxarch.cpp b/src/coreclr/src/jit/codegenxarch.cpp
index f0046ab211696..0eadce2c17f97 100644
--- a/src/coreclr/src/jit/codegenxarch.cpp
+++ b/src/coreclr/src/jit/codegenxarch.cpp
@@ -4557,7 +4557,8 @@ void CodeGen::genCodeForLclVar(GenTreeLclVar* tree)
     // lcl_vars are not defs
     assert((tree->gtFlags & GTF_VAR_DEF) == 0);

-    bool isRegCandidate = compiler->lvaTable[tree->GetLclNum()].lvIsRegCandidate();
+    LclVarDsc* varDsc         = compiler->lvaGetDesc(tree);
+    bool       isRegCandidate = varDsc->lvIsRegCandidate();

     // If this is a register candidate that has been spilled, genConsumeReg() will
     // reload it at the point of use. Otherwise, if it's not in a register, we load it here.
diff --git a/src/coreclr/src/jit/eeinterface.cpp b/src/coreclr/src/jit/eeinterface.cpp
index d2efd790a0e59..d0a08959f43db 100644
--- a/src/coreclr/src/jit/eeinterface.cpp
+++ b/src/coreclr/src/jit/eeinterface.cpp
@@ -102,23 +102,24 @@ const char* Compiler::eeGetMethodFullName(CORINFO_METHOD_HANDLE hnd)
     /* add length of methodName and opening bracket */
     length += strlen(methodName) + 1;

-    /* figure out the signature */
-
-    param.pThis->eeGetMethodSig(param.hnd, &param.sig);
-
-    // allocate space to hold the class names for each of the parameters
-
-    if (param.sig.numArgs > 0)
-    {
-        param.pArgNames = getAllocator(CMK_DebugOnly).allocate<const char*>(param.sig.numArgs);
-    }
-    else
-    {
-        param.pArgNames = nullptr;
-    }
-
-    PAL_TRY(FilterSuperPMIExceptionsParam_eeinterface*, pParam, &param)
-    {
+    PAL_TRY(FilterSuperPMIExceptionsParam_eeinterface*, pParam, &param)
+    {
+        /* figure out the signature */
+
+        pParam->pThis->eeGetMethodSig(pParam->hnd, &pParam->sig);
+
+        // allocate space to hold the class names for each of the parameters
+
+        if (pParam->sig.numArgs > 0)
+        {
+            pParam->pArgNames = pParam->pThis->getAllocator(CMK_DebugOnly).allocate<const char*>(pParam->sig.numArgs);
+        }
+        else
+        {
+            pParam->pArgNames = nullptr;
+        }
+
         unsigned i;

         pParam->argLst = pParam->sig.args;
diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp
index a829c4c7e6dac..1bfb4bd006ee1 100644
--- a/src/coreclr/src/jit/gentree.cpp
+++ b/src/coreclr/src/jit/gentree.cpp
@@ -16869,6 +16869,9 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
             case GT_OBJ:
                 structHnd = tree->AsObj()->GetLayout()->GetClassHandle();
                 break;
+            case GT_BLK:
+                structHnd = tree->AsBlk()->GetLayout()->GetClassHandle();
+                break;
             case GT_CALL:
                 structHnd = tree->AsCall()->gtRetClsHnd;
                 break;
@@ -16944,6 +16947,10 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
                                 }
                             }
                         }
+                        else if (addr->OperGet() == GT_LCL_VAR)
+                        {
+                            structHnd = gtGetStructHandleIfPresent(addr);
+                        }
                     }
                 }
                 break;
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index 04c1033aad41c..d828dda679e3e 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -1214,7 +1214,9 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
         // If it is a multi-reg struct return, don't change the oper to GT_LCL_FLD.
        // That is, the IR will be of the form lclVar = call for multi-reg return
        //
-        GenTreeLclVar* lcl = destAddr->AsOp()->gtOp1->AsLclVar();
+        GenTreeLclVar* lcl    = destAddr->AsOp()->gtOp1->AsLclVar();
+        unsigned       lclNum = lcl->GetLclNum();
+        LclVarDsc*     varDsc = lvaGetDesc(lclNum);
         if (src->AsCall()->HasMultiRegRetVal())
         {
             // Mark the struct LclVar as used in a MultiReg return context
@@ -1222,13 +1224,13 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
             // TODO-1stClassStructs: Eliminate this pessimization when we can more generally
             // handle multireg returns.
             lcl->gtFlags |= GTF_DONT_CSE;
-            lvaTable[lcl->AsLclVarCommon()->GetLclNum()].lvIsMultiRegRet = true;
+            varDsc->lvIsMultiRegRet = true;
         }
         else if (lcl->gtType != src->gtType)
         {
             // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
             lcl->ChangeOper(GT_LCL_FLD);
-            fgLclFldAssign(lcl->AsLclVarCommon()->GetLclNum());
+            fgLclFldAssign(lclNum);
             lcl->gtType = src->gtType;
             asgType = src->gtType;
         }
@@ -1238,7 +1240,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
 #if defined(TARGET_ARM)
             // TODO-Cleanup: This should have been taken care of in the above HasMultiRegRetVal() case,
             // but that method has not been updadted to include ARM.
-            impMarkLclDstNotPromotable(lcl->AsLclVarCommon()->GetLclNum(), src, structHnd);
+            impMarkLclDstNotPromotable(lclNum, src, structHnd);
             lcl->gtFlags |= GTF_DONT_CSE;
 #elif defined(UNIX_AMD64_ABI)
             // Not allowed for FEATURE_CORCLR which is the only SKU available for System V OSs.
@@ -1250,7 +1252,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
             // TODO-Cleanup: Why is this needed here? This seems that it will set this even for
             // non-multireg returns.
             lcl->gtFlags |= GTF_DONT_CSE;
-            lvaTable[lcl->AsLclVarCommon()->GetLclNum()].lvIsMultiRegRet = true;
+            varDsc->lvIsMultiRegRet = true;
 #endif
         }
         else // we don't have a GT_ADDR of a GT_LCL_VAR
@@ -2558,7 +2560,7 @@ BasicBlock* Compiler::impPushCatchArgOnStack(BasicBlock* hndBlk, CORINFO_CLASS_H
 #if defined(JIT32_GCENCODER)
     const bool forceInsertNewBlock = isSingleBlockFilter || compStressCompile(STRESS_CATCH_ARG, 5);
 #else
-    const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
+    const bool forceInsertNewBlock = compStressCompile(STRESS_CATCH_ARG, 5);
 #endif // defined(JIT32_GCENCODER)

     /* Spill GT_CATCH_ARG to a temp if there are jumps to the beginning of the handler */
diff --git a/src/coreclr/src/jit/instr.cpp b/src/coreclr/src/jit/instr.cpp
index 695cd0bd5dce7..7a0a6c63f37fd 100644
--- a/src/coreclr/src/jit/instr.cpp
+++ b/src/coreclr/src/jit/instr.cpp
@@ -1817,6 +1817,7 @@ instruction CodeGenInterface::ins_Load(var_types srcType, bool aligned /*=false*
  */
 instruction CodeGen::ins_Copy(var_types dstType)
 {
+    assert(emitTypeActSz[dstType] != 0);
 #if defined(TARGET_XARCH)
     if (varTypeIsSIMD(dstType))
     {
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 192c05479363f..4a7a098efc9ee 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -190,7 +190,7 @@ GenTree* Lowering::LowerNode(GenTree* node)
             break;

         case GT_RETURN:
-            LowerRet(node);
+            LowerRet(node->AsUnOp());
             break;

         case GT_RETURNTRAP:
@@ -3084,7 +3084,7 @@ void Lowering::LowerJmpMethod(GenTree* jmp)
 }

 // Lower GT_RETURN node to insert PInvoke method epilog if required.
-void Lowering::LowerRet(GenTree* ret)
+void Lowering::LowerRet(GenTreeUnOp* ret)
 {
     assert(ret->OperGet() == GT_RETURN);

@@ -3097,7 +3097,7 @@ void Lowering::LowerRet(GenTree* ret)
         (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1())))
     {
         GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, ret->TypeGet(), ret->gtGetOp1(), nullptr);
-        ret->AsOp()->gtOp1 = bitcast;
+        ret->gtOp1           = bitcast;
         BlockRange().InsertBefore(ret, bitcast);
         ContainCheckBitCast(bitcast);
     }
@@ -3107,7 +3107,7 @@ void Lowering::LowerRet(GenTree* ret)
     {
         InsertPInvokeMethodEpilog(comp->compCurBB DEBUGARG(ret));
     }
-    ContainCheckRet(ret->AsOp());
+    ContainCheckRet(ret);
 }

 GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
@@ -5764,7 +5764,7 @@ void Lowering::ContainCheckLclHeap(GenTreeOp* node)
 // Arguments:
 //    node - pointer to the node
 //
-void Lowering::ContainCheckRet(GenTreeOp* ret)
+void Lowering::ContainCheckRet(GenTreeUnOp* ret)
 {
     assert(ret->OperIs(GT_RETURN));

diff --git a/src/coreclr/src/jit/lower.h b/src/coreclr/src/jit/lower.h
index 40112c0c6de01..883a20e16ea7a 100644
--- a/src/coreclr/src/jit/lower.h
+++ b/src/coreclr/src/jit/lower.h
@@ -83,7 +83,7 @@ class Lowering final : public Phase
     void ContainCheckReturnTrap(GenTreeOp* node);
     void ContainCheckArrOffset(GenTreeArrOffs* node);
     void ContainCheckLclHeap(GenTreeOp* node);
-    void ContainCheckRet(GenTreeOp* node);
+    void ContainCheckRet(GenTreeUnOp* ret);
     void ContainCheckJTrue(GenTreeOp* node);
     void ContainCheckBitCast(GenTree* node);

@@ -133,7 +133,7 @@ class Lowering final : public Phase
     GenTree* LowerJTrue(GenTreeOp* jtrue);
     GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
     void LowerJmpMethod(GenTree* jmp);
-    void LowerRet(GenTree* ret);
+    void LowerRet(GenTreeUnOp* ret);
     GenTree* LowerDelegateInvoke(GenTreeCall* call);
     GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
     GenTree* LowerDirectCall(GenTreeCall* call);
diff --git a/src/coreclr/src/jit/lsraarmarch.cpp b/src/coreclr/src/jit/lsraarmarch.cpp
index 303b1c7514cea..fab94aaf11252 100644
--- a/src/coreclr/src/jit/lsraarmarch.cpp
+++ b/src/coreclr/src/jit/lsraarmarch.cpp
@@ -38,13 +38,9 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
 //
 int LinearScan::BuildIndir(GenTreeIndir* indirTree)
 {
-    int srcCount = 0;
-    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
-    // it has no register requirements.
-    if (indirTree->TypeGet() == TYP_STRUCT)
-    {
-        return srcCount;
-    }
+    // struct typed indirs are expected only on rhs of a block copy,
+    // but in this case they must be contained.
+    assert(indirTree->TypeGet() != TYP_STRUCT);

     GenTree* addr  = indirTree->Addr();
     GenTree* index = nullptr;
@@ -109,7 +105,7 @@ int LinearScan::BuildIndir(GenTreeIndir* indirTree)
     }
 #endif // FEATURE_SIMD

-    srcCount = BuildIndirUses(indirTree);
+    int srcCount = BuildIndirUses(indirTree);
     buildInternalRegisterUses();

     if (indirTree->gtOper != GT_STOREIND)
diff --git a/src/coreclr/src/jit/lsrabuild.cpp b/src/coreclr/src/jit/lsrabuild.cpp
index 41ef29eb9de0f..fb4b8ff61be89 100644
--- a/src/coreclr/src/jit/lsrabuild.cpp
+++ b/src/coreclr/src/jit/lsrabuild.cpp
@@ -2473,14 +2473,18 @@ void setTgtPref(Interval* interval, RefPosition* tgtPrefUse)
 RefPosition* LinearScan::BuildDef(GenTree* tree, regMaskTP dstCandidates, int multiRegIdx)
 {
     assert(!tree->isContained());
-    RegisterType type = getDefType(tree);

     if (dstCandidates != RBM_NONE)
     {
         assert((tree->GetRegNum() == REG_NA) || (dstCandidates == genRegMask(tree->GetRegByIndex(multiRegIdx))));
     }

-    if (tree->IsMultiRegNode())
+    RegisterType type = getDefType(tree);
+    if (!tree->IsMultiRegNode())
+    {
+        type = getDefType(tree);
+    }
+    else
     {
         type = tree->GetRegTypeByIndex(multiRegIdx);
     }
diff --git a/src/coreclr/src/jit/lsraxarch.cpp b/src/coreclr/src/jit/lsraxarch.cpp
index 7d09369a73f53..e23d1c80da6de 100644
--- a/src/coreclr/src/jit/lsraxarch.cpp
+++ b/src/coreclr/src/jit/lsraxarch.cpp
@@ -2751,12 +2751,9 @@ int LinearScan::BuildCast(GenTreeCast* cast)
 //
 int LinearScan::BuildIndir(GenTreeIndir* indirTree)
 {
-    // If this is the rhs of a block copy (i.e. non-enregisterable struct),
-    // it has no register requirements.
-    if (indirTree->TypeGet() == TYP_STRUCT)
-    {
-        return 0;
-    }
+    // struct typed indirs are expected only on rhs of a block copy,
+    // but in this case they must be contained.
+    assert(indirTree->TypeGet() != TYP_STRUCT);

 #ifdef FEATURE_SIMD
     RefPosition* internalFloatDef = nullptr;
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index 8fa319a8cb05b..69cbbdd7a834d 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -8888,7 +8888,8 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)

             if (dest == destLclVarTree)
             {
-                dest = gtNewIndir(asgType, gtNewOperNode(GT_ADDR, TYP_BYREF, dest));
+                GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, dest);
+                dest          = gtNewIndir(asgType, addr);
             }
         }
     }
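
Illustration only, not part of the patch above: several of the hunks replace repeated lvaTable[lclNum] indexing with a single lvaGetDesc() lookup cached in a local varDsc. The standalone C++ sketch below shows that accessor pattern using hypothetical VarTable and VarDesc types; they are stand-ins for illustration, not the JIT's real classes.

#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-ins for the JIT's lvaTable / LclVarDsc, for illustration only.
struct VarDesc
{
    bool isRegCandidate = false;
    bool isMultiRegRet  = false;
};

struct VarTable
{
    std::vector<VarDesc> descs;

    // Checked accessor: one bounds check and one lookup, analogous in spirit
    // to fetching a LclVarDsc* once via lvaGetDesc and reusing it.
    VarDesc* getDesc(unsigned lclNum)
    {
        assert(lclNum < descs.size());
        return &descs[lclNum];
    }
};

int main()
{
    VarTable lva;
    lva.descs.resize(4);

    const unsigned lclNum = 2;

    // Before: raw indexing repeated at every use site.
    lva.descs[lclNum].isMultiRegRet = true;

    // After: fetch the descriptor once, then reuse it for each field access.
    VarDesc* varDsc        = lva.getDesc(lclNum);
    varDsc->isRegCandidate = true;

    std::printf("V%02u: regCandidate=%d multiRegRet=%d\n", lclNum, varDsc->isRegCandidate, varDsc->isMultiRegRet);
    return 0;
}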