@@ -9133,7 +9133,7 @@ ValueNum ValueNumStore::EvalHWIntrinsicFunTernary(
 ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, NamedIntrinsic gtMathFN, ValueNum arg0VN)
 {
     assert(arg0VN == VNNormalValue(arg0VN));
-    assert(m_pComp->IsMathIntrinsic(gtMathFN));
+    assert(m_pComp->IsMathIntrinsic(gtMathFN) RISCV64_ONLY(|| m_pComp->IsBitCountingIntrinsic(gtMathFN)));

     // If the math intrinsic is not implemented by target-specific instructions, such as implemented
     // by user calls, then don't do constant folding on it during ReadyToRun. This minimizes precision loss.
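
For readers unfamiliar with the JIT's target-conditional macros: `RISCV64_ONLY(x)` expands to its argument only in RISCV64 builds, so the widened assert is a no-op change on every other target. A minimal sketch of the pattern (the real definitions live in the JIT's headers; treat this as illustrative, not the exact source):

```cpp
// Illustrative only: target-conditional macro in the style the JIT uses.
#ifdef TARGET_RISCV64
#define RISCV64_ONLY(x) x // RISCV64 builds keep the argument...
#else
#define RISCV64_ONLY(x)   // ...all other targets drop it entirely.
#endif

// On RISCV64 the assert above therefore expands to:
//   assert(m_pComp->IsMathIntrinsic(gtMathFN) || m_pComp->IsBitCountingIntrinsic(gtMathFN));
// and on every other target it remains:
//   assert(m_pComp->IsMathIntrinsic(gtMathFN));
```
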
@@ -9385,10 +9385,8 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, NamedIntrinsic gtMathFN
                         unreached();
                 }
             }
-            else
+            else if (gtMathFN == NI_System_Math_Round)
             {
-                assert(gtMathFN == NI_System_Math_Round);
-
                 switch (TypeOfVN(arg0VN))
                 {
                     case TYP_DOUBLE:
@@ -9409,14 +9407,69 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, NamedIntrinsic gtMathFN
                         unreached();
                 }
             }
+            else if (gtMathFN == NI_PRIMITIVE_LeadingZeroCount)
+            {
+                switch (TypeOfVN(arg0VN))
+                {
+                    case TYP_LONG:
+                        res = BitOperations::LeadingZeroCount((uint64_t)GetConstantInt64(arg0VN));
+                        break;
+
+                    case TYP_INT:
+                        res = BitOperations::LeadingZeroCount((uint32_t)GetConstantInt32(arg0VN));
+                        break;
+
+                    default:
+                        unreached();
+                }
+            }
+            else if (gtMathFN == NI_PRIMITIVE_TrailingZeroCount)
+            {
+                switch (TypeOfVN(arg0VN))
+                {
+                    case TYP_LONG:
+                        res = BitOperations::TrailingZeroCount((uint64_t)GetConstantInt64(arg0VN));
+                        break;
+
+                    case TYP_INT:
+                        res = BitOperations::TrailingZeroCount((uint32_t)GetConstantInt32(arg0VN));
+                        break;
+
+                    default:
+                        unreached();
+                }
+            }
+            else if (gtMathFN == NI_PRIMITIVE_PopCount)
+            {
+                switch (TypeOfVN(arg0VN))
+                {
+                    case TYP_LONG:
+                        res = BitOperations::PopCount((uint64_t)GetConstantInt64(arg0VN));
+                        break;
+
+                    case TYP_INT:
+                        res = BitOperations::PopCount((uint32_t)GetConstantInt32(arg0VN));
+                        break;
+
+                    default:
+                        unreached();
+                }
+            }
+            else
+            {
+                unreached();
+            }

             return VNForIntCon(res);
         }
     }
     else
     {
         assert((typ == TYP_DOUBLE) || (typ == TYP_FLOAT) ||
-               ((typ == TYP_INT) && ((gtMathFN == NI_System_Math_ILogB) || (gtMathFN == NI_System_Math_Round))));
+               ((typ == TYP_INT) && ((gtMathFN == NI_System_Math_ILogB) || (gtMathFN == NI_System_Math_Round))) ||
+               (((typ == TYP_INT) || (typ == TYP_LONG)) &&
+                ((gtMathFN == NI_PRIMITIVE_LeadingZeroCount) || (gtMathFN == NI_PRIMITIVE_TrailingZeroCount) ||
+                 (gtMathFN == NI_PRIMITIVE_PopCount))));

         VNFunc vnf = VNF_Boundary;
         switch (gtMathFN)
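
The constant-folding arms above simply evaluate the bit operation on the constant operand, using the 32-bit value for `TYP_INT` and the full 64-bit value for `TYP_LONG`, and the result goes out through the existing `VNForIntCon(res)` path. A self-contained sketch of the same semantics using the C++20 `<bit>` equivalents of the JIT's `BitOperations` helpers (illustrative, not JIT code):

```cpp
#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
    // TYP_INT operands fold on the 32-bit value...
    assert(std::countl_zero(uint32_t{1}) == 31); // LeadingZeroCount
    assert(std::countr_zero(uint32_t{8}) == 3);  // TrailingZeroCount
    assert(std::popcount(uint32_t{0xFF}) == 8);  // PopCount

    // ...while TYP_LONG operands fold on the full 64-bit value.
    assert(std::countl_zero(uint64_t{1}) == 63);
    assert(std::countr_zero(uint64_t{0}) == 64); // a zero input yields the full bit width
    assert(std::popcount(~uint64_t{0}) == 64);

    return 0;
}
```
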
@@ -9508,6 +9561,15 @@ ValueNum ValueNumStore::EvalMathFuncUnary(var_types typ, NamedIntrinsic gtMathFN
             case NI_System_Math_Truncate:
                 vnf = VNF_Truncate;
                 break;
+            case NI_PRIMITIVE_LeadingZeroCount:
+                vnf = VNF_LeadingZeroCount;
+                break;
+            case NI_PRIMITIVE_TrailingZeroCount:
+                vnf = VNF_TrailingZeroCount;
+                break;
+            case NI_PRIMITIVE_PopCount:
+                vnf = VNF_PopCount;
+                break;
             default:
                 unreached(); // the above are the only math intrinsics at the time of this writing.
         }
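
When the operand is not a constant, the switch above only selects a `VNFunc`; the symbolic value number is then built the same way as for the existing math intrinsics, so repeated bit-count operations on the same operand VN share a value number and can be CSE'd. A sketch of the tail of `EvalMathFuncUnary` (context code assumed from the surrounding function, not shown in this diff):

```cpp
// After the switch: build a function-typed VN keyed on (vnf, arg0VN).
// Identical (vnf, arg0VN) pairs always yield the same ValueNum, which is
// what lets later phases treat repeated bit counts of the same value as equal.
return VNForFunc(typ, vnf, arg0VN);
```

Note that `VNF_LeadingZeroCount`, `VNF_TrailingZeroCount`, and `VNF_PopCount` must also be declared in the JIT's `VNFunc` definition list (`valuenumfuncs.h`), presumably elsewhere in this PR, or the names above will not compile.
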
@@ -12829,7 +12891,7 @@ void Compiler::fgValueNumberIntrinsic(GenTree* tree)
         vnStore->VNPUnpackExc(intrinsic->AsOp()->gtOp2->gtVNPair, &arg1VNP, &arg1VNPx);
     }

-    if (IsMathIntrinsic(intrinsic->gtIntrinsicName))
+    if (IsMathIntrinsic(intrinsic->gtIntrinsicName) || IsBitCountingIntrinsic(intrinsic->gtIntrinsicName))
     {
         // GT_INTRINSIC is currently a subtype of binary operators. But most of
         // the math intrinsics are actually unary operations.
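
Both call sites in this diff depend on a `Compiler::IsBitCountingIntrinsic` helper that the diff itself does not show. A plausible minimal shape, inferred from how it is used here (the actual definition presumably lives elsewhere in this PR; name, signature, and placement are assumptions):

```cpp
// Assumed helper: classifies the three bit-counting NamedIntrinsics handled above.
bool Compiler::IsBitCountingIntrinsic(NamedIntrinsic intrinsicName)
{
    return (intrinsicName == NI_PRIMITIVE_LeadingZeroCount) ||
           (intrinsicName == NI_PRIMITIVE_TrailingZeroCount) ||
           (intrinsicName == NI_PRIMITIVE_PopCount);
}
```
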