diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
index 2efc48e3be4c5..bc83f19dc581f 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizationArtifactCombiner.h
@@ -192,7 +192,8 @@ class LegalizationArtifactCombiner {
 
   bool tryCombineSExt(MachineInstr &MI,
                       SmallVectorImpl<MachineInstr *> &DeadInsts,
-                      SmallVectorImpl<Register> &UpdatedDefs) {
+                      SmallVectorImpl<Register> &UpdatedDefs,
+                      GISelObserverWrapper &Observer) {
     using namespace llvm::MIPatternMatch;
     assert(MI.getOpcode() == TargetOpcode::G_SEXT);
 
@@ -211,7 +212,14 @@ class LegalizationArtifactCombiner {
       uint64_t SizeInBits = SrcTy.getScalarSizeInBits();
       if (DstTy != MRI.getType(TruncSrc))
         TruncSrc = Builder.buildAnyExtOrTrunc(DstTy, TruncSrc).getReg(0);
-      Builder.buildSExtInReg(DstReg, TruncSrc, SizeInBits);
+      // Elide G_SEXT_INREG if possible. This is similar to eliding G_AND in
+      // tryCombineZExt. Refer to the comment in tryCombineZExt for rationale.
+      if (KB && KB->computeNumSignBits(TruncSrc) >
+                    DstTy.getScalarSizeInBits() - SizeInBits)
+        replaceRegOrBuildCopy(DstReg, TruncSrc, MRI, Builder, UpdatedDefs,
+                              Observer);
+      else
+        Builder.buildSExtInReg(DstReg, TruncSrc, SizeInBits);
       markInstAndDefDead(MI, *MRI.getVRegDef(SrcReg), DeadInsts);
       return true;
     }
@@ -1322,7 +1330,7 @@ class LegalizationArtifactCombiner {
       Changed = tryCombineZExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
      break;
     case TargetOpcode::G_SEXT:
-      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs);
+      Changed = tryCombineSExt(MI, DeadInsts, UpdatedDefs, WrapperObserver);
       break;
     case TargetOpcode::G_UNMERGE_VALUES:
       Changed = tryCombineUnmergeValues(cast<GUnmerge>(MI), DeadInsts,
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
index 35c9538627b35..403c12d1f4235 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-min-max.mir
@@ -221,11 +221,10 @@ body: |
     ; CHECK-NEXT: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), %vec(<2 x s64>), %vec1
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
     ; CHECK-NEXT: %smin:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY %smin(<2 x s64>)
@@ -249,17 +248,15 @@ body: |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(slt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP1]], 1
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG1]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP1]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP1]]
     ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
@@ -494,11 +491,10 @@ body: |
     ; CHECK-NEXT: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), %vec(<2 x s64>), %vec1
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
     ; CHECK-NEXT: %umin:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY %umin(<2 x s64>)
@@ -522,17 +518,15 @@ body: |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ult), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP1]], 1
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG1]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP1]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP1]]
     ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
@@ -767,11 +761,10 @@ body: |
     ; CHECK-NEXT: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), %vec(<2 x s64>), %vec1
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
     ; CHECK-NEXT: %smax:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY %smax(<2 x s64>)
@@ -795,17 +788,15 @@ body: |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP1]], 1
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG1]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP1]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP1]]
     ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
@@ -1040,11 +1031,10 @@ body: |
     ; CHECK-NEXT: %vec:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: %vec1:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), %vec(<2 x s64>), %vec1
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND %vec, [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND %vec1, [[XOR]]
     ; CHECK-NEXT: %umax:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY %umax(<2 x s64>)
@@ -1068,17 +1058,15 @@ body: |
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[DEF:%[0-9]+]]:_(<2 x s64>) = G_IMPLICIT_DEF
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(ugt), [[DEF]](<2 x s64>), [[DEF]]
-    ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP1]], 1
-    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG1]], [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[SEXT_INREG1]]
+    ; CHECK-NEXT: [[XOR1:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP1]], [[BUILD_VECTOR]]
+    ; CHECK-NEXT: [[AND2:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[ICMP1]]
     ; CHECK-NEXT: [[AND3:%[0-9]+]]:_(<2 x s64>) = G_AND [[DEF]], [[XOR1]]
     ; CHECK-NEXT: [[OR1:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND2]], [[AND3]]
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
index 8b5cac1ec8735..92f8e524dbb31 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalize-select.mir
@@ -17,11 +17,10 @@ body: |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C]](s64), [[C]](s64)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s64>) = G_ICMP intpred(sgt), [[COPY]](<2 x s64>), [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s64>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[C1]](s64), [[C1]](s64)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s64>) = G_XOR [[ICMP]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY1]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s64>) = G_AND [[COPY]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s64>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY [[OR]](<2 x s64>)
@@ -52,11 +51,10 @@ body: |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<2 x s32>) = G_ICMP intpred(sgt), [[COPY]](<2 x s32>), [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<2 x s32>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<2 x s32>) = G_XOR [[ICMP]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY1]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<2 x s32>) = G_AND [[COPY]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $d0 = COPY [[OR]](<2 x s32>)
@@ -87,11 +85,10 @@ body: |
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s8) = G_CONSTANT i8 0
     ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8), [[C]](s8)
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(<16 x s8>) = G_ICMP intpred(sgt), [[COPY]](<16 x s8>), [[BUILD_VECTOR]]
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(<16 x s8>) = G_SEXT_INREG [[ICMP]], 1
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s8) = G_CONSTANT i8 -1
     ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<16 x s8>) = G_BUILD_VECTOR [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8), [[C1]](s8)
-    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[SEXT_INREG]], [[BUILD_VECTOR1]]
-    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY1]], [[SEXT_INREG]]
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(<16 x s8>) = G_XOR [[ICMP]], [[BUILD_VECTOR1]]
+    ; CHECK-NEXT: [[AND:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY1]], [[ICMP]]
     ; CHECK-NEXT: [[AND1:%[0-9]+]]:_(<16 x s8>) = G_AND [[COPY]], [[XOR]]
     ; CHECK-NEXT: [[OR:%[0-9]+]]:_(<16 x s8>) = G_OR [[AND]], [[AND1]]
     ; CHECK-NEXT: $q0 = COPY [[OR]](<16 x s8>)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
index eb86a981c9f1e..16ad07e0df58e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddo.mir
@@ -18,8 +18,7 @@ body: |
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 7
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG]](s32), [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 7
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG2]](s32), [[COPY2]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG2]](s32), [[C]]
     ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[ADD]], [[C1]]
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
index 220450c5e4ec6..aa59de0118ad6 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubo.mir
@@ -18,8 +18,7 @@ body: |
     ; CHECK-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY]], 7
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SEXT_INREG]](s32), [[SEXT_INREG1]]
     ; CHECK-NEXT: [[SEXT_INREG2:%[0-9]+]]:_(s32) = G_SEXT_INREG [[COPY1]], 7
-    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SEXT_INREG2]](s32), [[COPY2]]
+    ; CHECK-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[SEXT_INREG2]](s32), [[C]]
     ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
     ; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[SUB]], [[C1]]
diff --git a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir
index caf8db442dc53..1a2d7be5674e8 100644
--- a/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir
+++ b/llvm/test/CodeGen/ARM/GlobalISel/arm-legalize-exts.mir
@@ -211,10 +211,7 @@ body: |
     ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[V8]](s8)
     ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[V8]](s8)
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SEXT]]
-    ; CHECK: [[BITS:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[OR]], [[BITS]](s32)
-    ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[BITS]](s32)
-    ; CHECK: $r0 = COPY [[ASHR]]
+    ; CHECK: $r0 = COPY [[OR]]
     %5(s32) = G_SEXT %4(s16)
     $r0 = COPY %5
 
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/constants.mir b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
index 0ced5d52d262c..65a7e78044997 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
+++ b/llvm/test/CodeGen/Mips/GlobalISel/legalizer/constants.mir
@@ -54,8 +54,7 @@ body: |
   bb.1.entry:
     ; MIPS32-LABEL: name: signed_i16
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
-    ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; MIPS32-NEXT: $v0 = COPY [[COPY]](s32)
+    ; MIPS32-NEXT: $v0 = COPY [[C]](s32)
     ; MIPS32-NEXT: RetRA implicit $v0
     %0:_(s16) = G_CONSTANT i16 -32768
     %1:_(s32) = G_SEXT %0(s16)
@@ -71,8 +70,7 @@ body: |
   bb.1.entry:
     ; MIPS32-LABEL: name: signed_i8
     ; MIPS32: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -128
-    ; MIPS32-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY [[C]](s32)
-    ; MIPS32-NEXT: $v0 = COPY [[COPY]](s32)
+    ; MIPS32-NEXT: $v0 = COPY [[C]](s32)
     ; MIPS32-NEXT: RetRA implicit $v0
     %0:_(s8) = G_CONSTANT i8 -128
     %1:_(s32) = G_SEXT %0(s8)
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll
index f60bd998b7c88..85dd06641b08f 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/rem_and_div.ll
@@ -5,12 +5,8 @@
 define signext i8 @sdiv_i8(i8 signext %a, i8 signext %b) {
 ; MIPS32-LABEL: sdiv_i8:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $5, 24
-; MIPS32-NEXT:    sra $2, $1, 24
-; MIPS32-NEXT:    sll $1, $4, 24
-; MIPS32-NEXT:    sra $1, $1, 24
-; MIPS32-NEXT:    div $zero, $2, $1
-; MIPS32-NEXT:    teq $1, $zero, 7
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
 ; MIPS32-NEXT:    mflo $1
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    sra $2, $1, 24
@@ -24,12 +20,8 @@ entry:
 define signext i16 @sdiv_i16(i16 signext %a, i16 signext %b) {
 ; MIPS32-LABEL: sdiv_i16:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $5, 16
-; MIPS32-NEXT:    sra $2, $1, 16
-; MIPS32-NEXT:    sll $1, $4, 16
-; MIPS32-NEXT:    sra $1, $1, 16
-; MIPS32-NEXT:    div $zero, $2, $1
-; MIPS32-NEXT:    teq $1, $zero, 7
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
 ; MIPS32-NEXT:    mflo $1
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    sra $2, $1, 16
@@ -81,12 +73,8 @@ entry:
 define signext i8 @srem_i8(i8 signext %a, i8 signext %b) {
 ; MIPS32-LABEL: srem_i8:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $5, 24
-; MIPS32-NEXT:    sra $2, $1, 24
-; MIPS32-NEXT:    sll $1, $4, 24
-; MIPS32-NEXT:    sra $1, $1, 24
-; MIPS32-NEXT:    div $zero, $2, $1
-; MIPS32-NEXT:    teq $1, $zero, 7
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
 ; MIPS32-NEXT:    mfhi $1
 ; MIPS32-NEXT:    sll $1, $1, 24
 ; MIPS32-NEXT:    sra $2, $1, 24
@@ -100,12 +88,8 @@ entry:
 define signext i16 @srem_i16(i16 signext %a, i16 signext %b) {
 ; MIPS32-LABEL: srem_i16:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $5, 16
-; MIPS32-NEXT:    sra $2, $1, 16
-; MIPS32-NEXT:    sll $1, $4, 16
-; MIPS32-NEXT:    sra $1, $1, 16
-; MIPS32-NEXT:    div $zero, $2, $1
-; MIPS32-NEXT:    teq $1, $zero, 7
+; MIPS32-NEXT:    div $zero, $5, $4
+; MIPS32-NEXT:    teq $4, $zero, 7
 ; MIPS32-NEXT:    mfhi $1
 ; MIPS32-NEXT:    sll $1, $1, 16
 ; MIPS32-NEXT:    sra $2, $1, 16
diff --git a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sitofp_and_uitofp.ll b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sitofp_and_uitofp.ll
index b7f42c2912132..de1bad542462c 100644
--- a/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sitofp_and_uitofp.ll
+++ b/llvm/test/CodeGen/Mips/GlobalISel/llvm-ir/sitofp_and_uitofp.ll
@@ -35,9 +35,7 @@ entry:
 define float @i16tof32(i16 signext %a) {
 ; MIPS32-LABEL: i16tof32:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $4, 16
-; MIPS32-NEXT:    sra $1, $1, 16
-; MIPS32-NEXT:    mtc1 $1, $f0
+; MIPS32-NEXT:    mtc1 $4, $f0
 ; MIPS32-NEXT:    cvt.s.w $f0, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
@@ -49,9 +47,7 @@ entry:
 define float @i8tof32(i8 signext %a) {
 ; MIPS32-LABEL: i8tof32:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $4, 24
-; MIPS32-NEXT:    sra $1, $1, 24
-; MIPS32-NEXT:    mtc1 $1, $f0
+; MIPS32-NEXT:    mtc1 $4, $f0
 ; MIPS32-NEXT:    cvt.s.w $f0, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
@@ -93,9 +89,7 @@ entry:
 define double @i16tof64(i16 signext %a) {
 ; MIPS32-LABEL: i16tof64:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $4, 16
-; MIPS32-NEXT:    sra $1, $1, 16
-; MIPS32-NEXT:    mtc1 $1, $f0
+; MIPS32-NEXT:    mtc1 $4, $f0
 ; MIPS32-NEXT:    cvt.d.w $f0, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
@@ -107,9 +101,7 @@ entry:
 define double @i8tof64(i8 signext %a) {
 ; MIPS32-LABEL: i8tof64:
 ; MIPS32:       # %bb.0: # %entry
-; MIPS32-NEXT:    sll $1, $4, 24
-; MIPS32-NEXT:    sra $1, $1, 24
-; MIPS32-NEXT:    mtc1 $1, $f0
+; MIPS32-NEXT:    mtc1 $4, $f0
 ; MIPS32-NEXT:    cvt.d.w $f0, $f0
 ; MIPS32-NEXT:    jr $ra
 ; MIPS32-NEXT:    nop
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/jumptable.ll b/llvm/test/CodeGen/RISCV/GlobalISel/jumptable.ll
index 601290812bb2e..9dda1a241e042 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/jumptable.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/jumptable.ll
@@ -121,7 +121,6 @@ define void @above_threshold(i32 signext %in, ptr %out) nounwind {
 ; RV64I-SMALL-LABEL: above_threshold:
 ; RV64I-SMALL:       # %bb.0: # %entry
 ; RV64I-SMALL-NEXT:    li a2, 5
-; RV64I-SMALL-NEXT:    sext.w a0, a0
 ; RV64I-SMALL-NEXT:    addi a0, a0, -1
 ; RV64I-SMALL-NEXT:    bltu a2, a0, .LBB0_9
 ; RV64I-SMALL-NEXT:  # %bb.1: # %entry
@@ -156,7 +155,6 @@ define void @above_threshold(i32 signext %in, ptr %out) nounwind {
 ; RV64I-MEDIUM-LABEL: above_threshold:
 ; RV64I-MEDIUM:       # %bb.0: # %entry
 ; RV64I-MEDIUM-NEXT:    li a2, 5
-; RV64I-MEDIUM-NEXT:    sext.w a0, a0
 ; RV64I-MEDIUM-NEXT:    addi a0, a0, -1
 ; RV64I-MEDIUM-NEXT:    bltu a2, a0, .LBB0_9
 ; RV64I-MEDIUM-NEXT:  # %bb.1: # %entry
@@ -192,7 +190,6 @@ define void @above_threshold(i32 signext %in, ptr %out) nounwind {
 ; RV64I-PIC-LABEL: above_threshold:
 ; RV64I-PIC:       # %bb.0: # %entry
 ; RV64I-PIC-NEXT:    li a2, 5
-; RV64I-PIC-NEXT:    sext.w a0, a0
 ; RV64I-PIC-NEXT:    addi a0, a0, -1
 ; RV64I-PIC-NEXT:    bltu a2, a0, .LBB0_9
 ; RV64I-PIC-NEXT:  # %bb.1: # %entry
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
index f96d659782178..c2258642c9dd2 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv32.mir
@@ -52,31 +52,25 @@ body: |
     ; RV32I: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
     ; RV32I-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 16
     ; RV32I-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+    ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ASSERT_SEXT]], [[C]](s32)
+    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_SEXT]], [[ASHR]]
+    ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]]
     ; RV32I-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV32I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ASSERT_SEXT]], [[C1]](s32)
-    ; RV32I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
-    ; RV32I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C]](s32)
-    ; RV32I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[ASSERT_SEXT]], [[ASHR1]]
-    ; RV32I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR1]]
-    ; RV32I-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV32I-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[XOR]], [[C2]](s32)
-    ; RV32I-NEXT: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]](s32)
-    ; RV32I-NEXT: $x10 = COPY [[ASHR2]](s32)
+    ; RV32I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[XOR]], [[C1]](s32)
+    ; RV32I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; RV32I-NEXT: $x10 = COPY [[ASHR1]](s32)
     ; RV32I-NEXT: PseudoRET implicit $x10
     ;
     ; RV32ZBB-LABEL: name: abs_i16
     ; RV32ZBB: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
     ; RV32ZBB-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s32) = G_ASSERT_SEXT [[COPY]], 16
-    ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV32ZBB-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ASSERT_SEXT]], [[C]](s32)
-    ; RV32ZBB-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
-    ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; RV32ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[ASHR]]
-    ; RV32ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[ASHR]], [[SUB]]
-    ; RV32ZBB-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
-    ; RV32ZBB-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SMAX]], [[C2]](s32)
-    ; RV32ZBB-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C2]](s32)
-    ; RV32ZBB-NEXT: $x10 = COPY [[ASHR1]](s32)
+    ; RV32ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; RV32ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[ASSERT_SEXT]]
+    ; RV32ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[ASSERT_SEXT]], [[SUB]]
+    ; RV32ZBB-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; RV32ZBB-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[SMAX]], [[C1]](s32)
+    ; RV32ZBB-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
+    ; RV32ZBB-NEXT: $x10 = COPY [[ASHR]](s32)
     ; RV32ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s32) = COPY $x10
     %2:_(s32) = G_ASSERT_SEXT %1, 16
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
index 6f1792172b064..f855317ad4fed 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-abs-rv64.mir
@@ -56,35 +56,28 @@ body: |
     ; RV64I: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64I-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 16
     ; RV64I-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
-    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s64)
-    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
-    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s64)
-    ; RV64I-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
-    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[C2]](s64)
+    ; RV64I-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+    ; RV64I-NEXT: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[TRUNC]], [[C]](s64)
     ; RV64I-NEXT: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
-    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC1]], [[ASHR1]]
-    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR1]]
+    ; RV64I-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[TRUNC1]], [[ASHR]]
+    ; RV64I-NEXT: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[ADD]], [[ASHR]]
     ; RV64I-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[XOR]](s32)
-    ; RV64I-NEXT: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; RV64I-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C3]](s64)
-    ; RV64I-NEXT: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C3]](s64)
-    ; RV64I-NEXT: $x10 = COPY [[ASHR2]](s64)
+    ; RV64I-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64I-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C1]](s64)
+    ; RV64I-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
+    ; RV64I-NEXT: $x10 = COPY [[ASHR1]](s64)
     ; RV64I-NEXT: PseudoRET implicit $x10
     ;
     ; RV64ZBB-LABEL: name: abs_i16
     ; RV64ZBB: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; RV64ZBB-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 16
-    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; RV64ZBB-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ASSERT_SEXT]], [[C]](s64)
-    ; RV64ZBB-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C]](s64)
-    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
-    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C1]], [[ASHR]]
-    ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[ASHR]], [[SUB]]
-    ; RV64ZBB-NEXT: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
-    ; RV64ZBB-NEXT: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[SMAX]], [[C2]](s64)
-    ; RV64ZBB-NEXT: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[SHL1]], [[C2]](s64)
-    ; RV64ZBB-NEXT: $x10 = COPY [[ASHR1]](s64)
+    ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[C]], [[ASSERT_SEXT]]
+    ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[ASSERT_SEXT]], [[SUB]]
+    ; RV64ZBB-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 48
+    ; RV64ZBB-NEXT: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SMAX]], [[C1]](s64)
+    ; RV64ZBB-NEXT: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[SHL]], [[C1]](s64)
+    ; RV64ZBB-NEXT: $x10 = COPY [[ASHR]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
     %2:_(s64) = G_ASSERT_SEXT %1, 16
@@ -116,11 +109,10 @@ body: |
     ; RV64ZBB-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ASSERT_SEXT]](s64)
     ; RV64ZBB-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
     ; RV64ZBB-NEXT: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
-    ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ASSERT_SEXT]], 32
     ; RV64ZBB-NEXT: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[SUB]](s32)
-    ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[SEXT_INREG]], [[SEXT]]
-    ; RV64ZBB-NEXT: [[SEXT_INREG1:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SMAX]], 32
-    ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG1]](s64)
+    ; RV64ZBB-NEXT: [[SMAX:%[0-9]+]]:_(s64) = G_SMAX [[ASSERT_SEXT]], [[SEXT]]
+    ; RV64ZBB-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[SMAX]], 32
+    ; RV64ZBB-NEXT: $x10 = COPY [[SEXT_INREG]](s64)
     ; RV64ZBB-NEXT: PseudoRET implicit $x10
     %1:_(s64) = COPY $x10
     %2:_(s64) = G_ASSERT_SEXT %1, 32
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-jump-table-brjt-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-jump-table-brjt-rv64.mir
index 9910e98a3e820..bf0661b88a78b 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-jump-table-brjt-rv64.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-jump-table-brjt-rv64.mir
@@ -55,9 +55,8 @@ body: |
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
     ; CHECK-NEXT: [[ASSERT_SEXT:%[0-9]+]]:_(s64) = G_ASSERT_SEXT [[COPY]], 32
     ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
-    ; CHECK-NEXT: [[SEXT_INREG:%[0-9]+]]:_(s64) = G_SEXT_INREG [[ASSERT_SEXT]], 32
     ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 1
-    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[SEXT_INREG]], [[C1]]
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[ASSERT_SEXT]], [[C1]]
     ; CHECK-NEXT: [[ICMP:%[0-9]+]]:_(s64) = G_ICMP intpred(ugt), [[SUB]](s64), [[C]]
     ; CHECK-NEXT: G_BRCOND [[ICMP]](s64), %bb.8
     ; CHECK-NEXT: {{  $}}
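
Note on the rationale (added commentary, not part of the patch itself): G_SEXT_INREG %x, B on a Width-bit register is a no-op whenever %x already carries more than Width - B sign bits, which is exactly the fact KB->computeNumSignBits(TruncSrc) establishes before the new code path fires. AArch64 legalizes vector compares to all-zeros or all-ones lanes, so a <2 x s64> G_ICMP result has 64 sign bits per lane and every G_SEXT_INREG [[ICMP]], 1 in the tests above folds away. Below is a minimal standalone C++ sketch of that arithmetic on constants; numSignBits and sextInReg are hypothetical models of the real known-bits queries, not LLVM APIs.

#include <cassert>
#include <cstdint>

// Model of computeNumSignBits on a constant: how many copies of the sign
// bit sit at the top of a `width`-bit value, counting the sign bit itself.
static unsigned numSignBits(int64_t v, unsigned width) {
  uint64_t sign = ((uint64_t)v >> (width - 1)) & 1;
  unsigned n = 0;
  for (int i = (int)width - 1; i >= 0 && (((uint64_t)v >> i) & 1) == sign; --i)
    ++n;
  return n;
}

// Model of G_SEXT_INREG %v, b on a `width`-bit register: sign-extend from
// bit b-1 back out to `width` bits.
static int64_t sextInReg(int64_t v, unsigned width, unsigned b) {
  int64_t fromB = (int64_t)((uint64_t)v << (64 - b)) >> (64 - b);
  return (int64_t)((uint64_t)fromB << (64 - width)) >> (64 - width);
}

int main() {
  // An all-ones icmp lane (s64 true mask) has 64 sign bits, so with
  // width = 64 and b = 1 the elision condition 64 > 64 - 1 holds and
  // G_SEXT_INREG is indeed a no-op; likewise for the all-zeros lane.
  assert(numSignBits(-1, 64) > 64 - 1 && sextInReg(-1, 64, 1) == -1);
  assert(numSignBits(0, 64) > 64 - 1 && sextInReg(0, 64, 1) == 0);
  // Counterexample: the 8-bit value 2 has only 6 sign bits, 6 > 8 - 2 fails,
  // and G_SEXT_INREG ..., 2 really does change the value (2 becomes -2).
  assert(numSignBits(2, 8) == 6 && sextInReg(2, 8, 2) == -2);
  return 0;
}

Compiling this with any C++11 compiler and running it exercises both the elision case and a counterexample where the strict inequality, the same one the patch tests, fails.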