[InstCombine] invert canonicalization of sext (x > -1) --> not (ashr x)
https://alive2.llvm.org/ce/z/2iC4oB

This is similar to changes made for zext + lshr:
21d3871
6c39a3a

The existing fold did not account for extra uses, so we
see some instruction count reductions in the test diffs.

This is intended to improve analysis (icmp likely has more
transforms than any other opcode), to make other transforms
more symmetric with the zext/lshr forms, and the new form
can be inverted in codegen if profitable.

As with the earlier changes, there is potential to uncover
infinite combine loops, but I have not found any yet.
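For reference, a minimal IR sketch of the direction of the canonicalization
(value names are illustrative; they match the test4 diff in
llvm/test/Transforms/InstCombine/icmp.ll below):

  ; old canonical form of "x is not negative" as a full-width mask
  %lobit = ashr i32 %x, 31
  %r = xor i32 %lobit, -1
  ; new canonical form
  %isnotneg = icmp sgt i32 %x, -1
  %r = sext i1 %isnotneg to i32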
rotateright committed Jan 24, 2023
1 parent 7ea998e commit e44a305
Showing 11 changed files with 54 additions and 58 deletions.
8 changes: 8 additions & 0 deletions llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3896,6 +3896,14 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
  if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
    return BinaryOperator::CreateAShr(X, Y);

+  // Bit-hack form of a signbit test:
+  // iN ~X >>s (N-1) --> sext i1 (X > -1) to iN
+  unsigned FullShift = Ty->getScalarSizeInBits() - 1;
+  if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) {
+    Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg");
+    return new SExtInst(IsNotNeg, Ty);
+  }
+
  // If we are inverting a right-shifted constant, we may be able to eliminate
  // the 'not' by inverting the constant and using the opposite shift type.
  // Canonicalization rules ensure that only a negative constant uses 'ashr',
8 changes: 2 additions & 6 deletions llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -1368,18 +1368,14 @@ Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *Cmp,
  if (!Op1->getType()->isIntOrIntVectorTy())
    return nullptr;

-  if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
-      (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
-    // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
-    // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
+  if (Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) {
+    // sext (x <s 0) --> ashr x, 31 (all ones if negative)
    Value *Sh = ConstantInt::get(Op0->getType(),
                                 Op0->getType()->getScalarSizeInBits() - 1);
    Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
    if (In->getType() != Sext.getType())
      In = Builder.CreateIntCast(In, Sext.getType(), true /*SExt*/);

-    if (Pred == ICmpInst::ICMP_SGT)
-      In = Builder.CreateNot(In, In->getName() + ".not");
    return replaceInstUsesWith(Sext, In);
  }

11 changes: 4 additions & 7 deletions llvm/test/Transforms/InstCombine/X86/x86-masked-memops.ll
@@ -158,9 +158,8 @@ define <4 x i64> @mload_v4i64(ptr %f) {

define <4 x i64> @mload_v4i64_cmp(ptr %f, <4 x i64> %src) {
; CHECK-LABEL: @mload_v4i64_cmp(
-; CHECK-NEXT: [[SRC_LOBIT:%.*]] = ashr <4 x i64> [[SRC:%.*]], <i64 63, i64 63, i64 63, i64 63>
-; CHECK-NEXT: [[SRC_LOBIT_NOT:%.*]] = xor <4 x i64> [[SRC_LOBIT]], <i64 -1, i64 -1, i64 -1, i64 -1>
-; CHECK-NEXT: [[LD:%.*]] = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr [[F:%.*]], <4 x i64> [[SRC_LOBIT_NOT]])
+; CHECK-NEXT: [[ICMP:%.*]] = icmp sgt <4 x i64> [[SRC:%.*]], <i64 -1, i64 -1, i64 -1, i64 -1>
+; CHECK-NEXT: [[LD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]], <4 x i64> zeroinitializer)
; CHECK-NEXT: ret <4 x i64> [[LD]]
;
%icmp = icmp sge <4 x i64> %src, zeroinitializer
@@ -271,10 +270,8 @@ define void @mstore_v4f64(ptr %f, <4 x double> %v) {

define void @mstore_v4f64_cmp(ptr %f, <4 x i32> %src, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64_cmp(
-; CHECK-NEXT: [[SRC_LOBIT:%.*]] = ashr <4 x i32> [[SRC:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[SRC_LOBIT]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: [[DOTNOT:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
-; CHECK-NEXT: tail call void @llvm.x86.avx.maskstore.pd.256(ptr [[F:%.*]], <4 x i64> [[DOTNOT]], <4 x double> [[V:%.*]])
+; CHECK-NEXT: [[ICMP:%.*]] = icmp sgt <4 x i32> [[SRC:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]])
; CHECK-NEXT: ret void
;
%icmp = icmp sge <4 x i32> %src, zeroinitializer
32 changes: 15 additions & 17 deletions llvm/test/Transforms/InstCombine/and.ll
@@ -1770,8 +1770,8 @@ define i16 @not_signbit_splat_mask2(i8 %x, i16 %y) {

define i8 @not_ashr_bitwidth_mask(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_bitwidth_mask(
-; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0
-; CHECK-NEXT: [[POS_OR_ZERO:%.*]] = select i1 [[ISNEG]], i8 0, i8 [[Y:%.*]]
+; CHECK-NEXT: [[ISNOTNEG_INV:%.*]] = icmp slt i8 [[X:%.*]], 0
+; CHECK-NEXT: [[POS_OR_ZERO:%.*]] = select i1 [[ISNOTNEG_INV]], i8 0, i8 [[Y:%.*]]
; CHECK-NEXT: ret i8 [[POS_OR_ZERO]]
;
%sign = ashr i8 %x, 7
@@ -1783,8 +1783,8 @@ define i8 @not_ashr_bitwidth_mask(i8 %x, i8 %y) {
define <2 x i8> @not_ashr_bitwidth_mask_vec_commute(<2 x i8> %x, <2 x i8> %py) {
; CHECK-LABEL: @not_ashr_bitwidth_mask_vec_commute(
; CHECK-NEXT: [[Y:%.*]] = mul <2 x i8> [[PY:%.*]], <i8 42, i8 2>
-; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
-; CHECK-NEXT: [[POS_OR_ZERO:%.*]] = select <2 x i1> [[ISNEG]], <2 x i8> zeroinitializer, <2 x i8> [[Y]]
+; CHECK-NEXT: [[ISNOTNEG_INV:%.*]] = icmp slt <2 x i8> [[X:%.*]], zeroinitializer
+; CHECK-NEXT: [[POS_OR_ZERO:%.*]] = select <2 x i1> [[ISNOTNEG_INV]], <2 x i8> zeroinitializer, <2 x i8> [[Y]]
; CHECK-NEXT: ret <2 x i8> [[POS_OR_ZERO]]
;
%y = mul <2 x i8> %py, <i8 42, i8 2> ; thwart complexity-based ordering
@@ -1815,8 +1815,8 @@ define i8 @not_ashr_bitwidth_mask_use1(i8 %x, i8 %y) {

define i8 @not_ashr_bitwidth_mask_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @not_ashr_bitwidth_mask_use2(
-; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SIGN]], -1
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
+; CHECK-NEXT: [[NOT:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT: call void @use8(i8 [[NOT]])
; CHECK-NEXT: [[R:%.*]] = and i8 [[NOT]], [[Y:%.*]]
; CHECK-NEXT: ret i8 [[R]]
@@ -1860,8 +1860,8 @@ define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) {

define i16 @invert_signbit_splat_mask(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask(
-; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0
-; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNEG]], i16 0, i16 [[Y:%.*]]
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
+; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -1904,11 +1904,10 @@ define i16 @invert_signbit_splat_mask_use1(i8 %x, i16 %y) {

define i16 @invert_signbit_splat_mask_use2(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask_use2(
-; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
+; CHECK-NEXT: [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT: call void @use8(i8 [[N]])
-; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt i8 [[X]], 0
-; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNEG]], i16 0, i16 [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -1923,9 +1922,8 @@ define i16 @invert_signbit_splat_mask_use2(i8 %x, i16 %y) {

define i16 @invert_signbit_splat_mask_use3(i8 %x, i16 %y) {
; CHECK-LABEL: @invert_signbit_splat_mask_use3(
-; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[N:%.*]] = xor i8 [[A]], -1
-; CHECK-NEXT: [[S:%.*]] = sext i8 [[N]] to i16
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
+; CHECK-NEXT: [[S:%.*]] = sext i1 [[ISNOTNEG]] to i16
; CHECK-NEXT: call void @use16(i16 [[S]])
; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]]
; CHECK-NEXT: ret i16 [[R]]
@@ -1942,8 +1940,8 @@ define i16 @invert_signbit_splat_mask_use3(i8 %x, i16 %y) {

define i16 @not_invert_signbit_splat_mask1(i8 %x, i16 %y) {
; CHECK-LABEL: @not_invert_signbit_splat_mask1(
-; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7
-; CHECK-NEXT: [[N:%.*]] = xor i8 [[A]], -1
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
+; CHECK-NEXT: [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8
; CHECK-NEXT: [[Z:%.*]] = zext i8 [[N]] to i16
; CHECK-NEXT: [[R:%.*]] = and i16 [[Z]], [[Y:%.*]]
; CHECK-NEXT: ret i16 [[R]]
10 changes: 5 additions & 5 deletions llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll
@@ -75,9 +75,9 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {

define <2 x i64> @is_negative(<4 x i32> %x) {
; CHECK-LABEL: @is_negative(
-; CHECK-NEXT: [[LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[LOBIT]] to <2 x i64>
-; CHECK-NEXT: ret <2 x i64> [[TMP1]]
+; CHECK-NEXT: [[X_LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT: [[NOTNOT:%.*]] = bitcast <4 x i32> [[X_LOBIT]] to <2 x i64>
+; CHECK-NEXT: ret <2 x i64> [[NOTNOT]]
;
%lobit = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
%not = xor <4 x i32> %lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -91,8 +91,8 @@ define <2 x i64> @is_negative(<4 x i32> %x) {

define <4 x i32> @is_negative_bonus_bitcast(<4 x i32> %x) {
; CHECK-LABEL: @is_negative_bonus_bitcast(
-; CHECK-NEXT: [[LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: ret <4 x i32> [[LOBIT]]
+; CHECK-NEXT: [[X_LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT: ret <4 x i32> [[X_LOBIT]]
;
%lobit = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
%not = xor <4 x i32> %lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
10 changes: 5 additions & 5 deletions llvm/test/Transforms/InstCombine/bitcast.ll
@@ -77,9 +77,9 @@ define <2 x i32> @or_bitcast_int_to_vec(i64 %a) {

define <2 x i64> @is_negative(<4 x i32> %x) {
; CHECK-LABEL: @is_negative(
-; CHECK-NEXT: [[LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[LOBIT]] to <2 x i64>
-; CHECK-NEXT: ret <2 x i64> [[TMP1]]
+; CHECK-NEXT: [[X_LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT: [[NOTNOT:%.*]] = bitcast <4 x i32> [[X_LOBIT]] to <2 x i64>
+; CHECK-NEXT: ret <2 x i64> [[NOTNOT]]
;
%lobit = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
%not = xor <4 x i32> %lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -93,8 +93,8 @@ define <2 x i64> @is_negative(<4 x i32> %x) {

define <4 x i32> @is_negative_bonus_bitcast(<4 x i32> %x) {
; CHECK-LABEL: @is_negative_bonus_bitcast(
-; CHECK-NEXT: [[LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: ret <4 x i32> [[LOBIT]]
+; CHECK-NEXT: [[X_LOBIT:%.*]] = ashr <4 x i32> [[X:%.*]], <i32 31, i32 31, i32 31, i32 31>
+; CHECK-NEXT: ret <4 x i32> [[X_LOBIT]]
;
%lobit = ashr <4 x i32> %x, <i32 31, i32 31, i32 31, i32 31>
%not = xor <4 x i32> %lobit, <i32 -1, i32 -1, i32 -1, i32 -1>
6 changes: 3 additions & 3 deletions llvm/test/Transforms/InstCombine/icmp.ll
@@ -63,9 +63,9 @@ define i32 @test3(i32 %X) {

define i32 @test4(i32 %X) {
; CHECK-LABEL: @test4(
-; CHECK-NEXT: [[X_LOBIT:%.*]] = ashr i32 [[X:%.*]], 31
-; CHECK-NEXT: [[X_LOBIT_NOT:%.*]] = xor i32 [[X_LOBIT]], -1
-; CHECK-NEXT: ret i32 [[X_LOBIT_NOT]]
+; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[X:%.*]], -1
+; CHECK-NEXT: [[B:%.*]] = sext i1 [[A]] to i32
+; CHECK-NEXT: ret i32 [[B]]
;
%a = icmp ult i32 %X, -2147483648
%b = sext i1 %a to i32
6 changes: 3 additions & 3 deletions llvm/test/Transforms/InstCombine/overflow-mul.ll
@@ -215,9 +215,9 @@ define <4 x i32> @pr20113(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: [[VMOVL_I_I726:%.*]] = zext <4 x i16> [[A:%.*]] to <4 x i32>
; CHECK-NEXT: [[VMOVL_I_I712:%.*]] = zext <4 x i16> [[B:%.*]] to <4 x i32>
; CHECK-NEXT: [[MUL_I703:%.*]] = mul nuw <4 x i32> [[VMOVL_I_I712]], [[VMOVL_I_I726]]
-; CHECK-NEXT: [[MUL_I703_LOBIT:%.*]] = ashr <4 x i32> [[MUL_I703]], <i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[MUL_I703_LOBIT_NOT:%.*]] = xor <4 x i32> [[MUL_I703_LOBIT]], <i32 -1, i32 -1, i32 -1, i32 -1>
-; CHECK-NEXT: ret <4 x i32> [[MUL_I703_LOBIT_NOT]]
+; CHECK-NEXT: [[TMP:%.*]] = icmp sgt <4 x i32> [[MUL_I703]], <i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: [[VCGEZ_I:%.*]] = sext <4 x i1> [[TMP]] to <4 x i32>
+; CHECK-NEXT: ret <4 x i32> [[VCGEZ_I]]
;
%vmovl.i.i726 = zext <4 x i16> %a to <4 x i32>
%vmovl.i.i712 = zext <4 x i16> %b to <4 x i32>
13 changes: 5 additions & 8 deletions llvm/test/Transforms/InstCombine/vec_sext.ll
@@ -26,11 +26,8 @@ define <4 x i32> @vec_select(<4 x i32> %a, <4 x i32> %b) {
define <4 x i32> @vec_select_alternate_sign_bit_test(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @vec_select_alternate_sign_bit_test(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw <4 x i32> zeroinitializer, [[A:%.*]]
-; CHECK-NEXT: [[ISNEG1:%.*]] = icmp slt <4 x i32> [[B:%.*]], zeroinitializer
-; CHECK-NEXT: [[T2:%.*]] = select <4 x i1> [[ISNEG1]], <4 x i32> [[A]], <4 x i32> zeroinitializer
-; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt <4 x i32> [[B]], zeroinitializer
-; CHECK-NEXT: [[T3:%.*]] = select <4 x i1> [[ISNEG]], <4 x i32> zeroinitializer, <4 x i32> [[SUB]]
-; CHECK-NEXT: [[COND:%.*]] = or <4 x i32> [[T2]], [[T3]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <4 x i32> [[B:%.*]], zeroinitializer
+; CHECK-NEXT: [[COND:%.*]] = select <4 x i1> [[CMP1]], <4 x i32> [[A]], <4 x i32> [[SUB]]
; CHECK-NEXT: ret <4 x i32> [[COND]]
;
%cmp = icmp sgt <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -58,9 +55,9 @@ define <2 x i32> @is_negative_undef_elt(<2 x i32> %a) {

define <2 x i32> @is_positive_undef_elt(<2 x i32> %a) {
; CHECK-LABEL: @is_positive_undef_elt(
-; CHECK-NEXT: [[A_LOBIT:%.*]] = ashr <2 x i32> [[A:%.*]], <i32 31, i32 31>
-; CHECK-NEXT: [[A_LOBIT_NOT:%.*]] = xor <2 x i32> [[A_LOBIT]], <i32 -1, i32 -1>
-; CHECK-NEXT: ret <2 x i32> [[A_LOBIT_NOT]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt <2 x i32> [[A:%.*]], <i32 undef, i32 -1>
+; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[CMP]] to <2 x i32>
+; CHECK-NEXT: ret <2 x i32> [[SEXT]]
;
%cmp = icmp sgt <2 x i32> %a, <i32 undef, i32 -1>
%sext = sext <2 x i1> %cmp to <2 x i32>
4 changes: 2 additions & 2 deletions llvm/test/Transforms/InstCombine/xor-ashr.ll
@@ -94,8 +94,8 @@ define i8 @wrongimm(i16 %add) {
define <4 x i32> @vectorpoison(<6 x i32> %0) {
; CHECK-LABEL: @vectorpoison(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[NEG:%.*]] = ashr <6 x i32> [[TMP0:%.*]], <i32 31, i32 31, i32 31, i32 31, i32 31, i32 31>
-; CHECK-NEXT: [[SHR:%.*]] = xor <6 x i32> [[NEG]], <i32 -1, i32 -1, i32 -1, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt <6 x i32> [[TMP0:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
+; CHECK-NEXT: [[SHR:%.*]] = sext <6 x i1> [[ISNOTNEG]] to <6 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <6 x i32> [[SHR]], <6 x i32> poison, <4 x i32> <i32 0, i32 1, i32 0, i32 2>
; CHECK-NEXT: ret <4 x i32> [[TMP1]]
;
4 changes: 2 additions & 2 deletions llvm/test/Transforms/InstCombine/xor.ll
@@ -1124,8 +1124,8 @@ define i8 @not_ashr(i8 %x) {

define <2 x i8> @not_ashr_vec(<2 x i8> %x) {
; CHECK-LABEL: @not_ashr_vec(
-; CHECK-NEXT: [[A:%.*]] = ashr <2 x i8> [[X:%.*]], <i8 7, i8 7>
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i8> [[A]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt <2 x i8> [[X:%.*]], <i8 -1, i8 -1>
+; CHECK-NEXT: [[R:%.*]] = sext <2 x i1> [[ISNOTNEG]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[R]]
;
%a = ashr <2 x i8> %x, <i8 7, i8 7>
