diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index 1b37aabaafae8..7850d9c70252a 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -364,7 +364,7 @@ class LoopVectorizationLegality {
 
   /// Return true if the block BB needs to be predicated in order for the loop
   /// to be vectorized.
-  bool blockNeedsPredication(BasicBlock *BB) const;
+  bool blockNeedsPredication(const BasicBlock *BB) const;
 
   /// Check if this pointer is consecutive when vectorizing. This happens
   /// when the last index of the GEP is the induction variable, or that the
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
index bdc2a0dad8622..6eab92e66745e 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorize.h
@@ -145,7 +145,7 @@ struct LoopVectorizePass : public PassInfoMixin<LoopVectorizePass> {
   LoopInfo *LI;
   TargetTransformInfo *TTI;
   DominatorTree *DT;
-  BlockFrequencyInfo *BFI;
+  std::function<BlockFrequencyInfo &()> GetBFI;
   TargetLibraryInfo *TLI;
   DemandedBits *DB;
   AssumptionCache *AC;
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
index 26e2d44bdc9e6..5238a5d7d7c24 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorizationLegality.cpp
@@ -1443,7 +1443,8 @@ bool LoopVectorizationLegality::isFixedOrderRecurrence(
   return FixedOrderRecurrences.count(Phi);
 }
 
-bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) const {
+bool LoopVectorizationLegality::blockNeedsPredication(
+    const BasicBlock *BB) const {
   // When vectorizing early exits, create predicates for the latch block only.
   // The early exiting block must be a direct predecessor of the latch at the
   // moment.
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 4edc004f161a1..107913898a312 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -146,6 +146,7 @@
 #include "llvm/Transforms/Vectorize/LoopVectorizationLegality.h"
 #include <algorithm>
 #include <cassert>
+#include <cmath>
 #include <cstdint>
 #include <functional>
 #include <limits>
@@ -873,12 +874,14 @@ class LoopVectorizationCostModel {
                              const TargetTransformInfo &TTI,
                              const TargetLibraryInfo *TLI, DemandedBits *DB,
                              AssumptionCache *AC,
-                             OptimizationRemarkEmitter *ORE, const Function *F,
-                             const LoopVectorizeHints *Hints,
+                             OptimizationRemarkEmitter *ORE,
+                             std::function<BlockFrequencyInfo &()> GetBFI,
+                             const Function *F, const LoopVectorizeHints *Hints,
                              InterleavedAccessInfo &IAI, bool OptForSize)
       : ScalarEpilogueStatus(SEL), TheLoop(L), PSE(PSE), LI(LI), Legal(Legal),
-        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), TheFunction(F),
-        Hints(Hints), InterleaveInfo(IAI), OptForSize(OptForSize) {
+        TTI(TTI), TLI(TLI), DB(DB), AC(AC), ORE(ORE), GetBFI(GetBFI),
+        TheFunction(F), Hints(Hints), InterleaveInfo(IAI),
+        OptForSize(OptForSize) {
     if (TTI.supportsScalableVectors() || ForceTargetSupportsScalableVectors)
       initializeVScaleForTuning();
     CostKind = F->hasMinSize() ? TTI::TCK_CodeSize : TTI::TCK_RecipThroughput;
@@ -1219,7 +1222,7 @@ class LoopVectorizationCostModel {
   /// for which our chosen predication strategy is scalarization (i.e. we
   /// don't have an alternate strategy such as masking available).
   /// \p VF is the vectorization factor that will be used to vectorize \p I.
-  bool isScalarWithPredication(Instruction *I, ElementCount VF) const;
+  bool isScalarWithPredication(Instruction *I, ElementCount VF);
 
   /// Returns true if \p I is an instruction that needs to be predicated
   /// at runtime. The result is independent of the predication mechanism.
@@ -1234,29 +1237,19 @@ class LoopVectorizationCostModel {
   /// optimizing for code size it will just be 1 as code size costs don't depend
   /// on execution probabilities.
   ///
-  /// TODO: We should use actual block probability here, if available.
-  /// Currently, we always assume predicated blocks have a 50% chance of
-  /// executing, apart from blocks that are only predicated due to tail folding.
+  /// Note that if a block wasn't originally predicated but was predicated due
+  /// to tail folding, the divisor will still be 1 because it will execute for
+  /// every iteration of the loop header.
   inline unsigned
   getPredBlockCostDivisor(TargetTransformInfo::TargetCostKind CostKind,
-                          BasicBlock *BB) const {
-    // If a block wasn't originally predicated but was predicated due to
-    // e.g. tail folding, don't divide the cost. Tail folded loops may still be
-    // predicated in the final vector loop iteration, but for most loops that
-    // don't have low trip counts we can expect their probability to be close to
-    // zero.
-    if (!Legal->blockNeedsPredication(BB))
-      return 1;
-    return CostKind == TTI::TCK_CodeSize ? 1 : 2;
-  }
+                          const BasicBlock *BB);
 
   /// Return the costs for our two available strategies for lowering a
   /// div/rem operation which requires speculating at least one lane.
   /// First result is for scalarization (will be invalid for scalable
   /// vectors); second is for the safe-divisor strategy.
   std::pair<InstructionCost, InstructionCost>
-  getDivRemSpeculationCost(Instruction *I,
-                           ElementCount VF) const;
+  getDivRemSpeculationCost(Instruction *I, ElementCount VF);
 
   /// Returns true if \p I is a memory instruction with consecutive memory
   /// access that can be widened.
@@ -1729,6 +1722,20 @@ class LoopVectorizationCostModel {
   /// Interface to emit optimization remarks.
   OptimizationRemarkEmitter *ORE;
 
+  /// A function to lazily fetch BlockFrequencyInfo. This avoids computing it
+  /// unless necessary, e.g. when the loop isn't legal to vectorize or when
+  /// there is no predication.
+  std::function<BlockFrequencyInfo &()> GetBFI;
+
+  /// The BlockFrequencyInfo returned from GetBFI.
+  BlockFrequencyInfo *BFI = nullptr;
+  /// Returns the BlockFrequencyInfo for the function if cached, otherwise
+  /// fetches it via GetBFI. Avoids an indirect call to the std::function.
+  BlockFrequencyInfo &getBFI() {
+    if (!BFI)
+      BFI = &GetBFI();
+    return *BFI;
+  }
+
   const Function *TheFunction;
 
   /// Loop Vectorize Hint.
@@ -2792,8 +2799,8 @@ void LoopVectorizationCostModel::collectLoopScalars(ElementCount VF) {
   Scalars[VF].insert_range(Worklist);
 }
 
-bool LoopVectorizationCostModel::isScalarWithPredication(
-    Instruction *I, ElementCount VF) const {
+bool LoopVectorizationCostModel::isScalarWithPredication(Instruction *I,
+                                                         ElementCount VF) {
   if (!isPredicatedInst(I))
     return false;
 
@@ -2886,9 +2893,26 @@ bool LoopVectorizationCostModel::isPredicatedInst(Instruction *I) const {
   }
 }
 
+unsigned LoopVectorizationCostModel::getPredBlockCostDivisor(
+    TargetTransformInfo::TargetCostKind CostKind, const BasicBlock *BB) {
+  if (CostKind == TTI::TCK_CodeSize)
+    return 1;
+  // If the block wasn't originally predicated then return early to avoid
+  // computing BlockFrequencyInfo unnecessarily.
+  if (!Legal->blockNeedsPredication(BB))
+    return 1;
+
+  uint64_t HeaderFreq =
+      getBFI().getBlockFreq(TheLoop->getHeader()).getFrequency();
+  uint64_t BBFreq = getBFI().getBlockFreq(BB).getFrequency();
+  assert(HeaderFreq >= BBFreq &&
+         "Header has smaller block freq than dominated BB?");
+  return std::round((double)HeaderFreq / BBFreq);
+}
+
 std::pair<InstructionCost, InstructionCost>
 LoopVectorizationCostModel::getDivRemSpeculationCost(Instruction *I,
-                                                     ElementCount VF) const {
+                                                     ElementCount VF) {
   assert(I->getOpcode() == Instruction::UDiv ||
          I->getOpcode() == Instruction::SDiv ||
          I->getOpcode() == Instruction::SRem ||
@@ -9182,8 +9206,9 @@ static bool processLoopInVPlanNativePath(
     Loop *L, PredicatedScalarEvolution &PSE, LoopInfo *LI, DominatorTree *DT,
     LoopVectorizationLegality *LVL, TargetTransformInfo *TTI,
     TargetLibraryInfo *TLI, DemandedBits *DB, AssumptionCache *AC,
-    OptimizationRemarkEmitter *ORE, bool OptForSize, LoopVectorizeHints &Hints,
-    LoopVectorizationRequirements &Requirements) {
+    OptimizationRemarkEmitter *ORE,
+    std::function<BlockFrequencyInfo &()> GetBFI, bool OptForSize,
+    LoopVectorizeHints &Hints, LoopVectorizationRequirements &Requirements) {
 
   if (isa<SCEVCouldNotCompute>(PSE.getBackedgeTakenCount())) {
     LLVM_DEBUG(dbgs() << "LV: cannot compute the outer-loop trip count\n");
@@ -9196,8 +9221,8 @@ static bool processLoopInVPlanNativePath(
   ScalarEpilogueLowering SEL =
       getScalarEpilogueLowering(F, L, Hints, OptForSize, TTI, TLI, *LVL, &IAI);
 
-  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE, F,
-                                &Hints, IAI, OptForSize);
+  LoopVectorizationCostModel CM(SEL, L, PSE, LI, LVL, *TTI, TLI, DB, AC, ORE,
+                                GetBFI, F, &Hints, IAI, OptForSize);
   // Use the planner for outer loop vectorization.
   // TODO: CM is not used at this point inside the planner. Turn CM into an
   // optional argument if we don't need it in the future.
@@ -9897,8 +9922,10 @@ bool LoopVectorizePass::processLoop(Loop *L) {
 
   // Query this against the original loop and save it here because the profile
   // of the original loop header may change as the transformation happens.
-  bool OptForSize = llvm::shouldOptimizeForSize(L->getHeader(), PSI, BFI,
-                                                PGSOQueryType::IRPass);
+  bool OptForSize = llvm::shouldOptimizeForSize(
+      L->getHeader(), PSI,
+      PSI && PSI->hasProfileSummary() ? &GetBFI() : nullptr,
+      PGSOQueryType::IRPass);
 
   // Check if it is legal to vectorize the loop.
   LoopVectorizationRequirements Requirements;
@@ -9932,7 +9959,8 @@ bool LoopVectorizePass::processLoop(Loop *L) {
   // pipeline.
   if (!L->isInnermost())
     return processLoopInVPlanNativePath(L, PSE, LI, DT, &LVL, TTI, TLI, DB, AC,
-                                        ORE, OptForSize, Hints, Requirements);
+                                        ORE, GetBFI, OptForSize, Hints,
+                                        Requirements);
 
   assert(L->isInnermost() && "Inner loop expected.");
 
@@ -10035,7 +10063,7 @@ bool LoopVectorizePass::processLoop(Loop *L) {
 
   // Use the cost model.
   LoopVectorizationCostModel CM(SEL, L, PSE, LI, &LVL, *TTI, TLI, DB, AC, ORE,
-                                F, &Hints, IAI, OptForSize);
+                                GetBFI, F, &Hints, IAI, OptForSize);
   // Use the planner for vectorization.
   LoopVectorizationPlanner LVP(L, LI, DT, TLI, *TTI, &LVL, CM, IAI, PSE, Hints,
                                ORE);
@@ -10353,9 +10381,9 @@ PreservedAnalyses LoopVectorizePass::run(Function &F,
 
   auto &MAMProxy = AM.getResult<ModuleAnalysisManagerFunctionProxy>(F);
   PSI = MAMProxy.getCachedResult<ProfileSummaryAnalysis>(*F.getParent());
-  BFI = nullptr;
-  if (PSI && PSI->hasProfileSummary())
-    BFI = &AM.getResult<BlockFrequencyAnalysis>(F);
+  GetBFI = [&AM, &F]() -> BlockFrequencyInfo & {
+    return AM.getResult<BlockFrequencyAnalysis>(F);
+  };
   LoopVectorizeResult Result = runImpl(F);
   if (!Result.MadeAnyChange)
     return PreservedAnalyses::all();
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/early_exit_costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/early_exit_costs.ll
index 7ae50a5e4a075..791ef734ec48b 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/early_exit_costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/early_exit_costs.ll
@@ -57,8 +57,8 @@ define i64 @same_exit_block_pre_inc_use1_nosve() {
 ; CHECK-NEXT: Cost of 48 for VF 16: EMIT vp<{{.*}}> = first-active-lane ir<%cmp3>
 ; CHECK-NEXT: Cost of 0 for VF 16: EMIT vp<{{.*}}> = add
 ; CHECK-NEXT: Cost of 0 for VF 16: vp<{{.*}}> = DERIVED-IV
-; CHECK: LV: Minimum required TC for runtime checks to be profitable:160
-; CHECK-NEXT: LV: Vectorization is not beneficial: expected trip count < minimum profitable VF (64 < 160)
+; CHECK: LV: Minimum required TC for runtime checks to be profitable:128
+; CHECK-NEXT: LV: Vectorization is not beneficial: expected trip count < minimum profitable VF (64 < 128)
 ; CHECK-NEXT: LV: Too many memory checks needed.
 entry:
   %p1 = alloca [1024 x i8]
@@ -105,7 +105,7 @@ loop.header:
   %gep.src = getelementptr inbounds i64, ptr %src, i64 %iv
   %l = load i64, ptr %gep.src, align 1
   %t = trunc i64 %l to i1
-  br i1 %t, label %exit.0, label %loop.latch
+  br i1 %t, label %exit.0, label %loop.latch, !prof !0
 
 loop.latch:
   %iv.next = add i64 %iv, 1
@@ -120,4 +120,6 @@ exit.1:
   ret i64 0
 }
 
+!0 = !{!"branch_weights", i32 1, i32 1}
+
 attributes #1 = { "target-features"="+sve" vscale_range(1,16) }
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
index f67a3d9be408a..309fb6b51db54 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/predicated-costs.ll
@@ -385,6 +385,59 @@ attributes #1 = { "target-cpu"="neoverse-v2" }
 !1 = !{!"llvm.loop.mustprogress"}
 !2 = !{!"llvm.loop.vectorize.predicate.enable", i1 true}
 !3 = !{!"llvm.loop.vectorize.enable", i1 true}
+
+; BFI computes that the if block is taken 20 times for every 32 executions of
+; the loop header. Make sure we round the divisor up to 2 so that we don't
+; vectorize the loop unprofitably.
+define void @round_scalar_pred_divisor(ptr %dst, double %x) {
+; CHECK-LABEL: define void @round_scalar_pred_divisor(
+; CHECK-SAME: ptr [[DST:%.*]], double [[X:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT:    [[C:%.*]] = fcmp une double [[X]], 0.000000e+00
+; CHECK-NEXT:    br i1 [[C]], label %[[IF:.*]], label %[[LATCH]]
+; CHECK:       [[IF]]:
+; CHECK-NEXT:    [[TRUNC:%.*]] = trunc i64 [[IV]] to i32
+; CHECK-NEXT:    [[UITOFP:%.*]] = uitofp i32 [[TRUNC]] to double
+; CHECK-NEXT:    [[SIN:%.*]] = tail call double @llvm.sin.f64(double [[UITOFP]])
+; CHECK-NEXT:    [[FPTRUNC:%.*]] = fptrunc double [[SIN]] to float
+; CHECK-NEXT:    br label %[[LATCH]]
+; CHECK:       [[LATCH]]:
+; CHECK-NEXT:    [[PHI:%.*]] = phi float [ [[FPTRUNC]], %[[IF]] ], [ 0.000000e+00, %[[LOOP]] ]
+; CHECK-NEXT:    store float [[PHI]], ptr [[DST]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT:    [[EC:%.*]] = icmp eq i64 [[IV]], 1024
+; CHECK-NEXT:    br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %latch ]
+  %c = fcmp une double %x, 0.0
+  br i1 %c, label %if, label %latch
+
+if:
+  %trunc = trunc i64 %iv to i32
+  %uitofp = uitofp i32 %trunc to double
+  %sin = tail call double @llvm.sin(double %uitofp)
+  %fptrunc = fptrunc double %sin to float
+  br label %latch
+
+latch:
+  %phi = phi float [ %fptrunc, %if ], [ 0.0, %loop ]
+  store float %phi, ptr %dst
+  %iv.next = add i64 %iv, 1
+  %ec = icmp eq i64 %iv, 1024
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
 ;.
 ; CHECK: [[META0]] = !{[[META1:![0-9]+]]}
 ; CHECK: [[META1]] = distinct !{[[META1]], [[META2:![0-9]+]]}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
index 63348ccf94f78..66211bd0353d4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/simple_early_exit.ll
@@ -386,7 +386,7 @@ define i32 @diff_exit_block_needs_scev_check(i32 %end) {
 ; CHECK-NEXT:    [[TMP0:%.*]] = trunc i32 [[END]] to i10
 ; CHECK-NEXT:    [[TMP1:%.*]] = zext i10 [[TMP0]] to i64
 ; CHECK-NEXT:    [[UMAX1:%.*]] = call i64 @llvm.umax.i64(i64 [[TMP1]], i64 1)
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 12
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[UMAX1]], 8
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK:       vector.scevcheck:
 ; CHECK-NEXT:    [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[END_CLAMPED]], i32 1)
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-predicated-costs.ll
new file mode 100644
index 0000000000000..9dd2a2713a2ad
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-predicated-costs.ll
@@ -0,0 +1,161 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt -p loop-vectorize -mtriple=aarch64 -mattr=+sve -S %s | FileCheck %s
+
+; The innermost block then.1 has a 25% chance of being executed according to
+; BranchProbabilityInfo, but if we vectorize it then we will unconditionally
+; execute it. Avoid this unprofitable vectorization by taking the nested
+; probability into account in the cost model.
+define void @nested(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
+; CHECK-LABEL: define void @nested(
+; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[X:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT:    br i1 [[C0]], label %[[THEN_0:.*]], label %[[LATCH]]
+; CHECK:       [[THEN_0]]:
+; CHECK-NEXT:    br i1 [[C1]], label %[[THEN_1:.*]], label %[[LATCH]]
+; CHECK:       [[THEN_1]]:
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i64, ptr [[P0]], i32 [[X]]
+; CHECK-NEXT:    [[X1:%.*]] = load i64, ptr [[GEP0]], align 8
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i64, ptr [[P1]], i32 [[X]]
+; CHECK-NEXT:    [[Y:%.*]] = load i64, ptr [[GEP1]], align 8
+; CHECK-NEXT:    [[Z:%.*]] = udiv i64 [[X1]], [[Y]]
+; CHECK-NEXT:    store i64 [[Z]], ptr [[GEP1]], align 8
+; CHECK-NEXT:    br label %[[LATCH]]
+; CHECK:       [[LATCH]]:
+; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[X]], 1
+; CHECK-NEXT:    [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
+  br i1 %c0, label %then.0, label %latch
+
+then.0:
+  br i1 %c1, label %then.1, label %latch
+
+then.1:
+  %gep0 = getelementptr i64, ptr %p0, i32 %iv
+  %x = load i64, ptr %gep0
+  %gep1 = getelementptr i64, ptr %p1, i32 %iv
+  %y = load i64, ptr %gep1
+  %z = udiv i64 %x, %y
+  store i64 %z, ptr %gep1
+  br label %latch
+
+latch:
+  %iv.next = add i32 %iv, 1
+  %done = icmp eq i32 %iv.next, 1024
+  br i1 %done, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; This is the same CFG as @nested above, but we have provided branch weights
+; which tell BranchProbabilityInfo that then.1 will always be taken. In this
+; case, we should vectorize because it is profitable.
+define void @always_taken(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
+; CHECK-LABEL: define void @always_taken(
+; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw i32 [[TMP0]], 2
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 1024, [[TMP1]]
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
+; CHECK-NEXT:    [[TMP5:%.*]] = mul nuw i32 [[TMP4]], 4
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i32 1024, [[TMP5]]
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i32 1024, [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C1]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[C0]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 2 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP6:%.*]] = select <vscale x 2 x i1> [[BROADCAST_SPLAT2]], <vscale x 2 x i1> [[BROADCAST_SPLAT]], <vscale x 2 x i1> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr i64, ptr [[P0]], i32 [[INDEX]]
+; CHECK-NEXT:    [[TMP8:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = shl nuw i64 [[TMP8]], 1
+; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr i64, ptr [[TMP10]], i64 [[TMP7]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP10]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD3:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP20]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
+; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr i64, ptr [[P1]], i32 [[INDEX]]
+; CHECK-NEXT:    [[TMP13:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw i64 [[TMP13]], 1
+; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i64, ptr [[TMP9]], i64 [[TMP11]]
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD4:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP9]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
+; CHECK-NEXT:    [[WIDE_MASKED_LOAD5:%.*]] = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr align 8 [[TMP12]], <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> poison)
+; CHECK-NEXT:    [[TMP21:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD4]], <vscale x 2 x i64> splat (i64 1)
+; CHECK-NEXT:    [[TMP14:%.*]] = select <vscale x 2 x i1> [[TMP6]], <vscale x 2 x i64> [[WIDE_MASKED_LOAD5]], <vscale x 2 x i64> splat (i64 1)
+; CHECK-NEXT:    [[TMP15:%.*]] = udiv <vscale x 2 x i64> [[WIDE_MASKED_LOAD]], [[TMP21]]
+; CHECK-NEXT:    [[TMP22:%.*]] = udiv <vscale x 2 x i64> [[WIDE_MASKED_LOAD3]], [[TMP14]]
+; CHECK-NEXT:    call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP15]], ptr align 8 [[TMP9]], <vscale x 2 x i1> [[TMP6]])
+; CHECK-NEXT:    call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[TMP22]], ptr align 8 [[TMP12]], <vscale x 2 x i1> [[TMP6]])
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i32 [[INDEX]], [[TMP5]]
+; CHECK-NEXT:    [[TMP16:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i32 1024, [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK:       [[SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV1:%.*]] = phi i32 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT1:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT:    br i1 [[C0]], label %[[THEN_0:.*]], label %[[LATCH]], !prof [[PROF3:![0-9]+]]
+; CHECK:       [[THEN_0]]:
+; CHECK-NEXT:    br i1 [[C1]], label %[[THEN_1:.*]], label %[[LATCH]], !prof [[PROF3]]
+; CHECK:       [[THEN_1]]:
+; CHECK-NEXT:    [[GEP0:%.*]] = getelementptr i64, ptr [[P0]], i32 [[IV1]]
+; CHECK-NEXT:    [[X:%.*]] = load i64, ptr [[GEP0]], align 8
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i64, ptr [[P1]], i32 [[IV1]]
+; CHECK-NEXT:    [[Y:%.*]] = load i64, ptr [[GEP1]], align 8
+; CHECK-NEXT:    [[Z:%.*]] = udiv i64 [[X]], [[Y]]
+; CHECK-NEXT:    store i64 [[Z]], ptr [[GEP1]], align 8
+; CHECK-NEXT:    br label %[[LATCH]]
+; CHECK:       [[LATCH]]:
+; CHECK-NEXT:    [[IV_NEXT1]] = add i32 [[IV1]], 1
+; CHECK-NEXT:    [[DONE:%.*]] = icmp eq i32 [[IV_NEXT1]], 1024
+; CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
+  br i1 %c0, label %then.0, label %latch, !prof !4
+
+then.0:
+  br i1 %c1, label %then.1, label %latch, !prof !4
+
+then.1:
+  %gep0 = getelementptr i64, ptr %p0, i32 %iv
+  %x = load i64, ptr %gep0
+  %gep1 = getelementptr i64, ptr %p1, i32 %iv
+  %y = load i64, ptr %gep1
+  %z = udiv i64 %x, %y
+  store i64 %z, ptr %gep1
+  br label %latch
+
+latch:
+  %iv.next = add i32 %iv, 1
+  %done = icmp eq i32 %iv.next, 1024
+  br i1 %done, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+!4 = !{!"branch_weights", i32 1, i32 0}
+
diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/predicated-costs.ll b/llvm/test/Transforms/LoopVectorize/RISCV/predicated-costs.ll
new file mode 100644
index 0000000000000..e0ab30b0ae5cc
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/predicated-costs.ll
@@ -0,0 +1,115 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; RUN: opt < %s -S -p loop-vectorize -mtriple=riscv64 -mattr=+v | FileCheck %s
+
+; The innermost block then.1 has a 25% chance of being executed according to
+; BranchProbabilityInfo, but if we vectorize it then we will unconditionally
+; execute it. Avoid this unprofitable vectorization by taking the nested
+; probability into account in the cost model.
+define void @nested(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
+; CHECK-LABEL: define void @nested(
+; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[LOOP:.*]]
+; CHECK:       [[LOOP]]:
+; CHECK-NEXT:    [[IV1:%.*]] = phi i32 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LATCH:.*]] ]
+; CHECK-NEXT:    br i1 [[C0]], label %[[THEN_0:.*]], label %[[LATCH]]
+; CHECK:       [[THEN_0]]:
+; CHECK-NEXT:    br i1 [[C1]], label %[[THEN_1:.*]], label %[[LATCH]]
+; CHECK:       [[THEN_1]]:
+; CHECK-NEXT:    [[GEP2:%.*]] = getelementptr i32, ptr [[P0]], i32 [[IV1]]
+; CHECK-NEXT:    [[X:%.*]] = load i32, ptr [[GEP2]], align 4
+; CHECK-NEXT:    [[GEP1:%.*]] = getelementptr i32, ptr [[P1]], i32 [[X]]
+; CHECK-NEXT:    store i32 0, ptr [[GEP1]], align 4
+; CHECK-NEXT:    br label %[[LATCH]]
+; CHECK:       [[LATCH]]:
+; CHECK-NEXT:    [[IV_NEXT]] = add i32 [[IV1]], 1
+; CHECK-NEXT:    [[DONE:%.*]] = icmp eq i32 [[IV_NEXT]], 1024
+; CHECK-NEXT:    br i1 [[DONE]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
+  br i1 %c0, label %then.0, label %latch
+
+then.0:
+  br i1 %c1, label %then.1, label %latch
+
+then.1:
+  %gep0 = getelementptr i32, ptr %p0, i32 %iv
+  %x = load i32, ptr %gep0
+  %gep1 = getelementptr i32, ptr %p1, i32 %x
+  store i32 0, ptr %gep1
+  br label %latch
+
+latch:
+  %iv.next = add i32 %iv, 1
+  %done = icmp eq i32 %iv.next, 1024
+  br i1 %done, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+; This is the same CFG as @nested above, but we have provided branch weights
+; which tell BranchProbabilityInfo that then.1 will always be taken. In this
+; case, we should vectorize because it is profitable.
+define void @always_taken(ptr noalias %p0, ptr noalias %p1, i1 %c0, i1 %c1) {
+; CHECK-LABEL: define void @always_taken(
+; CHECK-SAME: ptr noalias [[P0:%.*]], ptr noalias [[P1:%.*]], i1 [[C0:%.*]], i1 [[C1:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[C1]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i1> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <vscale x 4 x i1> poison, i1 [[C0]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT2:%.*]] = shufflevector <vscale x 4 x i1> [[BROADCAST_SPLATINSERT1]], <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP0:%.*]] = select <vscale x 4 x i1> [[BROADCAST_SPLAT2]], <vscale x 4 x i1> [[BROADCAST_SPLAT]], <vscale x 4 x i1> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[EVL_BASED_IV:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[AVL:%.*]] = phi i32 [ 1024, %[[VECTOR_PH]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.experimental.get.vector.length.i32(i32 [[AVL]], i32 4, i1 true)
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i32, ptr [[P0]], i32 [[EVL_BASED_IV]]
+; CHECK-NEXT:    [[VP_OP_LOAD:%.*]] = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0(ptr align 4 [[TMP2]], <vscale x 4 x i1> [[TMP0]], i32 [[TMP1]])
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i32, ptr [[P1]], <vscale x 4 x i32> [[VP_OP_LOAD]]
+; CHECK-NEXT:    call void @llvm.vp.scatter.nxv4i32.nxv4p0(<vscale x 4 x i32> zeroinitializer, <vscale x 4 x ptr> align 4 [[TMP3]], <vscale x 4 x i1> [[TMP0]], i32 [[TMP1]])
+; CHECK-NEXT:    [[INDEX_EVL_NEXT]] = add nuw i32 [[TMP1]], [[EVL_BASED_IV]]
+; CHECK-NEXT:    [[AVL_NEXT]] = sub nuw i32 [[AVL]], [[TMP1]]
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i32 [[AVL_NEXT]], 0
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[EXIT:.*]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %iv.next, %latch ]
+  br i1 %c0, label %then.0, label %latch, !prof !0
+
+then.0:
+  br i1 %c1, label %then.1, label %latch, !prof !0
+
+then.1:
+  %gep0 = getelementptr i32, ptr %p0, i32 %iv
+  %x = load i32, ptr %gep0
+  %gep1 = getelementptr i32, ptr %p1, i32 %x
+  store i32 0, ptr %gep1
+  br label %latch
+
+latch:
+  %iv.next = add i32 %iv, 1
+  %done = icmp eq i32 %iv.next, 1024
+  br i1 %done, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+!0 = !{!"branch_weights", i32 1, i32 0}
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
index a727973b43511..a5578e544a364 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/std-find.ll
@@ -145,7 +145,7 @@ define ptr @std_find_caller(ptr noundef %first, ptr noundef %last) {
 ; CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[FIRST3]]
 ; CHECK-NEXT:    [[TMP2:%.*]] = lshr exact i64 [[TMP1]], 1
 ; CHECK-NEXT:    [[TMP3:%.*]] = add nuw i64 [[TMP2]], 1
-; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 158
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 126
 ; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[LOOP_HEADER_I_PREHEADER2:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK:       [[VECTOR_PH]]:
 ; CHECK-NEXT:    [[XTRAITER:%.*]] = and i64 [[TMP3]], -8
diff --git a/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll b/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll
new file mode 100644
index 0000000000000..6183a2679c3aa
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/loop-vectorize-bfi.ll
@@ -0,0 +1,61 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
+; REQUIRES: riscv-registered-target
+; RUN: opt -p 'lto<O3>' -mtriple riscv64 -mattr=+v -S < %s | FileCheck %s
+
+; Test that BlockFrequencyInfo is invalidated after loop passes, so it's
+; accurate whenever LoopVectorize uses it. The loop vectorizer requires that
+; an innermost loop header's frequency is greater than or equal to that of
+; any block it dominates.
+
+define void @f(i1 %x) !prof !0 {
+; CHECK-LABEL: define void @f(
+; CHECK-SAME: i1 [[X:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {{.*}}{
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[DOTSCALAR:%.*]] = xor i1 [[X]], true
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <vscale x 2 x i1> poison, i1 [[DOTSCALAR]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[EVL_BASED_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_EVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[AVL:%.*]] = phi i64 [ 65, %[[ENTRY]] ], [ [[AVL_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.experimental.get.vector.length.i64(i64 [[AVL]], i32 2, i1 true)
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i64, ptr null, i64 [[EVL_BASED_IV]]
+; CHECK-NEXT:    call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> poison, ptr align 8 [[TMP4]], <vscale x 2 x i1> [[TMP2]], i32 [[TMP3]])
+; CHECK-NEXT:    call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> poison, ptr align 8 [[TMP4]], <vscale x 2 x i1> [[TMP2]], i32 [[TMP3]])
+; CHECK-NEXT:    [[TMP5:%.*]] = zext nneg i32 [[TMP3]] to i64
+; CHECK-NEXT:    [[INDEX_EVL_NEXT]] = add nuw i64 [[EVL_BASED_IV]], [[TMP5]]
+; CHECK-NEXT:    [[AVL_NEXT]] = sub nuw nsw i64 [[AVL]], [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[AVL_NEXT]], 0
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[EXIT:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %iv.next, %latch ], [ 0, %entry ]
+  %gep = getelementptr i64, ptr null, i64 %iv
+  br label %foo
+
+foo:
+  %phi = phi i1 [ false, %loop ], [ true, %baz ]
+  br i1 %x, label %baz, label %bar
+
+bar:
+  store i64 0, ptr %gep
+  br label %baz
+
+baz:
+  br i1 %phi, label %latch, label %foo
+
+latch:
+  %iv.next = add i64 %iv, 1
+  %ec = icmp eq i64 %iv, 64
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret void
+}
+
+!0 = !{!"function_entry_count", i64 1}
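
For quick intuition about the new cost divisor, here is a minimal standalone C++ sketch of the rounding behaviour this patch introduces in getPredBlockCostDivisor. predBlockCostDivisor is a hypothetical free-function restatement, not part of the patch; the frequency pairs mirror the tests above (32/20 from @round_scalar_pred_divisor, an always-taken block from @always_taken, and the 25% nested probability from @nested).

#include <cassert>
#include <cmath>
#include <cstdint>
#include <iostream>

// Divisor = loop-header frequency over predicated-block frequency, rounded to
// the nearest integer: roughly "BB executes once every N header iterations".
static unsigned predBlockCostDivisor(uint64_t HeaderFreq, uint64_t BBFreq) {
  assert(HeaderFreq >= BBFreq && "Header has smaller block freq than dominated BB?");
  return std::round((double)HeaderFreq / BBFreq);
}

int main() {
  std::cout << predBlockCostDivisor(32, 20) << '\n'; // 2: 1.6 rounds up, keeping the loop unprofitable to vectorize
  std::cout << predBlockCostDivisor(32, 32) << '\n'; // 1: block executes every iteration
  std::cout << predBlockCostDivisor(32, 8) << '\n';  // 4: 25% nested probability
}

The previous behaviour this replaces was a flat divisor of 2 for any originally predicated block, regardless of how rarely it actually executes.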