diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index b603bbe55dc9a..a18b21c5d84da 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -264,11 +264,6 @@ static cl::opt<bool> EnableMaskedInterleavedMemAccesses(
     "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden,
     cl::desc("Enable vectorization on masked interleaved memory accesses in a loop"));
 
-static cl::opt<unsigned> TinyTripCountInterleaveThreshold(
-    "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden,
-    cl::desc("We don't interleave loops with a estimated constant trip count "
-             "below this number"));
-
 static cl::opt<unsigned> ForceTargetNumScalarRegs(
     "force-target-num-scalar-regs", cl::init(0), cl::Hidden,
     cl::desc("A flag that overrides the target's number of scalar registers."));
@@ -316,12 +311,6 @@ static cl::opt<bool> EnableLoadStoreRuntimeInterleave(
     cl::desc(
         "Enable runtime interleaving until load/store ports are saturated"));
 
-/// Interleave small loops with scalar reductions.
-static cl::opt<bool> InterleaveSmallLoopScalarReduction(
-    "interleave-small-loop-scalar-reduction", cl::init(false), cl::Hidden,
-    cl::desc("Enable interleaving for loops with small iteration counts that "
-             "contain scalar reductions to expose ILP."));
-
 /// The number of stores in a loop that are allowed to need predication.
 static cl::opt<unsigned> NumberOfStoresToPredicate(
     "vectorize-num-stores-pred", cl::init(1), cl::Hidden,
@@ -5823,14 +5812,6 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
   auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
   const bool HasReductions = !Legal->getReductionVars().empty();
 
-  // Do not interleave loops with a relatively small known or estimated trip
-  // count. But we will interleave when InterleaveSmallLoopScalarReduction is
-  // enabled, and the code has scalar reductions(HasReductions && VF = 1),
-  // because with the above conditions interleaving can expose ILP and break
-  // cross iteration dependences for reductions.
-  if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) &&
-      !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar()))
-    return 1;
 
   // If we did not calculate the cost for VF (because the user selected the VF)
   // then we calculate the cost of VF here.
@@ -5903,21 +5884,58 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
     MaxInterleaveCount = ForceTargetMaxVectorInterleaveFactor;
   }
 
-  // If trip count is known or estimated compile time constant, limit the
-  // interleave count to be less than the trip count divided by VF, provided it
-  // is at least 1.
-  //
-  // For scalable vectors we can't know if interleaving is beneficial. It may
-  // not be beneficial for small loops if none of the lanes in the second vector
-  // iterations is enabled. However, for larger loops, there is likely to be a
-  // similar benefit as for fixed-width vectors. For now, we choose to leave
-  // the InterleaveCount as if vscale is '1', although if some information about
-  // the vector is known (e.g. min vector size), we can make a better decision.
-  if (BestKnownTC) {
-    MaxInterleaveCount =
-        std::min(*BestKnownTC / VF.getKnownMinValue(), MaxInterleaveCount);
-    // Make sure MaxInterleaveCount is greater than 0.
-    MaxInterleaveCount = std::max(1u, MaxInterleaveCount);
+  unsigned EstimatedVF = VF.getKnownMinValue();
+  if (VF.isScalable()) {
+    if (std::optional<unsigned> VScale = getVScaleForTuning(TheLoop, TTI))
+      EstimatedVF *= *VScale;
+  }
+  assert(EstimatedVF >= 1 && "Estimated VF shouldn't be less than 1");
+
+  unsigned KnownTC = PSE.getSE()->getSmallConstantTripCount(TheLoop);
+  if (KnownTC > 0) {
+    // At least one iteration must be scalar when a scalar epilogue is
+    // required, so the maximum number of iterations available for
+    // interleaving is one less.
+    unsigned AvailableTC =
+        requiresScalarEpilogue(VF.isVector()) ? KnownTC - 1 : KnownTC;
+
+    // If the trip count is known, we select between two prospective ICs, where
+    // 1) the aggressive IC is capped by the trip count divided by VF
+    // 2) the conservative IC is capped by the trip count divided by (VF * 2)
+    // The final IC is selected in a way that the epilogue loop trip count is
+    // minimized while maximizing the IC itself, so that we either run the
+    // vector loop at least once if it generates a small epilogue loop, or else
+    // we run the vector loop at least twice.
+
+    unsigned InterleaveCountUB = bit_floor(
+        std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
+    unsigned InterleaveCountLB = bit_floor(std::max(
+        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
+    MaxInterleaveCount = InterleaveCountLB;
+
+    if (InterleaveCountUB != InterleaveCountLB) {
+      unsigned TailTripCountUB =
+          (AvailableTC % (EstimatedVF * InterleaveCountUB));
+      unsigned TailTripCountLB =
+          (AvailableTC % (EstimatedVF * InterleaveCountLB));
+      // If both candidates produce the same scalar tail, maximize the IC to do
+      // the same work in fewer vector loop iterations.
+      if (TailTripCountUB == TailTripCountLB)
+        MaxInterleaveCount = InterleaveCountUB;
+    }
+  } else if (BestKnownTC && *BestKnownTC > 0) {
+    // At least one iteration must be scalar when a scalar epilogue is
+    // required, so the maximum number of iterations available for
+    // interleaving is one less.
+    unsigned AvailableTC = requiresScalarEpilogue(VF.isVector())
+                               ? (*BestKnownTC) - 1
+                               : *BestKnownTC;
+
+    // If the trip count is only an estimated compile-time constant, cap the IC
+    // at the trip count divided by (VF * 2), so that the vector loop runs at
+    // least twice and interleaving still looks profitable when an epilogue
+    // loop is present. Since the exact trip count is not known, we choose to
+    // be conservative in our IC estimate.
+    MaxInterleaveCount = bit_floor(std::max(
+        1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
   }
 
   assert(MaxInterleaveCount > 0 &&
@@ -6021,8 +6039,7 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
 
   // If there are scalar reductions and TTI has enabled aggressive
   // interleaving for reductions, we will interleave to expose ILP.
-  if (InterleaveSmallLoopScalarReduction && VF.isScalar() &&
-      AggressivelyInterleaveReductions) {
+  if (VF.isScalar() && AggressivelyInterleaveReductions) {
     LLVM_DEBUG(dbgs() << "LV: Interleaving to expose ILP.\n");
     // Interleave no less than SmallIC but not as aggressive as the normal IC
     // to satisfy the rare situation when resources are too limited.
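
The known-trip-count path above chooses between an aggressive and a conservative interleave-count cap. The following standalone sketch is not part of the patch; it only mirrors that cap computation so the arithmetic can be checked in isolation. It assumes VF = 16, a target maximum interleave count of 8, and no required scalar epilogue (when one is required, the caller would pass KnownTC - 1 as AvailableTC); it substitutes C++20 std::bit_floor for llvm::bit_floor, and the helper name capInterleaveCount is invented for illustration.

#include <algorithm>
#include <bit>
#include <cstdio>

// Mirrors the candidate selection in selectInterleaveCount for a known trip
// count: take the conservative cap (TC / (VF * 2)) unless the aggressive cap
// (TC / VF) leaves exactly the same scalar tail, in which case prefer it.
static unsigned capInterleaveCount(unsigned AvailableTC, unsigned EstimatedVF,
                                   unsigned MaxInterleaveCount) {
  unsigned UB = std::bit_floor(
      std::max(1u, std::min(AvailableTC / EstimatedVF, MaxInterleaveCount)));
  unsigned LB = std::bit_floor(std::max(
      1u, std::min(AvailableTC / (EstimatedVF * 2), MaxInterleaveCount)));
  unsigned IC = LB;
  if (UB != LB && AvailableTC % (EstimatedVF * UB) ==
                      AvailableTC % (EstimatedVF * LB))
    IC = UB; // Same remainder either way, so do the work in fewer iterations.
  return IC;
}

int main() {
  // TC 100, VF 16: tails 36 (IC 4) vs. 4 (IC 2) differ -> conservative IC 2.
  std::printf("%u\n", capInterleaveCount(100, 16, 8));
  // TC 128, VF 16: both tails are 0 -> aggressive IC 8.
  std::printf("%u\n", capInterleaveCount(128, 16, 8));
  return 0;
}

For a trip count of 100 this prints 2 and for 128 it prints 8, which lines up with the interleave counts expected by the interleave_count_for_known_tc.ll test added below.
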
diff --git a/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll index d1cff14c3f4b7..97ea2c6708dad 100644 --- a/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll +++ b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll @@ -1,3 +1,4 @@ +; REQUIRES: x86-registered-target ; RUN: opt -aa-pipeline=basic-aa -passes=loop-distribute -enable-loop-distribute -verify-loop-info -verify-dom-info -S \ ; RUN: < %s | FileCheck %s @@ -79,6 +80,8 @@ entry: ; VECTORIZE: mul <4 x i32> +; VECTORIZE: mul <4 x i32> +; VECTORIZE-NOT: mul <4 x i32> for.body: ; preds = %for.body, %entry %ind = phi i64 [ 0, %entry ], [ %add, %for.body ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll new file mode 100644 index 0000000000000..886ee521d274d --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll @@ -0,0 +1,349 @@ +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR + +target triple = "aarch64-linux-gnu" + +%pair = type { i8, i8 } + +; For a loop with a profile-guided estimated TC of 32, when the auto-vectorizer chooses VF 16, +; it should conservatively choose IC 1 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_profile_tc_32(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !0 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 33, when the auto-vectorizer chooses VF 16, +; it should conservatively choose IC 1 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_profile_tc_33(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !1 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 48, when the auto-vectorizer chooses VF 16, +; it should conservatively choose IC 1 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_profile_tc_48(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 
[ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !2 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 63, when the auto-vectorizer chooses VF 16, +; it should conservatively choose IC 1 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_profile_tc_63(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !3 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 64, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 2 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_profile_tc_64(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !4 + +for.end: + ret void +} + +; This has the same profile-guided estimated trip count as loop_with_profile_tc_64 but since the +; resulting interleaved group in this case may access memory out-of-bounds, it requires a scalar +; epilogue iteration for correctness, making at most 63 iterations available for interleaving. 
+; When the auto-vectorizer chooses VF 16, it should choose IC 1 to leave a smaller scalar +; remainder than IC 2 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_profile_tc_64_scalar_epilogue_reqd(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %gep.src = getelementptr inbounds [3 x i8], ptr %p, i64 %i, i64 0 + %l = load i8, ptr %gep.src, align 1 + %gep.dst = getelementptr inbounds i8, ptr %q, i64 %i + store i8 %l, ptr %gep.dst, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !4 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 100, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 2 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_profile_tc_100(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !5 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 128, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 4 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 4) +define void @loop_with_profile_tc_128(ptr noalias %p, ptr noalias %q, i64 %n) { +; CHECK-IR-LABEL: define void @loop_with_profile_tc_128( +; CHECK-IR-SAME: ptr noalias [[P:%.*]], ptr noalias [[Q:%.*]], i64 [[N:%.*]]) { +; CHECK-IR-NEXT: iter.check: +; CHECK-IR-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8 +; CHECK-IR-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK-IR: vector.main.loop.iter.check: +; CHECK-IR-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[N]], 64 +; CHECK-IR-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !6 + +for.end: + ret void +} + +; This has the same profile-guided estimated trip count as loop_with_profile_tc_128 but since +; the resulting interleaved group in this case may access memory out-of-bounds, it requires +; a scalar epilogue iteration for correctness, making at most 127 iterations available for +; interleaving. 
+; When the auto-vectorizer chooses VF 16, it should choose IC 2 to leave a smaller scalar +; remainder than IC 4 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_profile_tc_128_scalar_epilogue_reqd(ptr noalias %p, ptr noalias %q, i64 %n) { +; CHECK-IR-LABEL: define void @loop_with_profile_tc_128_scalar_epilogue_reqd( +; CHECK-IR-SAME: ptr noalias [[P:%.*]], ptr noalias [[Q:%.*]], i64 [[N:%.*]]) { +; CHECK-IR-NEXT: iter.check: +; CHECK-IR-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[N]], 8 +; CHECK-IR-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] +; CHECK-IR: vector.main.loop.iter.check: +; CHECK-IR-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ule i64 [[N]], 32 +; CHECK-IR-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] +; +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %gep.src = getelementptr inbounds [3 x i8], ptr %p, i64 %i, i64 0 + %l = load i8, ptr %gep.src, align 1 + %gep.dst = getelementptr inbounds i8, ptr %q, i64 %i + store i8 %l, ptr %gep.dst, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !6 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 129, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 4 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 4) +define void @loop_with_profile_tc_129(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !7 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 180, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 4 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 4) +define void @loop_with_profile_tc_180(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !8 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 193, when the auto-vectorizer chooses VF 16, +; it should choose conservatively IC 4 so that the vector loop runs twice at least +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 4) +define void @loop_with_profile_tc_193(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] 
+ %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !9 + +for.end: + ret void +} + +; For a loop with a profile-guided estimated TC of 1000, when the auto-vectorizer chooses VF 16, +; the IC will be capped by the target-specific maximum interleave count +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 8) +define void @loop_with_profile_tc_1000(ptr noalias %p, ptr noalias %q, i64 %n) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, %n + br i1 %cond, label %for.end, label %for.body, !prof !10 + +for.end: + ret void +} + +!0 = !{!"branch_weights", i32 1, i32 31} +!1 = !{!"branch_weights", i32 1, i32 32} +!2 = !{!"branch_weights", i32 1, i32 47} +!3 = !{!"branch_weights", i32 1, i32 62} +!4 = !{!"branch_weights", i32 1, i32 63} +!5 = !{!"branch_weights", i32 1, i32 99} +!6 = !{!"branch_weights", i32 1, i32 127} +!7 = !{!"branch_weights", i32 1, i32 128} +!8 = !{!"branch_weights", i32 1, i32 179} +!9 = !{!"branch_weights", i32 1, i32 192} +!10 = !{!"branch_weights", i32 1, i32 999} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll new file mode 100644 index 0000000000000..a3b45f7b5ca51 --- /dev/null +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll @@ -0,0 +1,351 @@ +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR + +target triple = "aarch64-linux-gnu" + +%pair = type { i8, i8 } + +; For this loop with known TC of 32, when the auto-vectorizer chooses VF 16, it should choose +; IC 2 since there is no remainder loop run needed after the vector loop runs. 
+; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_tc_32(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 32 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; This has the same trip count as loop_with_tc_32 but since the resulting interleaved group +; in this case may access memory out-of-bounds, it requires a scalar epilogue iteration for +; correctness, making at most 31 iterations available for interleaving. +; When the auto-vectorizer chooses VF 16, it should choose IC 1 to leave a smaller scalar remainder +; than IC 2 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_tc_32_scalar_epilogue_reqd(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %gep.src = getelementptr inbounds [3 x i8], ptr %p, i64 %i, i64 0 + %l = load i8, ptr %gep.src, align 1 + %gep.dst = getelementptr inbounds i8, ptr %q, i64 %i + store i8 %l, ptr %gep.dst, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 32 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 33, when the auto-vectorizer chooses VF 16, it should choose +; IC 2 since there is a small remainder loop TC that needs to run after the vector loop. +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_tc_33(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 33 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 39, when the auto-vectorizer chooses VF 16, it should choose +; IC 2 since there is a small remainder loop that needs to run after the vector loop. 
+; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_tc_39(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 39 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 48, when the auto-vectorizer chooses VF 16, it should choose +; IC 1 since there will be no remainder loop that needs to run after the vector loop. +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_tc_48(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 48 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 49, when the auto-vectorizer chooses VF 16, it should choose +; IC 1 since a remainder loop TC of 1 is more efficient than remainder loop TC of 17 with IC 2 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_tc_49(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 49 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 55, when the auto-vectorizer chooses VF 16, it should choose +; IC 1 since a remainder loop TC of 7 is more efficient than remainder loop TC of 23 with IC 2 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 1) +define void @loop_with_tc_55(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 55 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 100, when the auto-vectorizer chooses VF 16, it should choose +; IC 2 since a remainder loop TC of 4 is more efficient than remainder loop TC of 36 with IC 4 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) 
+define void @loop_with_tc_100(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 100 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 128, when the auto-vectorizer chooses VF 16, it should choose +; IC 8 since there is no remainder loop run needed after the vector loop runs +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 8) +define void @loop_with_tc_128(ptr noalias %p, ptr noalias %q) { +; CHECK-IR-LABEL: define void @loop_with_tc_128( +; CHECK-IR-SAME: ptr noalias [[P:%.*]], ptr noalias [[Q:%.*]]) { +; CHECK-IR-NEXT: entry: +; CHECK-IR-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 128 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; This has the same trip count as loop_with_tc_128 but since the resulting interleaved group +; in this case may access memory out-of-bounds, it requires a scalar epilogue iteration for +; correctness, making at most 127 iterations available for interleaving. +; Like loop_with_tc_128, the entry block should branch into the vector loop, instead of the scalar epilogue. 
+; When the auto-vectorizer chooses VF 16, it should choose IC 2 to leave a smaller scalar +; remainder than IC 4 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 2) +define void @loop_with_tc_128_scalar_epilogue_reqd(ptr noalias %p, ptr noalias %q) { +; CHECK-IR-LABEL: define void @loop_with_tc_128_scalar_epilogue_reqd( +; CHECK-IR-SAME: ptr noalias [[P:%.*]], ptr noalias [[Q:%.*]]) { +; CHECK-IR-NEXT: entry: +; CHECK-IR-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %gep.src = getelementptr inbounds [3 x i8], ptr %p, i64 %i, i64 0 + %l = load i8, ptr %gep.src, align 1 + %gep.dst = getelementptr inbounds i8, ptr %q, i64 %i + store i8 %l, ptr %gep.dst, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 128 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 129, when the auto-vectorizer chooses VF 16, it should choose +; IC 8 since there is a small remainder loop that needs to run after the vector loop +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 8) +define void @loop_with_tc_129(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 129 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 180, when the auto-vectorizer chooses VF 16, it should choose +; IC 8 since the remainder loop of TC 52 cannot be reduced by choosing IC 4 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 8) +define void @loop_with_tc_180(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 180 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For this loop with known TC of 193, when the auto-vectorizer chooses VF 16, it should choose +; IC 4 since a remainder loop TC of 1 is more efficient than remainder loop TC of 65 with IC 8 +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 4) +define void @loop_with_tc_193(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 193 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} + +; For 
a loop with high known TC of 1000, when the auto-vectorizer chooses VF 16, the IC will +; be capped by the target-specific maximum interleave count +; CHECK: remark: :0:0: vectorized loop (vectorization width: 16, interleaved count: 8) +define void @loop_with_tc_1000(ptr noalias %p, ptr noalias %q) { +entry: + br label %for.body + +for.body: + %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ] + %tmp0 = getelementptr %pair, ptr %p, i64 %i, i32 0 + %tmp1 = load i8, ptr %tmp0, align 1 + %tmp2 = getelementptr %pair, ptr %p, i64 %i, i32 1 + %tmp3 = load i8, ptr %tmp2, align 1 + %add = add i8 %tmp1, %tmp3 + %qi = getelementptr i8, ptr %q, i64 %i + store i8 %add, ptr %qi, align 1 + %i.next = add nuw nsw i64 %i, 1 + %cond = icmp eq i64 %i.next, 1000 + br i1 %cond, label %for.end, label %for.body + +for.end: + ret void +} diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll index 01657493e9d4a..99e348a65eaf8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll @@ -19,17 +19,30 @@ define void @induction_i7(ptr %dst) #0 { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ] -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP11:%.*]] = add [[VEC_IND]], zeroinitializer -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP10]] -; CHECK-NEXT: [[EXT:%.+]] = zext [[TMP11]] to -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0 -; CHECK-NEXT: store [[EXT]], ptr [[TMP13]], align 8 -; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], -; +; CHECK-NEXT: [[STEP_ADD:%.*]] = add [[VEC_IND]], [[DOTSPLAT:%.*]] +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = add [[VEC_IND]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = add [[STEP_ADD]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP23:%.*]] = zext [[TMP19]] to +; CHECK-NEXT: [[TMP24:%.*]] = zext [[TMP20]] to +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0 +; CHECK-NEXT: store [[TMP23:%.*]], ptr [[TMP25]], align 8 +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 2 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP27]] +; CHECK-NEXT: store [[TMP24]], ptr [[TMP28]], align 8 +; CHECK-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP30:%.*]] = mul i64 [[TMP29]], 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP30]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[STEP_ADD]], [[DOTSPLAT]] + entry: br label 
%for.body @@ -61,20 +74,30 @@ define void @induction_i3_zext(ptr %dst) #0 { ; CHECK: [[TMP5:%.*]] = trunc [[TMP4]] to ; CHECK-NEXT: [[TMP6:%.*]] = add [[TMP5]], zeroinitializer ; CHECK-NEXT: [[TMP7:%.*]] = mul [[TMP6]], shufflevector ( insertelement ( poison, i3 1, i64 0), poison, zeroinitializer) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ] -; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP10:%.*]] = zext [[VEC_IND]] to -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0 -; CHECK-NEXT: store [[TMP10]], ptr [[TMP13]], align 8 -; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], -; +; CHECK-NEXT: [[STEP_ADD:%.*]] = add [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = zext [[VEC_IND]] to +; CHECK-NEXT: [[TMP20:%.*]] = zext [[STEP_ADD]] to +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0 +; CHECK-NEXT: store [[TMP19]], ptr [[TMP23]], align 8 +; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP25]] +; CHECK-NEXT: store [[TMP20]], ptr [[TMP26]], align 8 +; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[TMP27]], 4 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP28]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[STEP_ADD]], [[DOTSPLAT]] entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll index 7121c85dd59be..c12b3b122ba74 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/interleave_IC.ll @@ -1,5 +1,5 @@ -; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s -; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 -interleave-small-loop-scalar-reduction=true 2>&1 | FileCheck %s +; RUN: opt < %s -passes=loop-vectorize -S -mcpu=pwr9 2>&1 | FileCheck %s +; RUN: opt < %s -passes='loop-vectorize' -S -mcpu=pwr9 2>&1 | FileCheck %s ; CHECK-LABEL: vector.body ; CHECK: load double, ptr diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll index a83bf7d41569f..ba073dc1590d1 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/large-loop-rdx.ll @@ -8,10 
+8,6 @@ ; CHECK-NEXT: fadd ; CHECK-NEXT: fadd ; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd ; CHECK-NEXT: = ; CHECK-NOT: fadd ; CHECK-SAME: > diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll index a448b3086c449..0bee21a481d06 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/optimal-epilog-vectorization.ll @@ -1,3 +1,4 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3 ; RUN: opt < %s -passes='loop-vectorize' -enable-epilogue-vectorization -epilogue-vectorization-force-VF=2 -S | FileCheck %s --check-prefix VF-TWO-CHECK ; RUN: opt < %s -passes='loop-vectorize' -enable-epilogue-vectorization -epilogue-vectorization-force-VF=4 -S | FileCheck %s --check-prefix VF-FOUR-CHECK @@ -6,19 +7,20 @@ target triple = "powerpc64le-unknown-linux-gnu" ; Function Attrs: nounwind define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 signext %N) #0 { -; VF-TWO-CHECK-LABEL: @f1( +; VF-TWO-CHECK-LABEL: define dso_local void @f1( +; VF-TWO-CHECK-SAME: ptr noalias [[AA:%.*]], ptr noalias [[BB:%.*]], ptr noalias [[CC:%.*]], i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; VF-TWO-CHECK-NEXT: entry: -; VF-TWO-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N:%.*]], 0 +; VF-TWO-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 0 ; VF-TWO-CHECK-NEXT: br i1 [[CMP1]], label [[ITER_CHECK:%.*]], label [[FOR_END:%.*]] ; VF-TWO-CHECK: iter.check: ; VF-TWO-CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64 ; VF-TWO-CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 2 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; VF-TWO-CHECK: vector.main.loop.iter.check: -; VF-TWO-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 48 +; VF-TWO-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 32 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; VF-TWO-CHECK: vector.ph: -; VF-TWO-CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 48 +; VF-TWO-CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 32 ; VF-TWO-CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; VF-TWO-CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; VF-TWO-CHECK: vector.body: @@ -31,133 +33,89 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; VF-TWO-CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 20 ; VF-TWO-CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 24 ; VF-TWO-CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 28 -; VF-TWO-CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 32 -; VF-TWO-CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 36 -; VF-TWO-CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 40 -; VF-TWO-CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 44 -; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[BB:%.*]], i64 [[TMP0]] -; VF-TWO-CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP1]] -; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP2]] -; VF-TWO-CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP3]] -; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[BB]], 
i64 [[TMP4]] -; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP5]] -; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP6]] -; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP7]] -; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP8]] -; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP9]] -; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP10]] -; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP11]] -; VF-TWO-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 0 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 4 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP26]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 8 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP28]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 12 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP30]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 16 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP32]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 20 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 24 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP36]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 28 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP38]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 32 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP40]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 36 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP42]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 40 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP44]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 44 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP46]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[CC:%.*]], i64 [[TMP0]] -; VF-TWO-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP1]] -; VF-TWO-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP2]] -; VF-TWO-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP3]] -; VF-TWO-CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP4]] -; VF-TWO-CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP5]] -; VF-TWO-CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP6]] -; VF-TWO-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP7]] -; VF-TWO-CHECK-NEXT: [[TMP56:%.*]] = getelementptr 
inbounds float, ptr [[CC]], i64 [[TMP8]] -; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP9]] -; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP10]] -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP11]] -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x float>, ptr [[TMP60]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP62]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <4 x float>, ptr [[TMP64]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD16:%.*]] = load <4 x float>, ptr [[TMP66]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <4 x float>, ptr [[TMP68]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD18:%.*]] = load <4 x float>, ptr [[TMP70]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD19:%.*]] = load <4 x float>, ptr [[TMP72]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD20:%.*]] = load <4 x float>, ptr [[TMP74]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 32 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <4 x float>, ptr [[TMP76]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 36 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD22:%.*]] = load <4 x float>, ptr [[TMP78]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 40 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <4 x float>, ptr [[TMP80]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 44 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <4 x float>, ptr [[TMP82]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP84:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD13]] -; VF-TWO-CHECK-NEXT: [[TMP85:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD14]] -; VF-TWO-CHECK-NEXT: [[TMP86:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD15]] -; VF-TWO-CHECK-NEXT: [[TMP87:%.*]] = fadd fast <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD16]] -; VF-TWO-CHECK-NEXT: [[TMP88:%.*]] = fadd fast <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD17]] -; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD18]] -; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD19]] -; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD20]] -; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = fadd fast <4 x float> [[WIDE_LOAD9]], [[WIDE_LOAD21]] -; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = fadd fast <4 x float> [[WIDE_LOAD10]], [[WIDE_LOAD22]] -; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = fadd fast <4 x float> [[WIDE_LOAD11]], [[WIDE_LOAD23]] -; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = fadd fast <4 x float> [[WIDE_LOAD12]], [[WIDE_LOAD24]] -; VF-TWO-CHECK-NEXT: 
[[TMP96:%.*]] = getelementptr inbounds float, ptr [[AA:%.*]], i64 [[TMP0]] -; VF-TWO-CHECK-NEXT: [[TMP97:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP1]] -; VF-TWO-CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP2]] -; VF-TWO-CHECK-NEXT: [[TMP99:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP3]] -; VF-TWO-CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP4]] -; VF-TWO-CHECK-NEXT: [[TMP101:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP5]] -; VF-TWO-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP6]] -; VF-TWO-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP7]] -; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP8]] -; VF-TWO-CHECK-NEXT: [[TMP105:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP9]] -; VF-TWO-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP10]] -; VF-TWO-CHECK-NEXT: [[TMP107:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP11]] -; VF-TWO-CHECK-NEXT: [[TMP108:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 0 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP84]], ptr [[TMP108]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 4 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP85]], ptr [[TMP110]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP112:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 8 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP86]], ptr [[TMP112]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 12 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP87]], ptr [[TMP114]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP116:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 16 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP88]], ptr [[TMP116]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP118:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 20 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP89]], ptr [[TMP118]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 24 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP90]], ptr [[TMP120]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 28 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP91]], ptr [[TMP122]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP124:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 32 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP92]], ptr [[TMP124]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP126:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 36 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP93]], ptr [[TMP126]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP128:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 40 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP94]], ptr [[TMP128]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP130:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 44 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP95]], ptr [[TMP130]], align 4 -; VF-TWO-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 48 -; VF-TWO-CHECK-NEXT: [[TMP132:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; VF-TWO-CHECK-NEXT: br i1 [[TMP132]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOPID_MV:![0-9]+]] +; VF-TWO-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP0]] +; VF-TWO-CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP1]] +; 
VF-TWO-CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP2]] +; VF-TWO-CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP3]] +; VF-TWO-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP4]] +; VF-TWO-CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP5]] +; VF-TWO-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP6]] +; VF-TWO-CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP7]] +; VF-TWO-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0 +; VF-TWO-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 +; VF-TWO-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 +; VF-TWO-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 +; VF-TWO-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 +; VF-TWO-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 +; VF-TWO-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 +; VF-TWO-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP16]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP19]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP20]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP21]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP23]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP0]] +; VF-TWO-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP1]] +; VF-TWO-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP2]] +; VF-TWO-CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP3]] +; VF-TWO-CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP4]] +; VF-TWO-CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP5]] +; VF-TWO-CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP6]] +; VF-TWO-CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP7]] +; VF-TWO-CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 0 +; VF-TWO-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 +; VF-TWO-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 +; VF-TWO-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 +; VF-TWO-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 +; VF-TWO-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 +; VF-TWO-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 +; VF-TWO-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP32]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr 
[[TMP33]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP35]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x float>, ptr [[TMP36]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP37]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <4 x float>, ptr [[TMP38]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD16:%.*]] = load <4 x float>, ptr [[TMP39]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP40:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD9]] +; VF-TWO-CHECK-NEXT: [[TMP41:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD10]] +; VF-TWO-CHECK-NEXT: [[TMP42:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD11]] +; VF-TWO-CHECK-NEXT: [[TMP43:%.*]] = fadd fast <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD12]] +; VF-TWO-CHECK-NEXT: [[TMP44:%.*]] = fadd fast <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD13]] +; VF-TWO-CHECK-NEXT: [[TMP45:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD14]] +; VF-TWO-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] +; VF-TWO-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] +; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP0]] +; VF-TWO-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP1]] +; VF-TWO-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP2]] +; VF-TWO-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP3]] +; VF-TWO-CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP4]] +; VF-TWO-CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP5]] +; VF-TWO-CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP6]] +; VF-TWO-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP7]] +; VF-TWO-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 +; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 +; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 +; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 +; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 +; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP56]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP43]], ptr [[TMP59]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP44]], ptr [[TMP60]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP45]], ptr [[TMP61]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP46]], ptr [[TMP62]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP47]], ptr [[TMP63]], align 4 +; VF-TWO-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; VF-TWO-CHECK-NEXT: [[TMP64:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF-TWO-CHECK-NEXT: br i1 [[TMP64]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], 
!llvm.loop [[LOOP0:![0-9]+]] ; VF-TWO-CHECK: middle.block: ; VF-TWO-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -167,38 +125,38 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; VF-TWO-CHECK: vec.epilog.ph: ; VF-TWO-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; VF-TWO-CHECK-NEXT: [[N_MOD_VF25:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 2 -; VF-TWO-CHECK-NEXT: [[N_VEC26:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF25]] +; VF-TWO-CHECK-NEXT: [[N_MOD_VF17:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 2 +; VF-TWO-CHECK-NEXT: [[N_VEC18:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF17]] ; VF-TWO-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; VF-TWO-CHECK: vec.epilog.vector.body: -; VF-TWO-CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT31:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; VF-TWO-CHECK-NEXT: [[TMP133:%.*]] = add i64 [[OFFSET_IDX]], 0 -; VF-TWO-CHECK-NEXT: [[TMP134:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP133]] -; VF-TWO-CHECK-NEXT: [[TMP135:%.*]] = getelementptr inbounds float, ptr [[TMP134]], i32 0 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <2 x float>, ptr [[TMP135]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP137:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP133]] -; VF-TWO-CHECK-NEXT: [[TMP138:%.*]] = getelementptr inbounds float, ptr [[TMP137]], i32 0 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD30:%.*]] = load <2 x float>, ptr [[TMP138]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP140:%.*]] = fadd fast <2 x float> [[WIDE_LOAD29]], [[WIDE_LOAD30]] -; VF-TWO-CHECK-NEXT: [[TMP141:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP133]] -; VF-TWO-CHECK-NEXT: [[TMP142:%.*]] = getelementptr inbounds float, ptr [[TMP141]], i32 0 -; VF-TWO-CHECK-NEXT: store <2 x float> [[TMP140]], ptr [[TMP142]], align 4 -; VF-TWO-CHECK-NEXT: [[INDEX_NEXT31]] = add nuw i64 [[OFFSET_IDX]], 2 -; VF-TWO-CHECK-NEXT: [[TMP144:%.*]] = icmp eq i64 [[INDEX_NEXT31]], [[N_VEC26]] -; VF-TWO-CHECK-NEXT: br i1 [[TMP144]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOPID_EV:![0-9]+]] +; VF-TWO-CHECK-NEXT: [[INDEX20:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT23:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = add i64 [[INDEX20]], 0 +; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP65]] +; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 0 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <2 x float>, ptr [[TMP67]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP65]] +; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 0 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD22:%.*]] = load <2 x float>, ptr [[TMP69]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP70:%.*]] = fadd fast <2 x float> [[WIDE_LOAD21]], [[WIDE_LOAD22]] +; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP65]] +; VF-TWO-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP71]], i32 0 +; VF-TWO-CHECK-NEXT: store <2 x float> [[TMP70]], ptr 
[[TMP72]], align 4 +; VF-TWO-CHECK-NEXT: [[INDEX_NEXT23]] = add nuw i64 [[INDEX20]], 2 +; VF-TWO-CHECK-NEXT: [[TMP73:%.*]] = icmp eq i64 [[INDEX_NEXT23]], [[N_VEC18]] +; VF-TWO-CHECK-NEXT: br i1 [[TMP73]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF-TWO-CHECK: vec.epilog.middle.block: -; VF-TWO-CHECK-NEXT: [[CMP_N27:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC26]] -; VF-TWO-CHECK-NEXT: br i1 [[CMP_N27]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; VF-TWO-CHECK-NEXT: [[CMP_N19:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC18]] +; VF-TWO-CHECK-NEXT: br i1 [[CMP_N19]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; VF-TWO-CHECK: vec.epilog.scalar.ph: -; VF-TWO-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC26]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ] +; VF-TWO-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC18]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ] ; VF-TWO-CHECK-NEXT: br label [[FOR_BODY:%.*]] ; VF-TWO-CHECK: for.body: ; VF-TWO-CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; VF-TWO-CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDVARS_IV]] -; VF-TWO-CHECK-NEXT: [[TMP145:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP74:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; VF-TWO-CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDVARS_IV]] -; VF-TWO-CHECK-NEXT: [[TMP146:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; VF-TWO-CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP145]], [[TMP146]] +; VF-TWO-CHECK-NEXT: [[TMP75:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; VF-TWO-CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP74]], [[TMP75]] ; VF-TWO-CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[INDVARS_IV]] ; VF-TWO-CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX4]], align 4 ; VF-TWO-CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -209,19 +167,20 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; VF-TWO-CHECK: for.end: ; VF-TWO-CHECK-NEXT: ret void ; -; VF-FOUR-CHECK-LABEL: @f1( +; VF-FOUR-CHECK-LABEL: define dso_local void @f1( +; VF-FOUR-CHECK-SAME: ptr noalias [[AA:%.*]], ptr noalias [[BB:%.*]], ptr noalias [[CC:%.*]], i32 signext [[N:%.*]]) #[[ATTR0:[0-9]+]] { ; VF-FOUR-CHECK-NEXT: entry: -; VF-FOUR-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N:%.*]], 0 +; VF-FOUR-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 0 ; VF-FOUR-CHECK-NEXT: br i1 [[CMP1]], label [[ITER_CHECK:%.*]], label [[FOR_END:%.*]] ; VF-FOUR-CHECK: iter.check: ; VF-FOUR-CHECK-NEXT: [[WIDE_TRIP_COUNT:%.*]] = zext i32 [[N]] to i64 ; VF-FOUR-CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 4 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH:%.*]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]] ; VF-FOUR-CHECK: vector.main.loop.iter.check: -; VF-FOUR-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 48 +; VF-FOUR-CHECK-NEXT: [[MIN_ITERS_CHECK1:%.*]] = icmp ult i64 [[WIDE_TRIP_COUNT]], 32 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_ITERS_CHECK1]], label [[VEC_EPILOG_PH:%.*]], label [[VECTOR_PH:%.*]] ; VF-FOUR-CHECK: vector.ph: -; VF-FOUR-CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 
[[WIDE_TRIP_COUNT]], 48 +; VF-FOUR-CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 32 ; VF-FOUR-CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF]] ; VF-FOUR-CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; VF-FOUR-CHECK: vector.body: @@ -234,133 +193,89 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; VF-FOUR-CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 20 ; VF-FOUR-CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 24 ; VF-FOUR-CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 28 -; VF-FOUR-CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 32 -; VF-FOUR-CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 36 -; VF-FOUR-CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 40 -; VF-FOUR-CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 44 -; VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[BB:%.*]], i64 [[TMP0]] -; VF-FOUR-CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP1]] -; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP2]] -; VF-FOUR-CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP3]] -; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP4]] -; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP5]] -; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP6]] -; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP7]] -; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP8]] -; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP9]] -; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP10]] -; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP11]] -; VF-FOUR-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 0 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP24]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 4 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP26]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 8 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP28]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 12 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP30]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 16 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP32]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 20 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 24 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP36]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 28 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP38]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 32 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP40]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP42:%.*]] = getelementptr 
inbounds float, ptr [[TMP12]], i32 36 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP42]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 40 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP44]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 44 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP46]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[CC:%.*]], i64 [[TMP0]] -; VF-FOUR-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP1]] -; VF-FOUR-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP2]] -; VF-FOUR-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP3]] -; VF-FOUR-CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP4]] -; VF-FOUR-CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP5]] -; VF-FOUR-CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP6]] -; VF-FOUR-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP7]] -; VF-FOUR-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP8]] -; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP9]] -; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP10]] -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP11]] -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x float>, ptr [[TMP60]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP62]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <4 x float>, ptr [[TMP64]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD16:%.*]] = load <4 x float>, ptr [[TMP66]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD17:%.*]] = load <4 x float>, ptr [[TMP68]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD18:%.*]] = load <4 x float>, ptr [[TMP70]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD19:%.*]] = load <4 x float>, ptr [[TMP72]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD20:%.*]] = load <4 x float>, ptr [[TMP74]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 32 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <4 x float>, ptr [[TMP76]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 36 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD22:%.*]] = load <4 x float>, ptr [[TMP78]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 40 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <4 x float>, ptr [[TMP80]], align 
4 -; VF-FOUR-CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 44 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <4 x float>, ptr [[TMP82]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP84:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD13]] -; VF-FOUR-CHECK-NEXT: [[TMP85:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD14]] -; VF-FOUR-CHECK-NEXT: [[TMP86:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD15]] -; VF-FOUR-CHECK-NEXT: [[TMP87:%.*]] = fadd fast <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD16]] -; VF-FOUR-CHECK-NEXT: [[TMP88:%.*]] = fadd fast <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD17]] -; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD18]] -; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD19]] -; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD20]] -; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = fadd fast <4 x float> [[WIDE_LOAD9]], [[WIDE_LOAD21]] -; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = fadd fast <4 x float> [[WIDE_LOAD10]], [[WIDE_LOAD22]] -; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = fadd fast <4 x float> [[WIDE_LOAD11]], [[WIDE_LOAD23]] -; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = fadd fast <4 x float> [[WIDE_LOAD12]], [[WIDE_LOAD24]] -; VF-FOUR-CHECK-NEXT: [[TMP96:%.*]] = getelementptr inbounds float, ptr [[AA:%.*]], i64 [[TMP0]] -; VF-FOUR-CHECK-NEXT: [[TMP97:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP1]] -; VF-FOUR-CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP2]] -; VF-FOUR-CHECK-NEXT: [[TMP99:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP3]] -; VF-FOUR-CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP4]] -; VF-FOUR-CHECK-NEXT: [[TMP101:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP5]] -; VF-FOUR-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP6]] -; VF-FOUR-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP7]] -; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP8]] -; VF-FOUR-CHECK-NEXT: [[TMP105:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP9]] -; VF-FOUR-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP10]] -; VF-FOUR-CHECK-NEXT: [[TMP107:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP11]] -; VF-FOUR-CHECK-NEXT: [[TMP108:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 0 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP84]], ptr [[TMP108]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 4 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP85]], ptr [[TMP110]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP112:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 8 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP86]], ptr [[TMP112]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 12 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP87]], ptr [[TMP114]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP116:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 16 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP88]], ptr [[TMP116]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP118:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 20 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP89]], ptr [[TMP118]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 24 -; 
VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP90]], ptr [[TMP120]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 28 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP91]], ptr [[TMP122]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP124:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 32 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP92]], ptr [[TMP124]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP126:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 36 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP93]], ptr [[TMP126]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP128:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 40 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP94]], ptr [[TMP128]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP130:%.*]] = getelementptr inbounds float, ptr [[TMP96]], i32 44 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP95]], ptr [[TMP130]], align 4 -; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 48 -; VF-FOUR-CHECK-NEXT: [[TMP132:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; VF-FOUR-CHECK-NEXT: br i1 [[TMP132]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; VF-FOUR-CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP0]] +; VF-FOUR-CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP1]] +; VF-FOUR-CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP2]] +; VF-FOUR-CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP3]] +; VF-FOUR-CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP4]] +; VF-FOUR-CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP5]] +; VF-FOUR-CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP6]] +; VF-FOUR-CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP7]] +; VF-FOUR-CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 0 +; VF-FOUR-CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 4 +; VF-FOUR-CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 8 +; VF-FOUR-CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 12 +; VF-FOUR-CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 16 +; VF-FOUR-CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 20 +; VF-FOUR-CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 24 +; VF-FOUR-CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, ptr [[TMP8]], i32 28 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP16]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP17]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP18]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP19]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP20]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP21]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP22]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP23]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP0]] +; VF-FOUR-CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP1]] +; VF-FOUR-CHECK-NEXT: [[TMP26:%.*]] = 
getelementptr inbounds float, ptr [[CC]], i64 [[TMP2]] +; VF-FOUR-CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP3]] +; VF-FOUR-CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP4]] +; VF-FOUR-CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP5]] +; VF-FOUR-CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP6]] +; VF-FOUR-CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP7]] +; VF-FOUR-CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 0 +; VF-FOUR-CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 4 +; VF-FOUR-CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 8 +; VF-FOUR-CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 12 +; VF-FOUR-CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 16 +; VF-FOUR-CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 20 +; VF-FOUR-CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 24 +; VF-FOUR-CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds float, ptr [[TMP24]], i32 28 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP32]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP33]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD11:%.*]] = load <4 x float>, ptr [[TMP34]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP35]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD13:%.*]] = load <4 x float>, ptr [[TMP36]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP37]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD15:%.*]] = load <4 x float>, ptr [[TMP38]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD16:%.*]] = load <4 x float>, ptr [[TMP39]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP40:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD9]] +; VF-FOUR-CHECK-NEXT: [[TMP41:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD10]] +; VF-FOUR-CHECK-NEXT: [[TMP42:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD11]] +; VF-FOUR-CHECK-NEXT: [[TMP43:%.*]] = fadd fast <4 x float> [[WIDE_LOAD4]], [[WIDE_LOAD12]] +; VF-FOUR-CHECK-NEXT: [[TMP44:%.*]] = fadd fast <4 x float> [[WIDE_LOAD5]], [[WIDE_LOAD13]] +; VF-FOUR-CHECK-NEXT: [[TMP45:%.*]] = fadd fast <4 x float> [[WIDE_LOAD6]], [[WIDE_LOAD14]] +; VF-FOUR-CHECK-NEXT: [[TMP46:%.*]] = fadd fast <4 x float> [[WIDE_LOAD7]], [[WIDE_LOAD15]] +; VF-FOUR-CHECK-NEXT: [[TMP47:%.*]] = fadd fast <4 x float> [[WIDE_LOAD8]], [[WIDE_LOAD16]] +; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP0]] +; VF-FOUR-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP1]] +; VF-FOUR-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP2]] +; VF-FOUR-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP3]] +; VF-FOUR-CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP4]] +; VF-FOUR-CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP5]] +; VF-FOUR-CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP6]] +; VF-FOUR-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP7]] +; VF-FOUR-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 +; VF-FOUR-CHECK-NEXT: 
[[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 4 +; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 8 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 12 +; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 16 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 20 +; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 24 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 28 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP40]], ptr [[TMP56]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP41]], ptr [[TMP57]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP42]], ptr [[TMP58]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP43]], ptr [[TMP59]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP44]], ptr [[TMP60]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP45]], ptr [[TMP61]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP46]], ptr [[TMP62]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP47]], ptr [[TMP63]], align 4 +; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; VF-FOUR-CHECK-NEXT: [[TMP64:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF-FOUR-CHECK-NEXT: br i1 [[TMP64]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; VF-FOUR-CHECK: middle.block: ; VF-FOUR-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] @@ -370,38 +285,38 @@ define dso_local void @f1(ptr noalias %aa, ptr noalias %bb, ptr noalias %cc, i32 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; VF-FOUR-CHECK: vec.epilog.ph: ; VF-FOUR-CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] -; VF-FOUR-CHECK-NEXT: [[N_MOD_VF25:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 4 -; VF-FOUR-CHECK-NEXT: [[N_VEC26:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF25]] +; VF-FOUR-CHECK-NEXT: [[N_MOD_VF17:%.*]] = urem i64 [[WIDE_TRIP_COUNT]], 4 +; VF-FOUR-CHECK-NEXT: [[N_VEC18:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_MOD_VF17]] ; VF-FOUR-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; VF-FOUR-CHECK: vec.epilog.vector.body: -; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT31:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; VF-FOUR-CHECK-NEXT: [[TMP133:%.*]] = add i64 [[OFFSET_IDX]], 0 -; VF-FOUR-CHECK-NEXT: [[TMP134:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP133]] -; VF-FOUR-CHECK-NEXT: [[TMP135:%.*]] = getelementptr inbounds float, ptr [[TMP134]], i32 0 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <4 x float>, ptr [[TMP135]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP137:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP133]] -; VF-FOUR-CHECK-NEXT: [[TMP138:%.*]] = getelementptr inbounds float, ptr [[TMP137]], i32 0 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD30:%.*]] = load <4 x float>, ptr [[TMP138]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP140:%.*]] = fadd fast <4 x float> [[WIDE_LOAD29]], [[WIDE_LOAD30]] -; VF-FOUR-CHECK-NEXT: [[TMP141:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP133]] -; VF-FOUR-CHECK-NEXT: [[TMP142:%.*]] = getelementptr inbounds float, ptr 
[[TMP141]], i32 0 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP140]], ptr [[TMP142]], align 4 -; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT31]] = add nuw i64 [[OFFSET_IDX]], 4 -; VF-FOUR-CHECK-NEXT: [[TMP144:%.*]] = icmp eq i64 [[INDEX_NEXT31]], [[N_VEC26]] -; VF-FOUR-CHECK-NEXT: br i1 [[TMP144]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]] +; VF-FOUR-CHECK-NEXT: [[INDEX20:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT23:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = add i64 [[INDEX20]], 0 +; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[TMP65]] +; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 0 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <4 x float>, ptr [[TMP67]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[TMP65]] +; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 0 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD22:%.*]] = load <4 x float>, ptr [[TMP69]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP70:%.*]] = fadd fast <4 x float> [[WIDE_LOAD21]], [[WIDE_LOAD22]] +; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 [[TMP65]] +; VF-FOUR-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP71]], i32 0 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP70]], ptr [[TMP72]], align 4 +; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT23]] = add nuw i64 [[INDEX20]], 4 +; VF-FOUR-CHECK-NEXT: [[TMP73:%.*]] = icmp eq i64 [[INDEX_NEXT23]], [[N_VEC18]] +; VF-FOUR-CHECK-NEXT: br i1 [[TMP73]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; VF-FOUR-CHECK: vec.epilog.middle.block: -; VF-FOUR-CHECK-NEXT: [[CMP_N27:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC26]] -; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N27]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] +; VF-FOUR-CHECK-NEXT: [[CMP_N19:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC18]] +; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N19]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; VF-FOUR-CHECK: vec.epilog.scalar.ph: -; VF-FOUR-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC26]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ] +; VF-FOUR-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC18]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[ITER_CHECK]] ] ; VF-FOUR-CHECK-NEXT: br label [[FOR_BODY:%.*]] ; VF-FOUR-CHECK: for.body: ; VF-FOUR-CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; VF-FOUR-CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[BB]], i64 [[INDVARS_IV]] -; VF-FOUR-CHECK-NEXT: [[TMP145:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP74:%.*]] = load float, ptr [[ARRAYIDX]], align 4 ; VF-FOUR-CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[CC]], i64 [[INDVARS_IV]] -; VF-FOUR-CHECK-NEXT: [[TMP146:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 -; VF-FOUR-CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP145]], [[TMP146]] +; VF-FOUR-CHECK-NEXT: [[TMP75:%.*]] = load float, ptr [[ARRAYIDX2]], align 4 +; VF-FOUR-CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP74]], [[TMP75]] ; VF-FOUR-CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds float, ptr [[AA]], i64 
[[INDVARS_IV]] ; VF-FOUR-CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX4]], align 4 ; VF-FOUR-CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -443,9 +358,10 @@ for.end: ; preds = %for.end.loopexit, % } define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) #0 { -; VF-TWO-CHECK-LABEL: @f2( +; VF-TWO-CHECK-LABEL: define dso_local signext i32 @f2( +; VF-TWO-CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] { ; VF-TWO-CHECK-NEXT: entry: -; VF-TWO-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N:%.*]], 1 +; VF-TWO-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 1 ; VF-TWO-CHECK-NEXT: br i1 [[CMP1]], label [[ITER_CHECK:%.*]], label [[FOR_END:%.*]] ; VF-TWO-CHECK: iter.check: ; VF-TWO-CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1 @@ -514,7 +430,7 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-TWO-CHECK-NEXT: [[TMP45:%.*]] = sext i32 [[TMP37]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP46:%.*]] = sext i32 [[TMP38]] to i64 ; VF-TWO-CHECK-NEXT: [[TMP47:%.*]] = sext i32 [[TMP39]] to i64 -; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[TMP40]] +; VF-TWO-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] ; VF-TWO-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP41]] ; VF-TWO-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP42]] ; VF-TWO-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP43]] @@ -524,76 +440,76 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-TWO-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP47]] ; VF-TWO-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 ; VF-TWO-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 +; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP58]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 +; VF-TWO-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 +; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 +; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 +; VF-TWO-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 +; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 +; VF-TWO-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 +; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 ; VF-TWO-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-TWO-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds 
float, ptr [[TMP59]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP60]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x float> [[WIDE_LOAD2]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-TWO-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP63]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP61]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE5:%.*]] = shufflevector <4 x float> [[WIDE_LOAD4]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-TWO-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP65]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP66]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP63]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE7:%.*]] = shufflevector <4 x float> [[WIDE_LOAD6]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-TWO-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP69]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP65]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE9:%.*]] = shufflevector <4 x float> [[WIDE_LOAD8]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-TWO-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP71]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP72]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP67]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE11:%.*]] = shufflevector <4 x float> [[WIDE_LOAD10]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-TWO-CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds float, ptr [[TMP74]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP75]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP69]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE13:%.*]] = shufflevector <4 x float> [[WIDE_LOAD12]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-TWO-CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds float, ptr [[TMP77]], i32 -3 -; VF-TWO-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP78]], align 4 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP71]], align 4 ; VF-TWO-CHECK-NEXT: [[REVERSE15:%.*]] = shufflevector <4 x float> [[WIDE_LOAD14]], <4 x float> poison, <4 x i32> -; VF-TWO-CHECK-NEXT: [[TMP80:%.*]] = fadd fast <4 x float> [[REVERSE]], -; VF-TWO-CHECK-NEXT: [[TMP81:%.*]] = fadd fast <4 x float> [[REVERSE3]], -; VF-TWO-CHECK-NEXT: [[TMP82:%.*]] = fadd fast <4 x float> [[REVERSE5]], -; VF-TWO-CHECK-NEXT: [[TMP83:%.*]] = fadd fast <4 x float> [[REVERSE7]], -; VF-TWO-CHECK-NEXT: [[TMP84:%.*]] = fadd fast <4 x float> [[REVERSE9]], -; VF-TWO-CHECK-NEXT: [[TMP85:%.*]] = fadd fast <4 x float> [[REVERSE11]], -; VF-TWO-CHECK-NEXT: 
[[TMP86:%.*]] = fadd fast <4 x float> [[REVERSE13]], -; VF-TWO-CHECK-NEXT: [[TMP87:%.*]] = fadd fast <4 x float> [[REVERSE15]], -; VF-TWO-CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP16]] -; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP17]] -; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP18]] -; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP19]] -; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP20]] -; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]] -; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]] -; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]] -; VF-TWO-CHECK-NEXT: [[TMP96:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 0 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP80]], ptr [[TMP96]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 4 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP81]], ptr [[TMP98]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 8 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP82]], ptr [[TMP100]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 12 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP83]], ptr [[TMP102]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 16 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP84]], ptr [[TMP104]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 20 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP85]], ptr [[TMP106]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP108:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 24 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP86]], ptr [[TMP108]], align 4 -; VF-TWO-CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 28 -; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP87]], ptr [[TMP110]], align 4 +; VF-TWO-CHECK-NEXT: [[TMP72:%.*]] = fadd fast <4 x float> [[REVERSE]], +; VF-TWO-CHECK-NEXT: [[TMP73:%.*]] = fadd fast <4 x float> [[REVERSE3]], +; VF-TWO-CHECK-NEXT: [[TMP74:%.*]] = fadd fast <4 x float> [[REVERSE5]], +; VF-TWO-CHECK-NEXT: [[TMP75:%.*]] = fadd fast <4 x float> [[REVERSE7]], +; VF-TWO-CHECK-NEXT: [[TMP76:%.*]] = fadd fast <4 x float> [[REVERSE9]], +; VF-TWO-CHECK-NEXT: [[TMP77:%.*]] = fadd fast <4 x float> [[REVERSE11]], +; VF-TWO-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], +; VF-TWO-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], +; VF-TWO-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP16]] +; VF-TWO-CHECK-NEXT: [[TMP81:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP17]] +; VF-TWO-CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP18]] +; VF-TWO-CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP19]] +; VF-TWO-CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP20]] +; VF-TWO-CHECK-NEXT: [[TMP85:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]] +; VF-TWO-CHECK-NEXT: [[TMP86:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]] +; VF-TWO-CHECK-NEXT: [[TMP87:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]] +; 
VF-TWO-CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 0 +; VF-TWO-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 +; VF-TWO-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 +; VF-TWO-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 12 +; VF-TWO-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 16 +; VF-TWO-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 +; VF-TWO-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 +; VF-TWO-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP88]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP75]], ptr [[TMP91]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP76]], ptr [[TMP92]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP77]], ptr [[TMP93]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP78]], ptr [[TMP94]], align 4 +; VF-TWO-CHECK-NEXT: store <4 x float> [[TMP79]], ptr [[TMP95]], align 4 ; VF-TWO-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; VF-TWO-CHECK-NEXT: [[TMP112:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; VF-TWO-CHECK-NEXT: br i1 [[TMP112]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; VF-TWO-CHECK-NEXT: [[TMP96:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF-TWO-CHECK-NEXT: br i1 [[TMP96]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF-TWO-CHECK: middle.block: ; VF-TWO-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; VF-TWO-CHECK: vec.epilog.iter.check: -; VF-TWO-CHECK-NEXT: [[IND_END19:%.*]] = trunc i64 [[N_VEC]] to i32 +; VF-TWO-CHECK-NEXT: [[IND_END18:%.*]] = trunc i64 [[N_VEC]] to i32 ; VF-TWO-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-TWO-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 2 ; VF-TWO-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] @@ -604,55 +520,56 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-TWO-CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC17]] to i32 ; VF-TWO-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; VF-TWO-CHECK: vec.epilog.vector.body: -; VF-TWO-CHECK-NEXT: [[OFFSET_IDX23:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT26:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; VF-TWO-CHECK-NEXT: [[OFFSET_IDX22:%.*]] = trunc i64 [[OFFSET_IDX23]] to i32 -; VF-TWO-CHECK-NEXT: [[TMP113:%.*]] = add i32 [[OFFSET_IDX22]], 0 -; VF-TWO-CHECK-NEXT: [[TMP114:%.*]] = add i64 [[OFFSET_IDX23]], 0 -; VF-TWO-CHECK-NEXT: [[TMP115:%.*]] = xor i32 [[TMP113]], -1 -; VF-TWO-CHECK-NEXT: [[TMP116:%.*]] = add i32 [[TMP115]], [[N]] -; VF-TWO-CHECK-NEXT: [[TMP117:%.*]] = sext i32 [[TMP116]] to i64 -; VF-TWO-CHECK-NEXT: [[TMP118:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP117]] -; VF-TWO-CHECK-NEXT: [[TMP119:%.*]] = getelementptr inbounds float, ptr [[TMP118]], i32 0 -; VF-TWO-CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds float, ptr [[TMP119]], i32 -1 
-; VF-TWO-CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <2 x float>, ptr [[TMP120]], align 4 -; VF-TWO-CHECK-NEXT: [[REVERSE25:%.*]] = shufflevector <2 x float> [[WIDE_LOAD24]], <2 x float> poison, <2 x i32> -; VF-TWO-CHECK-NEXT: [[TMP122:%.*]] = fadd fast <2 x float> [[REVERSE25]], -; VF-TWO-CHECK-NEXT: [[TMP123:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP114]] -; VF-TWO-CHECK-NEXT: [[TMP124:%.*]] = getelementptr inbounds float, ptr [[TMP123]], i32 0 -; VF-TWO-CHECK-NEXT: store <2 x float> [[TMP122]], ptr [[TMP124]], align 4 -; VF-TWO-CHECK-NEXT: [[INDEX_NEXT26]] = add nuw i64 [[OFFSET_IDX23]], 2 -; VF-TWO-CHECK-NEXT: [[TMP126:%.*]] = icmp eq i64 [[INDEX_NEXT26]], [[N_VEC17]] -; VF-TWO-CHECK-NEXT: br i1 [[TMP126]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; VF-TWO-CHECK-NEXT: [[INDEX21:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT25:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; VF-TWO-CHECK-NEXT: [[OFFSET_IDX22:%.*]] = trunc i64 [[INDEX21]] to i32 +; VF-TWO-CHECK-NEXT: [[TMP97:%.*]] = add i32 [[OFFSET_IDX22]], 0 +; VF-TWO-CHECK-NEXT: [[TMP98:%.*]] = add i64 [[INDEX21]], 0 +; VF-TWO-CHECK-NEXT: [[TMP99:%.*]] = xor i32 [[TMP97]], -1 +; VF-TWO-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] +; VF-TWO-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 +; VF-TWO-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] +; VF-TWO-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 0 +; VF-TWO-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -1 +; VF-TWO-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <2 x float>, ptr [[TMP104]], align 4 +; VF-TWO-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <2 x float> [[WIDE_LOAD23]], <2 x float> poison, <2 x i32> +; VF-TWO-CHECK-NEXT: [[TMP105:%.*]] = fadd fast <2 x float> [[REVERSE24]], +; VF-TWO-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP98]] +; VF-TWO-CHECK-NEXT: [[TMP107:%.*]] = getelementptr inbounds float, ptr [[TMP106]], i32 0 +; VF-TWO-CHECK-NEXT: store <2 x float> [[TMP105]], ptr [[TMP107]], align 4 +; VF-TWO-CHECK-NEXT: [[INDEX_NEXT25]] = add nuw i64 [[INDEX21]], 2 +; VF-TWO-CHECK-NEXT: [[TMP108:%.*]] = icmp eq i64 [[INDEX_NEXT25]], [[N_VEC17]] +; VF-TWO-CHECK-NEXT: br i1 [[TMP108]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF-TWO-CHECK: vec.epilog.middle.block: ; VF-TWO-CHECK-NEXT: [[CMP_N20:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC17]] ; VF-TWO-CHECK-NEXT: br i1 [[CMP_N20]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; VF-TWO-CHECK: vec.epilog.scalar.ph: ; VF-TWO-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC17]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] -; VF-TWO-CHECK-NEXT: [[BC_RESUME_VAL18:%.*]] = phi i32 [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END19]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] +; VF-TWO-CHECK-NEXT: [[BC_RESUME_VAL19:%.*]] = phi i32 [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END18]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] ; VF-TWO-CHECK-NEXT: br label [[FOR_BODY:%.*]] ; VF-TWO-CHECK: for.body: ; VF-TWO-CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; 
VF-TWO-CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL18]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; VF-TWO-CHECK-NEXT: [[TMP127:%.*]] = xor i32 [[I_014]], -1 -; VF-TWO-CHECK-NEXT: [[SUB2:%.*]] = add i32 [[TMP127]], [[N]] +; VF-TWO-CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL19]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; VF-TWO-CHECK-NEXT: [[TMP109:%.*]] = xor i32 [[I_014]], -1 +; VF-TWO-CHECK-NEXT: [[SUB2:%.*]] = add i32 [[TMP109]], [[N]] ; VF-TWO-CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[SUB2]] to i64 ; VF-TWO-CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IDXPROM]] -; VF-TWO-CHECK-NEXT: [[TMP128:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; VF-TWO-CHECK-NEXT: [[CONV3:%.*]] = fadd fast float [[TMP128]], 1.000000e+00 +; VF-TWO-CHECK-NEXT: [[TMP110:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; VF-TWO-CHECK-NEXT: [[CONV3:%.*]] = fadd fast float [[TMP110]], 1.000000e+00 ; VF-TWO-CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] ; VF-TWO-CHECK-NEXT: store float [[CONV3]], ptr [[ARRAYIDX5]], align 4 ; VF-TWO-CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VF-TWO-CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_014]], 1 ; VF-TWO-CHECK-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[INDVARS_IV_NEXT]], [[WIDE_TRIP_COUNT]] -; VF-TWO-CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]], !llvm.loop [[LOOPID_MS_CM:![0-9]+]] +; VF-TWO-CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_BODY]], label [[FOR_END_LOOPEXIT]], !llvm.loop [[LOOP7:![0-9]+]] ; VF-TWO-CHECK: for.end.loopexit: ; VF-TWO-CHECK-NEXT: br label [[FOR_END]] ; VF-TWO-CHECK: for.end: ; VF-TWO-CHECK-NEXT: ret i32 0 ; -; VF-FOUR-CHECK-LABEL: @f2( +; VF-FOUR-CHECK-LABEL: define dso_local signext i32 @f2( +; VF-FOUR-CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i32 signext [[N:%.*]]) #[[ATTR0]] { ; VF-FOUR-CHECK-NEXT: entry: -; VF-FOUR-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N:%.*]], 1 +; VF-FOUR-CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[N]], 1 ; VF-FOUR-CHECK-NEXT: br i1 [[CMP1]], label [[ITER_CHECK:%.*]], label [[FOR_END:%.*]] ; VF-FOUR-CHECK: iter.check: ; VF-FOUR-CHECK-NEXT: [[TMP0:%.*]] = add i32 [[N]], -1 @@ -721,7 +638,7 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-FOUR-CHECK-NEXT: [[TMP45:%.*]] = sext i32 [[TMP37]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP46:%.*]] = sext i32 [[TMP38]] to i64 ; VF-FOUR-CHECK-NEXT: [[TMP47:%.*]] = sext i32 [[TMP39]] to i64 -; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[TMP40]] +; VF-FOUR-CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP40]] ; VF-FOUR-CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP41]] ; VF-FOUR-CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP42]] ; VF-FOUR-CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP43]] @@ -731,76 +648,76 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-FOUR-CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP47]] ; VF-FOUR-CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 0 ; VF-FOUR-CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds float, ptr [[TMP56]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 +; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr 
inbounds float, ptr [[TMP58]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 +; VF-FOUR-CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds float, ptr [[TMP60]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 +; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 +; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP64]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 +; VF-FOUR-CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds float, ptr [[TMP66]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 +; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 +; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP70]], i32 -3 ; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP57]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE:%.*]] = shufflevector <4 x float> [[WIDE_LOAD]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -4 -; VF-FOUR-CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds float, ptr [[TMP59]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP60]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP59]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x float> [[WIDE_LOAD2]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -8 -; VF-FOUR-CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds float, ptr [[TMP62]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP63]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP61]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE5:%.*]] = shufflevector <4 x float> [[WIDE_LOAD4]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -12 -; VF-FOUR-CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds float, ptr [[TMP65]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP66]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP63]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE7:%.*]] = shufflevector <4 x float> [[WIDE_LOAD6]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -16 -; VF-FOUR-CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds float, ptr [[TMP68]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP69]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP65]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE9:%.*]] = shufflevector <4 x float> [[WIDE_LOAD8]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -20 -; VF-FOUR-CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds float, ptr [[TMP71]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP72]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP67]], align 4 ; 
VF-FOUR-CHECK-NEXT: [[REVERSE11:%.*]] = shufflevector <4 x float> [[WIDE_LOAD10]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -24 -; VF-FOUR-CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds float, ptr [[TMP74]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP75]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD12:%.*]] = load <4 x float>, ptr [[TMP69]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE13:%.*]] = shufflevector <4 x float> [[WIDE_LOAD12]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds float, ptr [[TMP48]], i32 -28 -; VF-FOUR-CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds float, ptr [[TMP77]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP78]], align 4 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD14:%.*]] = load <4 x float>, ptr [[TMP71]], align 4 ; VF-FOUR-CHECK-NEXT: [[REVERSE15:%.*]] = shufflevector <4 x float> [[WIDE_LOAD14]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP80:%.*]] = fadd fast <4 x float> [[REVERSE]], -; VF-FOUR-CHECK-NEXT: [[TMP81:%.*]] = fadd fast <4 x float> [[REVERSE3]], -; VF-FOUR-CHECK-NEXT: [[TMP82:%.*]] = fadd fast <4 x float> [[REVERSE5]], -; VF-FOUR-CHECK-NEXT: [[TMP83:%.*]] = fadd fast <4 x float> [[REVERSE7]], -; VF-FOUR-CHECK-NEXT: [[TMP84:%.*]] = fadd fast <4 x float> [[REVERSE9]], -; VF-FOUR-CHECK-NEXT: [[TMP85:%.*]] = fadd fast <4 x float> [[REVERSE11]], -; VF-FOUR-CHECK-NEXT: [[TMP86:%.*]] = fadd fast <4 x float> [[REVERSE13]], -; VF-FOUR-CHECK-NEXT: [[TMP87:%.*]] = fadd fast <4 x float> [[REVERSE15]], -; VF-FOUR-CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP16]] -; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP17]] -; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP18]] -; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP19]] -; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP20]] -; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]] -; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]] -; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]] -; VF-FOUR-CHECK-NEXT: [[TMP96:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 0 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP80]], ptr [[TMP96]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP98:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 4 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP81]], ptr [[TMP98]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 8 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP82]], ptr [[TMP100]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 12 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP83]], ptr [[TMP102]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 16 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP84]], ptr [[TMP104]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 20 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP85]], ptr [[TMP106]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP108:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 24 -; VF-FOUR-CHECK-NEXT: store <4 x float> 
[[TMP86]], ptr [[TMP108]], align 4 -; VF-FOUR-CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds float, ptr [[TMP88]], i32 28 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP87]], ptr [[TMP110]], align 4 +; VF-FOUR-CHECK-NEXT: [[TMP72:%.*]] = fadd fast <4 x float> [[REVERSE]], +; VF-FOUR-CHECK-NEXT: [[TMP73:%.*]] = fadd fast <4 x float> [[REVERSE3]], +; VF-FOUR-CHECK-NEXT: [[TMP74:%.*]] = fadd fast <4 x float> [[REVERSE5]], +; VF-FOUR-CHECK-NEXT: [[TMP75:%.*]] = fadd fast <4 x float> [[REVERSE7]], +; VF-FOUR-CHECK-NEXT: [[TMP76:%.*]] = fadd fast <4 x float> [[REVERSE9]], +; VF-FOUR-CHECK-NEXT: [[TMP77:%.*]] = fadd fast <4 x float> [[REVERSE11]], +; VF-FOUR-CHECK-NEXT: [[TMP78:%.*]] = fadd fast <4 x float> [[REVERSE13]], +; VF-FOUR-CHECK-NEXT: [[TMP79:%.*]] = fadd fast <4 x float> [[REVERSE15]], +; VF-FOUR-CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP16]] +; VF-FOUR-CHECK-NEXT: [[TMP81:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP17]] +; VF-FOUR-CHECK-NEXT: [[TMP82:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP18]] +; VF-FOUR-CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP19]] +; VF-FOUR-CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP20]] +; VF-FOUR-CHECK-NEXT: [[TMP85:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP21]] +; VF-FOUR-CHECK-NEXT: [[TMP86:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP22]] +; VF-FOUR-CHECK-NEXT: [[TMP87:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP23]] +; VF-FOUR-CHECK-NEXT: [[TMP88:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 0 +; VF-FOUR-CHECK-NEXT: [[TMP89:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 4 +; VF-FOUR-CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 8 +; VF-FOUR-CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 12 +; VF-FOUR-CHECK-NEXT: [[TMP92:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 16 +; VF-FOUR-CHECK-NEXT: [[TMP93:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 20 +; VF-FOUR-CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 24 +; VF-FOUR-CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds float, ptr [[TMP80]], i32 28 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP72]], ptr [[TMP88]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP73]], ptr [[TMP89]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP74]], ptr [[TMP90]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP75]], ptr [[TMP91]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP76]], ptr [[TMP92]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP77]], ptr [[TMP93]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP78]], ptr [[TMP94]], align 4 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP79]], ptr [[TMP95]], align 4 ; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 -; VF-FOUR-CHECK-NEXT: [[TMP112:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] -; VF-FOUR-CHECK-NEXT: br i1 [[TMP112]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOPID_MV_CM:![0-9]+]] +; VF-FOUR-CHECK-NEXT: [[TMP96:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] +; VF-FOUR-CHECK-NEXT: br i1 [[TMP96]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; VF-FOUR-CHECK: middle.block: ; VF-FOUR-CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END_LOOPEXIT:%.*]], label 
[[VEC_EPILOG_ITER_CHECK:%.*]] ; VF-FOUR-CHECK: vec.epilog.iter.check: -; VF-FOUR-CHECK-NEXT: [[IND_END19:%.*]] = trunc i64 [[N_VEC]] to i32 +; VF-FOUR-CHECK-NEXT: [[IND_END18:%.*]] = trunc i64 [[N_VEC]] to i32 ; VF-FOUR-CHECK-NEXT: [[N_VEC_REMAINING:%.*]] = sub i64 [[WIDE_TRIP_COUNT]], [[N_VEC]] ; VF-FOUR-CHECK-NEXT: [[MIN_EPILOG_ITERS_CHECK:%.*]] = icmp ult i64 [[N_VEC_REMAINING]], 4 ; VF-FOUR-CHECK-NEXT: br i1 [[MIN_EPILOG_ITERS_CHECK]], label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] @@ -811,41 +728,41 @@ define dso_local signext i32 @f2(ptr noalias %A, ptr noalias %B, i32 signext %n) ; VF-FOUR-CHECK-NEXT: [[IND_END:%.*]] = trunc i64 [[N_VEC17]] to i32 ; VF-FOUR-CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; VF-FOUR-CHECK: vec.epilog.vector.body: -; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX23:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT26:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX22:%.*]] = trunc i64 [[OFFSET_IDX23]] to i32 -; VF-FOUR-CHECK-NEXT: [[TMP113:%.*]] = add i32 [[OFFSET_IDX22]], 0 -; VF-FOUR-CHECK-NEXT: [[TMP114:%.*]] = add i64 [[OFFSET_IDX23]], 0 -; VF-FOUR-CHECK-NEXT: [[TMP115:%.*]] = xor i32 [[TMP113]], -1 -; VF-FOUR-CHECK-NEXT: [[TMP116:%.*]] = add i32 [[TMP115]], [[N]] -; VF-FOUR-CHECK-NEXT: [[TMP117:%.*]] = sext i32 [[TMP116]] to i64 -; VF-FOUR-CHECK-NEXT: [[TMP118:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP117]] -; VF-FOUR-CHECK-NEXT: [[TMP119:%.*]] = getelementptr inbounds float, ptr [[TMP118]], i32 0 -; VF-FOUR-CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds float, ptr [[TMP119]], i32 -3 -; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <4 x float>, ptr [[TMP120]], align 4 -; VF-FOUR-CHECK-NEXT: [[REVERSE25:%.*]] = shufflevector <4 x float> [[WIDE_LOAD24]], <4 x float> poison, <4 x i32> -; VF-FOUR-CHECK-NEXT: [[TMP122:%.*]] = fadd fast <4 x float> [[REVERSE25]], -; VF-FOUR-CHECK-NEXT: [[TMP123:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP114]] -; VF-FOUR-CHECK-NEXT: [[TMP124:%.*]] = getelementptr inbounds float, ptr [[TMP123]], i32 0 -; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP122]], ptr [[TMP124]], align 4 -; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT26]] = add nuw i64 [[OFFSET_IDX23]], 4 -; VF-FOUR-CHECK-NEXT: [[TMP126:%.*]] = icmp eq i64 [[INDEX_NEXT26]], [[N_VEC17]] -; VF-FOUR-CHECK-NEXT: br i1 [[TMP126]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOPID_EV_CM:![0-9]+]] +; VF-FOUR-CHECK-NEXT: [[INDEX21:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT25:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; VF-FOUR-CHECK-NEXT: [[OFFSET_IDX22:%.*]] = trunc i64 [[INDEX21]] to i32 +; VF-FOUR-CHECK-NEXT: [[TMP97:%.*]] = add i32 [[OFFSET_IDX22]], 0 +; VF-FOUR-CHECK-NEXT: [[TMP98:%.*]] = add i64 [[INDEX21]], 0 +; VF-FOUR-CHECK-NEXT: [[TMP99:%.*]] = xor i32 [[TMP97]], -1 +; VF-FOUR-CHECK-NEXT: [[TMP100:%.*]] = add i32 [[TMP99]], [[N]] +; VF-FOUR-CHECK-NEXT: [[TMP101:%.*]] = sext i32 [[TMP100]] to i64 +; VF-FOUR-CHECK-NEXT: [[TMP102:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP101]] +; VF-FOUR-CHECK-NEXT: [[TMP103:%.*]] = getelementptr inbounds float, ptr [[TMP102]], i32 0 +; VF-FOUR-CHECK-NEXT: [[TMP104:%.*]] = getelementptr inbounds float, ptr [[TMP103]], i32 -3 +; VF-FOUR-CHECK-NEXT: [[WIDE_LOAD23:%.*]] = load <4 x float>, ptr [[TMP104]], align 4 +; VF-FOUR-CHECK-NEXT: [[REVERSE24:%.*]] = shufflevector <4 x float> [[WIDE_LOAD23]], <4 x float> poison, <4 x i32> +; VF-FOUR-CHECK-NEXT: [[TMP105:%.*]] = fadd 
fast <4 x float> [[REVERSE24]], +; VF-FOUR-CHECK-NEXT: [[TMP106:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP98]] +; VF-FOUR-CHECK-NEXT: [[TMP107:%.*]] = getelementptr inbounds float, ptr [[TMP106]], i32 0 +; VF-FOUR-CHECK-NEXT: store <4 x float> [[TMP105]], ptr [[TMP107]], align 4 +; VF-FOUR-CHECK-NEXT: [[INDEX_NEXT25]] = add nuw i64 [[INDEX21]], 4 +; VF-FOUR-CHECK-NEXT: [[TMP108:%.*]] = icmp eq i64 [[INDEX_NEXT25]], [[N_VEC17]] +; VF-FOUR-CHECK-NEXT: br i1 [[TMP108]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; VF-FOUR-CHECK: vec.epilog.middle.block: ; VF-FOUR-CHECK-NEXT: [[CMP_N20:%.*]] = icmp eq i64 [[WIDE_TRIP_COUNT]], [[N_VEC17]] ; VF-FOUR-CHECK-NEXT: br i1 [[CMP_N20]], label [[FOR_END_LOOPEXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; VF-FOUR-CHECK: vec.epilog.scalar.ph: ; VF-FOUR-CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC17]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[N_VEC]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] -; VF-FOUR-CHECK-NEXT: [[BC_RESUME_VAL18:%.*]] = phi i32 [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END19]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] +; VF-FOUR-CHECK-NEXT: [[BC_RESUME_VAL19:%.*]] = phi i32 [ [[IND_END]], [[VEC_EPILOG_MIDDLE_BLOCK]] ], [ [[IND_END18]], [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_SCEVCHECK]] ], [ 0, [[ITER_CHECK]] ] ; VF-FOUR-CHECK-NEXT: br label [[FOR_BODY:%.*]] ; VF-FOUR-CHECK: for.body: ; VF-FOUR-CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] -; VF-FOUR-CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL18]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] -; VF-FOUR-CHECK-NEXT: [[TMP127:%.*]] = xor i32 [[I_014]], -1 -; VF-FOUR-CHECK-NEXT: [[SUB2:%.*]] = add i32 [[TMP127]], [[N]] +; VF-FOUR-CHECK-NEXT: [[I_014:%.*]] = phi i32 [ [[BC_RESUME_VAL19]], [[VEC_EPILOG_SCALAR_PH]] ], [ [[INC:%.*]], [[FOR_BODY]] ] +; VF-FOUR-CHECK-NEXT: [[TMP109:%.*]] = xor i32 [[I_014]], -1 +; VF-FOUR-CHECK-NEXT: [[SUB2:%.*]] = add i32 [[TMP109]], [[N]] ; VF-FOUR-CHECK-NEXT: [[IDXPROM:%.*]] = sext i32 [[SUB2]] to i64 ; VF-FOUR-CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IDXPROM]] -; VF-FOUR-CHECK-NEXT: [[TMP128:%.*]] = load float, ptr [[ARRAYIDX]], align 4 -; VF-FOUR-CHECK-NEXT: [[CONV3:%.*]] = fadd fast float [[TMP128]], 1.000000e+00 +; VF-FOUR-CHECK-NEXT: [[TMP110:%.*]] = load float, ptr [[ARRAYIDX]], align 4 +; VF-FOUR-CHECK-NEXT: [[CONV3:%.*]] = fadd fast float [[TMP110]], 1.000000e+00 ; VF-FOUR-CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] ; VF-FOUR-CHECK-NEXT: store float [[CONV3]], ptr [[ARRAYIDX5]], align 4 ; VF-FOUR-CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 @@ -889,14 +806,6 @@ for.end: ; preds = %for.end.loopexit, % ret i32 0 } -; VF-TWO-CHECK-DAG: [[LOOPID_MV]] = distinct !{[[LOOPID_MV]], [[LOOPID_DISABLE_VECT:!.*]], [[LOOPID_DISABLE_UNROLL:!.*]]} -; VF-TWO-CHECK-DAG: [[LOOPID_EV]] = distinct !{[[LOOPID_EV]], [[LOOPID_DISABLE_VECT]], [[LOOPID_DISABLE_UNROLL]]} -; VF-TWO-CHECK-DAG: [[LOOPID_DISABLE_VECT]] = [[DISABLE_VECT_STR:!{!"llvm.loop.isvectorized".*}.*]] -; VF-TWO-CHECK-DAG: [[LOOPID_DISABLE_UNROLL]] = [[DISABLE_UNROLL_STR:!{!"llvm.loop.unroll.runtime.disable"}.*]] ; -; VF-FOUR-CHECK-DAG: [[LOOPID_MV_CM]] = distinct !{[[LOOPID_MV_CM]], [[LOOPID_DISABLE_VECT_CM:!.*]], 
[[LOOPID_DISABLE_UNROLL_CM:!.*]]} -; VF-FOUR-CHECK-DAG: [[LOOPID_EV_CM]] = distinct !{[[LOOPID_EV_CM]], [[LOOPID_DISABLE_VECT_CM]], [[LOOPID_DISABLE_UNROLL_CM]]} -; VF-FOUR-CHECK-DAG: [[LOOPID_DISABLE_VECT_CM]] = [[DISABLE_VECT_STR_CM:!{!"llvm.loop.isvectorized".*}.*]] -; VF-FOUR-CHECK-DAG: [[LOOPID_DISABLE_UNROLL_CM]] = [[DISABLE_UNROLL_STR_CM:!{!"llvm.loop.unroll.runtime.disable"}.*]] ; attributes #0 = { nounwind "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "frame-pointer"="none" "less-precise-fpmad"="false" "min-legal-vector-width"="0" "no-infs-fp-math"="true" "no-jump-tables"="false" "no-nans-fp-math"="true" "no-signed-zeros-fp-math"="true" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="ppc64le" "target-features"="+altivec,+bpermd,+crypto,+direct-move,+extdiv,+htm,+power8-vector,+vsx,-power9-vector,-spe" "unsafe-fp-math"="true" "use-soft-float"="false" } diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/reg-usage.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/reg-usage.ll index f9d512e74e1b4..aac9aff3d391f 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/reg-usage.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/reg-usage.ll @@ -79,7 +79,7 @@ for.body: ; preds = %for.body, %entry define i64 @bar(ptr nocapture %a) { ; CHECK-LABEL: bar -; CHECK: Executing best plan with VF=2, UF=12 +; CHECK: Executing best plan with VF=2, UF=8 entry: br label %for.body @@ -107,7 +107,7 @@ for.body: define void @hoo(i32 %n) { ; CHECK-LABEL: hoo -; CHECK: Executing best plan with VF=1, UF=12 +; CHECK: Executing best plan with VF=1, UF=8 entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll index a567045c53c6e..ce83ab460a6c2 100644 --- a/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll +++ b/llvm/test/Transforms/LoopVectorize/PowerPC/small-loop-rdx.ll @@ -9,10 +9,6 @@ ; CHECK-NEXT: fadd ; CHECK-NEXT: fadd ; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd -; CHECK-NEXT: fadd ; CHECK-NOT: fadd ; CHECK: middle.block diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll index d469178fec0e7..d8535d510ca03 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -passes=loop-vectorize -mtriple=s390x-linux-gnu -tiny-trip-count-interleave-threshold=4 -vectorizer-min-trip-count=8 < %s | FileCheck %s +; RUN: opt -S -passes=loop-vectorize -mtriple=s390x-linux-gnu -vectorizer-min-trip-count=8 < %s | FileCheck %s define i32 @main(i32 %arg, ptr nocapture readnone %arg1) #0 { ;CHECK: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index 1c7f3a70d37c3..8fd0008b21721 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -73,24 +73,34 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE: vector.body: ; SSE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] +; SSE-NEXT: [[VEC_PHI1:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ 
[[PREDPHI3:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 -; SSE-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] -; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 0 -; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 -; SSE-NEXT: [[TMP3:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], -; SSE-NEXT: [[TMP4:%.*]] = fadd fast <2 x double> [[VEC_PHI]], [[WIDE_LOAD]] -; SSE-NEXT: [[TMP5:%.*]] = xor <2 x i1> [[TMP3]], -; SSE-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP3]], <2 x double> [[TMP4]], <2 x double> [[VEC_PHI]] -; SSE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 -; SSE-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 -; SSE-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; SSE-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 2 +; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] +; SSE-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP1]] +; SSE-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP2]], i32 0 +; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 +; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i32 2 +; SSE-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 +; SSE-NEXT: [[TMP6:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], +; SSE-NEXT: [[TMP7:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD2]], +; SSE-NEXT: [[TMP8:%.*]] = fadd fast <2 x double> [[VEC_PHI]], [[WIDE_LOAD]] +; SSE-NEXT: [[TMP9:%.*]] = fadd fast <2 x double> [[VEC_PHI1]], [[WIDE_LOAD2]] +; SSE-NEXT: [[TMP10:%.*]] = xor <2 x i1> [[TMP6]], +; SSE-NEXT: [[TMP11:%.*]] = xor <2 x i1> [[TMP7]], +; SSE-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP6]], <2 x double> [[TMP8]], <2 x double> [[VEC_PHI]] +; SSE-NEXT: [[PREDPHI3]] = select <2 x i1> [[TMP7]], <2 x double> [[TMP9]], <2 x double> [[VEC_PHI1]] +; SSE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; SSE-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 +; SSE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SSE: middle.block: -; SSE-NEXT: [[TMP7:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[PREDPHI]]) +; SSE-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x double> [[PREDPHI3]], [[PREDPHI]] +; SSE-NEXT: [[TMP13:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[BIN_RDX]]) ; SSE-NEXT: [[CMP_N:%.*]] = icmp eq i32 32, 32 ; SSE-NEXT: br i1 [[CMP_N]], label [[DONE:%.*]], label [[SCALAR_PH]] ; SSE: scalar.ph: ; SSE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 32, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; SSE-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; SSE-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] ; SSE-NEXT: br label [[LOOP:%.*]] ; SSE: loop: ; SSE-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] @@ -108,9 +118,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] ; SSE-NEXT: [[I_NEXT]] = add i32 [[I]], 1 ; SSE-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP2:![0-9]+]] +; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP3:![0-9]+]] ; 
SSE: done: -; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] ; SSE-NEXT: ret double [[TOT_NEXT_LCSSA]] ; ; AVX-LABEL: @sumIfVector( @@ -121,24 +131,54 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX: vector.body: ; AVX-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[VEC_PHI:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI1:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI7:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI2:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI8:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI9:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 -; AVX-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] -; AVX-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 0 -; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP2]], align 8 -; AVX-NEXT: [[TMP3:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD]], -; AVX-NEXT: [[TMP4:%.*]] = fadd fast <4 x double> [[VEC_PHI]], [[WIDE_LOAD]] -; AVX-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP3]], -; AVX-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP3]], <4 x double> [[TMP4]], <4 x double> [[VEC_PHI]] -; AVX-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; AVX-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 -; AVX-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; AVX-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 4 +; AVX-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 8 +; AVX-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 12 +; AVX-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] +; AVX-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP1]] +; AVX-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP2]] +; AVX-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP3]] +; AVX-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[TMP4]], i32 0 +; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP8]], align 8 +; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i32 4 +; AVX-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x double>, ptr [[TMP9]], align 8 +; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i32 8 +; AVX-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x double>, ptr [[TMP10]], align 8 +; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i32 12 +; AVX-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x double>, ptr [[TMP11]], align 8 +; AVX-NEXT: [[TMP12:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD]], +; AVX-NEXT: [[TMP13:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD4]], +; AVX-NEXT: [[TMP14:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD5]], +; AVX-NEXT: [[TMP15:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD6]], +; AVX-NEXT: [[TMP16:%.*]] = fadd fast <4 x double> [[VEC_PHI]], [[WIDE_LOAD]] +; AVX-NEXT: [[TMP17:%.*]] = fadd fast <4 x double> [[VEC_PHI1]], [[WIDE_LOAD4]] +; AVX-NEXT: [[TMP18:%.*]] = fadd fast <4 x double> [[VEC_PHI2]], [[WIDE_LOAD5]] +; AVX-NEXT: [[TMP19:%.*]] = fadd fast <4 x double> [[VEC_PHI3]], [[WIDE_LOAD6]] +; AVX-NEXT: [[TMP20:%.*]] = xor <4 x i1> [[TMP12]], +; AVX-NEXT: [[TMP21:%.*]] = xor <4 x i1> [[TMP13]], +; 
AVX-NEXT: [[TMP22:%.*]] = xor <4 x i1> [[TMP14]], +; AVX-NEXT: [[TMP23:%.*]] = xor <4 x i1> [[TMP15]], +; AVX-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP16]], <4 x double> [[VEC_PHI]] +; AVX-NEXT: [[PREDPHI7]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP17]], <4 x double> [[VEC_PHI1]] +; AVX-NEXT: [[PREDPHI8]] = select <4 x i1> [[TMP14]], <4 x double> [[TMP18]], <4 x double> [[VEC_PHI2]] +; AVX-NEXT: [[PREDPHI9]] = select <4 x i1> [[TMP15]], <4 x double> [[TMP19]], <4 x double> [[VEC_PHI3]] +; AVX-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16 +; AVX-NEXT: [[TMP24:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 +; AVX-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX: middle.block: -; AVX-NEXT: [[TMP7:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[PREDPHI]]) +; AVX-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x double> [[PREDPHI7]], [[PREDPHI]] +; AVX-NEXT: [[BIN_RDX10:%.*]] = fadd fast <4 x double> [[PREDPHI8]], [[BIN_RDX]] +; AVX-NEXT: [[BIN_RDX11:%.*]] = fadd fast <4 x double> [[PREDPHI9]], [[BIN_RDX10]] +; AVX-NEXT: [[TMP25:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[BIN_RDX11]]) ; AVX-NEXT: [[CMP_N:%.*]] = icmp eq i32 32, 32 ; AVX-NEXT: br i1 [[CMP_N]], label [[DONE:%.*]], label [[SCALAR_PH]] ; AVX: scalar.ph: ; AVX-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 32, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; AVX-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; AVX-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] ; AVX-NEXT: br label [[LOOP:%.*]] ; AVX: loop: ; AVX-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] @@ -156,9 +196,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] ; AVX-NEXT: [[I_NEXT]] = add i32 [[I]], 1 ; AVX-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP2:![0-9]+]] +; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP3:![0-9]+]] ; AVX: done: -; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] ; AVX-NEXT: ret double [[TOT_NEXT_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll index d3604b51a82f9..93d4e4c6d1673 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll @@ -1,16 +1,13 @@ -; Check that we won't interleave by more than "best known" estimated trip count. +; Check that we won't interleave by more than half the "best known" estimated trip count. -; The loop is expected to be vectorized by 4 and interleaving suppresed due to -; short trip count which is controled by "tiny-trip-count-interleave-threshold". -; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s ; ; The loop is expected to be vectorized by 4 and computed interleaving factor is 1. ; Thus the resulting step is 4. 
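; As a rough illustration of the capping behaviour these comments describe (a sketch
; only, not code from this patch): assuming the branch_weights of 1 and 9 at the end
; of this test imply an estimated trip count of about 10, limiting interleaving to
; half that trip count gives IC = max(1, (TC/2)/VF). The helper name cappedIC below
; is hypothetical and the numbers are hard-coded for this example.

// Hypothetical standalone sketch (not part of the patch): cap the interleave
// count so no more than half the estimated trip count is consumed by the
// interleaved vector body, matching the expectations stated in the comments.
#include <algorithm>
#include <cstdio>

static unsigned cappedIC(unsigned EstimatedTC, unsigned VF) {
  // IC <= (EstimatedTC / 2) / VF, but never below 1.
  return std::max(1u, (EstimatedTC / 2) / VF);
}

int main() {
  unsigned TC = 10; // assumed estimate from branch_weights 1:9
  std::printf("VF=4: IC=%u step=%u\n", cappedIC(TC, 4), 4 * cappedIC(TC, 4)); // IC=1, step 4
  std::printf("VF=2: IC=%u step=%u\n", cappedIC(TC, 2), 2 * cappedIC(TC, 2)); // IC=2, step 4
  return 0;
}

; In both RUN configurations below the resulting vector-loop step is VF * IC = 4,
; which is what the CHECK lines in this test verify.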
-; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -tiny-trip-count-interleave-threshold=4 -S < %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s ; The loop is expected to be vectorized by 2 and computed interleaving factor is 2. ; Thus the resulting step is 4. -; RUN: opt -passes=loop-vectorize -force-vector-width=2 -vectorizer-min-trip-count=4 -tiny-trip-count-interleave-threshold=4 -S < %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -force-vector-width=2 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s ; Check that we won't interleave by more than "best known" estimated trip count. @@ -56,4 +53,4 @@ for.body: ; preds = %for.body, %for.body br i1 %exitcond, label %for.cond.cleanup.loopexit, label %for.body, !prof !1 } -!1 = !{!"branch_weights", i32 1, i32 5} +!1 = !{!"branch_weights", i32 1, i32 9} diff --git a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll index 93c78f42d19a8..4ee4b13f40d58 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll @@ -217,20 +217,41 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0 -; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP4]], align 64 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP9]], align 64 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP10]], align 64 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i8>, ptr [[TMP11]], align 64 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[DST]], 
i64 [[TMP2]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 0 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP16]], align 64 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 4 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD1]], ptr [[TMP17]], align 64 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 8 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD2]], ptr [[TMP18]], align 64 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 12 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD3]], ptr [[TMP19]], align 64 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 20, 20 +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 20, 16 ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 20, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[LOOP]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index ef1912fffde29..f9a6fc8439da1 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -2525,49 +2525,148 @@ define i32 @test_stride_three(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP116:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP117:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP118:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP119:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 3 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 6 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 9 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement 
<4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 12 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 15 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 18 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 21 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 24 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 27 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 30 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 33 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 36 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 39 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 42 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 45 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr 
[[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: 
[[TMP74:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP64]], align 4 +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[TMP66]], align 4 +; CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[TMP67]], align 4 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <4 x i32> poison, i32 [[TMP80]], i32 0 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i32> [[TMP84]], i32 [[TMP81]], i32 1 +; CHECK-NEXT: [[TMP86:%.*]] = insertelement <4 x i32> [[TMP85]], i32 [[TMP82]], i32 2 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP83]], i32 3 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP68]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP69]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP71]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP93:%.*]] = insertelement <4 x i32> [[TMP92]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP94:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP95:%.*]] = insertelement <4 x i32> [[TMP94]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP72]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP73]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[TMP74]], align 4 +; CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <4 x i32> poison, i32 [[TMP96]], i32 0 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP97]], i32 1 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP101]], i32 [[TMP98]], i32 2 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP99]], i32 3 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP76]], align 4 +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP77]], align 4 +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP78]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP79]], align 4 +; CHECK-NEXT: [[TMP108:%.*]] = insertelement <4 x i32> poison, i32 [[TMP104]], i32 0 +; CHECK-NEXT: [[TMP109:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP110:%.*]] = insertelement <4 x i32> [[TMP109]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP111:%.*]] = insertelement <4 x i32> [[TMP110]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP112:%.*]] = xor <4 x i1> [[TMP39]], +; CHECK-NEXT: [[TMP113:%.*]] = xor <4 x i1> [[TMP47]], +; CHECK-NEXT: [[TMP114:%.*]] = xor <4 x i1> [[TMP55]], +; CHECK-NEXT: [[TMP115:%.*]] = xor <4 x i1> [[TMP63]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP87]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI4:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP95]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI5:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI6:%.*]] = select <4 x i1> [[TMP63]], 
<4 x i32> [[TMP111]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP116]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP117]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI4]] +; CHECK-NEXT: [[TMP118]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI5]] +; CHECK-NEXT: [[TMP119]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP120:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 +; CHECK-NEXT: br i1 [[TMP120]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP117]], [[TMP116]] +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP118]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP119]], [[BIN_RDX7]] +; CHECK-NEXT: [[TMP121:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 35, 32 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2586,7 +2685,7 @@ define i32 @test_stride_three(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: @@ -2624,49 +2723,82 @@ define i32 @test_non_unit_stride_four(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP58:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP59:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 4 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 12 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: 
[[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 20 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 24 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 28 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP16:%.*]] = load i1, ptr [[TMP8]], align 1 +; CHECK-NEXT: [[TMP17:%.*]] = load i1, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[TMP18:%.*]] = load i1, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[TMP19:%.*]] = load i1, ptr [[TMP11]], align 1 +; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i1> poison, i1 [[TMP16]], i32 0 +; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i1> [[TMP20]], i1 [[TMP17]], i32 1 +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i1> [[TMP21]], i1 [[TMP18]], i32 2 +; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i1> [[TMP22]], i1 [[TMP19]], i32 3 +; CHECK-NEXT: [[TMP24:%.*]] = load i1, ptr [[TMP12]], align 1 +; CHECK-NEXT: [[TMP25:%.*]] = load i1, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[TMP26:%.*]] = load i1, ptr [[TMP14]], align 1 +; CHECK-NEXT: [[TMP27:%.*]] = 
load i1, ptr [[TMP15]], align 1 +; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i1> poison, i1 [[TMP24]], i32 0 +; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i1> [[TMP28]], i1 [[TMP25]], i32 1 +; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i1> [[TMP29]], i1 [[TMP26]], i32 2 +; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i1> [[TMP30]], i1 [[TMP27]], i32 3 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP32]], align 4 +; CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP34]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP35]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> poison, i32 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i32> [[TMP44]], i32 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i32> [[TMP45]], i32 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i32> [[TMP46]], i32 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP37]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP38]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load i32, ptr [[TMP39]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i32> poison, i32 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i32> [[TMP52]], i32 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i32> [[TMP53]], i32 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i32> [[TMP54]], i32 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = xor <4 x i1> [[TMP23]], +; CHECK-NEXT: [[TMP57:%.*]] = xor <4 x i1> [[TMP31]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP23]], <4 x i32> [[TMP47]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP31]], <4 x i32> [[TMP55]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP58]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP59]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 +; CHECK-NEXT: br i1 [[TMP60]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP59]], [[TMP58]] +; CHECK-NEXT: [[TMP61:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 27, 24 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ 
[[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP61]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2685,7 +2817,7 @@ define i32 @test_non_unit_stride_four(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP33:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP61]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: @@ -2723,49 +2855,148 @@ define i32 @test_non_unit_stride_five(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP116:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP117:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP118:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP119:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 5 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 10 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 15 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x 
i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], <i1 true, i1 true, i1 true, i1 true> -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 20 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 25 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 30 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 35 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 40 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 45 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 50 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 55 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 60 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 65 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 70 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 75 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; 
CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP64]], align 4 +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[TMP66]], align 4 +; CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[TMP67]], align 4 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <4 x i32> poison, i32 [[TMP80]], i32 0 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i32> [[TMP84]], i32 [[TMP81]], i32 1 +; 
CHECK-NEXT: [[TMP86:%.*]] = insertelement <4 x i32> [[TMP85]], i32 [[TMP82]], i32 2 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP83]], i32 3 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP68]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP69]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP71]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP93:%.*]] = insertelement <4 x i32> [[TMP92]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP94:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP95:%.*]] = insertelement <4 x i32> [[TMP94]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP72]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP73]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[TMP74]], align 4 +; CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <4 x i32> poison, i32 [[TMP96]], i32 0 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP97]], i32 1 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP101]], i32 [[TMP98]], i32 2 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP99]], i32 3 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP76]], align 4 +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP77]], align 4 +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP78]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP79]], align 4 +; CHECK-NEXT: [[TMP108:%.*]] = insertelement <4 x i32> poison, i32 [[TMP104]], i32 0 +; CHECK-NEXT: [[TMP109:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP110:%.*]] = insertelement <4 x i32> [[TMP109]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP111:%.*]] = insertelement <4 x i32> [[TMP110]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP112:%.*]] = xor <4 x i1> [[TMP39]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP113:%.*]] = xor <4 x i1> [[TMP47]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP114:%.*]] = xor <4 x i1> [[TMP55]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP115:%.*]] = xor <4 x i1> [[TMP63]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP87]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI4:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP95]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI5:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI6:%.*]] = select <4 x i1> [[TMP63]], <4 x i32> [[TMP111]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP116]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP117]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI4]] +; CHECK-NEXT: [[TMP118]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI5]] +; CHECK-NEXT: [[TMP119]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP120:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP120]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 22, 20 +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP117]], [[TMP116]] +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP118]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP119]], [[BIN_RDX7]] +; 
CHECK-NEXT: [[TMP121:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 22, 16 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 80, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2784,7 +3015,7 @@ define i32 @test_non_unit_stride_five(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP35:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: @@ -2821,74 +3052,245 @@ define i32 @neg_test_non_unit_stride_off_by_four_bytes(i64 %len, ptr %test_base) ; CHECK: vector.ph: ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[PRED_LOAD_CONTINUE6]] ] +; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE33:%.*]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP148:%.*]], [[PRED_LOAD_CONTINUE33]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP149:%.*]], [[PRED_LOAD_CONTINUE33]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP150:%.*]], [[PRED_LOAD_CONTINUE33]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP151:%.*]], [[PRED_LOAD_CONTINUE33]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 2 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 4 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement 
<4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP15]], i32 0 -; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 8 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 10 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 12 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 14 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 16 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 18 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 20 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 22 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 24 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 26 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 28 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 30 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = 
load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = extractelement <4 x i1> [[TMP39]], i32 0 +; CHECK-NEXT: br i1 [[TMP64]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] ; CHECK: pred.load.if: -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> poison, i32 [[TMP18]], i32 0 +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP67:%.*]] = insertelement <4 x i32> poison, i32 [[TMP66]], i32 0 ; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]] ; CHECK: pred.load.continue: -; CHECK-NEXT: [[TMP20:%.*]] = phi <4 x i32> [ poison, [[VECTOR_BODY]] ], [ [[TMP19]], [[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] -; CHECK: pred.load.if1: -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP23]], i32 1 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]] -; CHECK: pred.load.continue2: -; CHECK-NEXT: [[TMP25:%.*]] = phi <4 x i32> [ [[TMP20]], [[PRED_LOAD_CONTINUE]] ], [ [[TMP24]], [[PRED_LOAD_IF1]] ] -; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP15]], i32 2 -; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] -; CHECK: pred.load.if3: -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP28]], i32 2 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]] -; CHECK: pred.load.continue4: -; CHECK-NEXT: [[TMP30:%.*]] = phi <4 x i32> [ [[TMP25]], [[PRED_LOAD_CONTINUE2]] ], [ [[TMP29]], [[PRED_LOAD_IF3]] ] -; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i1> [[TMP15]], i32 3 -; CHECK-NEXT: br i1 [[TMP31]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]] -; CHECK: pred.load.if5: -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP33:%.*]] = load 
i32, ptr [[TMP32]], align 4 -; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP33]], i32 3 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]] -; CHECK: pred.load.continue6: -; CHECK-NEXT: [[TMP35:%.*]] = phi <4 x i32> [ [[TMP30]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP34]], [[PRED_LOAD_IF5]] ] -; CHECK-NEXT: [[TMP36:%.*]] = xor <4 x i1> [[TMP15]], <i1 true, i1 true, i1 true, i1 true> -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP35]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP38:%.*]] = icmp eq i64 [[INDEX_NEXT]], 52 -; CHECK-NEXT: br i1 [[TMP38]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; CHECK-NEXT: [[TMP68:%.*]] = phi <4 x i32> [ poison, [[VECTOR_BODY]] ], [ [[TMP67]], [[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP69:%.*]] = extractelement <4 x i1> [[TMP39]], i32 1 +; CHECK-NEXT: br i1 [[TMP69]], label [[PRED_LOAD_IF4:%.*]], label [[PRED_LOAD_CONTINUE5:%.*]] +; CHECK: pred.load.if4: +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP71:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP72:%.*]] = insertelement <4 x i32> [[TMP68]], i32 [[TMP71]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE5]] +; CHECK: pred.load.continue5: +; CHECK-NEXT: [[TMP73:%.*]] = phi <4 x i32> [ [[TMP68]], [[PRED_LOAD_CONTINUE]] ], [ [[TMP72]], [[PRED_LOAD_IF4]] ] +; CHECK-NEXT: [[TMP74:%.*]] = extractelement <4 x i1> [[TMP39]], i32 2 +; CHECK-NEXT: br i1 [[TMP74]], label [[PRED_LOAD_IF6:%.*]], label [[PRED_LOAD_CONTINUE7:%.*]] +; CHECK: pred.load.if6: +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP77:%.*]] = insertelement <4 x i32> [[TMP73]], i32 [[TMP76]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE7]] +; CHECK: pred.load.continue7: +; CHECK-NEXT: [[TMP78:%.*]] = phi <4 x i32> [ [[TMP73]], [[PRED_LOAD_CONTINUE5]] ], [ [[TMP77]], [[PRED_LOAD_IF6]] ] +; CHECK-NEXT: [[TMP79:%.*]] = extractelement <4 x i1> [[TMP39]], i32 3 +; CHECK-NEXT: br i1 [[TMP79]], label [[PRED_LOAD_IF8:%.*]], label [[PRED_LOAD_CONTINUE9:%.*]] +; CHECK: pred.load.if8: +; CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = insertelement <4 x i32> [[TMP78]], i32 [[TMP81]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE9]] +; CHECK: pred.load.continue9: +; CHECK-NEXT: [[TMP83:%.*]] = phi <4 x i32> [ [[TMP78]], [[PRED_LOAD_CONTINUE7]] ], [ [[TMP82]], [[PRED_LOAD_IF8]] ] +; CHECK-NEXT: [[TMP84:%.*]] = extractelement <4 x i1> [[TMP47]], i32 0 +; CHECK-NEXT: br i1 [[TMP84]], label [[PRED_LOAD_IF10:%.*]], label [[PRED_LOAD_CONTINUE11:%.*]] +; CHECK: pred.load.if10: +; CHECK-NEXT: [[TMP85:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> poison, i32 [[TMP86]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE11]] +; CHECK: pred.load.continue11: +; CHECK-NEXT: [[TMP88:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE9]] ], [ [[TMP87]], [[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP89:%.*]] = extractelement <4 x i1> [[TMP47]], i32 1 +; CHECK-NEXT: br i1 [[TMP89]], label [[PRED_LOAD_IF12:%.*]], label 
[[PRED_LOAD_CONTINUE13:%.*]] +; CHECK: pred.load.if12: +; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP90]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> [[TMP88]], i32 [[TMP91]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE13]] +; CHECK: pred.load.continue13: +; CHECK-NEXT: [[TMP93:%.*]] = phi <4 x i32> [ [[TMP88]], [[PRED_LOAD_CONTINUE11]] ], [ [[TMP92]], [[PRED_LOAD_IF12]] ] +; CHECK-NEXT: [[TMP94:%.*]] = extractelement <4 x i1> [[TMP47]], i32 2 +; CHECK-NEXT: br i1 [[TMP94]], label [[PRED_LOAD_IF14:%.*]], label [[PRED_LOAD_CONTINUE15:%.*]] +; CHECK: pred.load.if14: +; CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP95]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP96]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE15]] +; CHECK: pred.load.continue15: +; CHECK-NEXT: [[TMP98:%.*]] = phi <4 x i32> [ [[TMP93]], [[PRED_LOAD_CONTINUE13]] ], [ [[TMP97]], [[PRED_LOAD_IF14]] ] +; CHECK-NEXT: [[TMP99:%.*]] = extractelement <4 x i1> [[TMP47]], i32 3 +; CHECK-NEXT: br i1 [[TMP99]], label [[PRED_LOAD_IF16:%.*]], label [[PRED_LOAD_CONTINUE17:%.*]] +; CHECK: pred.load.if16: +; CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP101:%.*]] = load i32, ptr [[TMP100]], align 4 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP98]], i32 [[TMP101]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE17]] +; CHECK: pred.load.continue17: +; CHECK-NEXT: [[TMP103:%.*]] = phi <4 x i32> [ [[TMP98]], [[PRED_LOAD_CONTINUE15]] ], [ [[TMP102]], [[PRED_LOAD_IF16]] ] +; CHECK-NEXT: [[TMP104:%.*]] = extractelement <4 x i1> [[TMP55]], i32 0 +; CHECK-NEXT: br i1 [[TMP104]], label [[PRED_LOAD_IF18:%.*]], label [[PRED_LOAD_CONTINUE19:%.*]] +; CHECK: pred.load.if18: +; CHECK-NEXT: [[TMP105:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP105]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = insertelement <4 x i32> poison, i32 [[TMP106]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE19]] +; CHECK: pred.load.continue19: +; CHECK-NEXT: [[TMP108:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE17]] ], [ [[TMP107]], [[PRED_LOAD_IF18]] ] +; CHECK-NEXT: [[TMP109:%.*]] = extractelement <4 x i1> [[TMP55]], i32 1 +; CHECK-NEXT: br i1 [[TMP109]], label [[PRED_LOAD_IF20:%.*]], label [[PRED_LOAD_CONTINUE21:%.*]] +; CHECK: pred.load.if20: +; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[TMP110]], align 4 +; CHECK-NEXT: [[TMP112:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP111]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE21]] +; CHECK: pred.load.continue21: +; CHECK-NEXT: [[TMP113:%.*]] = phi <4 x i32> [ [[TMP108]], [[PRED_LOAD_CONTINUE19]] ], [ [[TMP112]], [[PRED_LOAD_IF20]] ] +; CHECK-NEXT: [[TMP114:%.*]] = extractelement <4 x i1> [[TMP55]], i32 2 +; CHECK-NEXT: br i1 [[TMP114]], label [[PRED_LOAD_IF22:%.*]], label [[PRED_LOAD_CONTINUE23:%.*]] +; CHECK: pred.load.if22: +; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP116:%.*]] = load i32, ptr [[TMP115]], align 4 +; CHECK-NEXT: [[TMP117:%.*]] = insertelement <4 x i32> [[TMP113]], i32 [[TMP116]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE23]] +; 
CHECK: pred.load.continue23: +; CHECK-NEXT: [[TMP118:%.*]] = phi <4 x i32> [ [[TMP113]], [[PRED_LOAD_CONTINUE21]] ], [ [[TMP117]], [[PRED_LOAD_IF22]] ] +; CHECK-NEXT: [[TMP119:%.*]] = extractelement <4 x i1> [[TMP55]], i32 3 +; CHECK-NEXT: br i1 [[TMP119]], label [[PRED_LOAD_IF24:%.*]], label [[PRED_LOAD_CONTINUE25:%.*]] +; CHECK: pred.load.if24: +; CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[TMP120]], align 4 +; CHECK-NEXT: [[TMP122:%.*]] = insertelement <4 x i32> [[TMP118]], i32 [[TMP121]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE25]] +; CHECK: pred.load.continue25: +; CHECK-NEXT: [[TMP123:%.*]] = phi <4 x i32> [ [[TMP118]], [[PRED_LOAD_CONTINUE23]] ], [ [[TMP122]], [[PRED_LOAD_IF24]] ] +; CHECK-NEXT: [[TMP124:%.*]] = extractelement <4 x i1> [[TMP63]], i32 0 +; CHECK-NEXT: br i1 [[TMP124]], label [[PRED_LOAD_IF26:%.*]], label [[PRED_LOAD_CONTINUE27:%.*]] +; CHECK: pred.load.if26: +; CHECK-NEXT: [[TMP125:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[TMP125]], align 4 +; CHECK-NEXT: [[TMP127:%.*]] = insertelement <4 x i32> poison, i32 [[TMP126]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE27]] +; CHECK: pred.load.continue27: +; CHECK-NEXT: [[TMP128:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE25]] ], [ [[TMP127]], [[PRED_LOAD_IF26]] ] +; CHECK-NEXT: [[TMP129:%.*]] = extractelement <4 x i1> [[TMP63]], i32 1 +; CHECK-NEXT: br i1 [[TMP129]], label [[PRED_LOAD_IF28:%.*]], label [[PRED_LOAD_CONTINUE29:%.*]] +; CHECK: pred.load.if28: +; CHECK-NEXT: [[TMP130:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP131:%.*]] = load i32, ptr [[TMP130]], align 4 +; CHECK-NEXT: [[TMP132:%.*]] = insertelement <4 x i32> [[TMP128]], i32 [[TMP131]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE29]] +; CHECK: pred.load.continue29: +; CHECK-NEXT: [[TMP133:%.*]] = phi <4 x i32> [ [[TMP128]], [[PRED_LOAD_CONTINUE27]] ], [ [[TMP132]], [[PRED_LOAD_IF28]] ] +; CHECK-NEXT: [[TMP134:%.*]] = extractelement <4 x i1> [[TMP63]], i32 2 +; CHECK-NEXT: br i1 [[TMP134]], label [[PRED_LOAD_IF30:%.*]], label [[PRED_LOAD_CONTINUE31:%.*]] +; CHECK: pred.load.if30: +; CHECK-NEXT: [[TMP135:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP136:%.*]] = load i32, ptr [[TMP135]], align 4 +; CHECK-NEXT: [[TMP137:%.*]] = insertelement <4 x i32> [[TMP133]], i32 [[TMP136]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE31]] +; CHECK: pred.load.continue31: +; CHECK-NEXT: [[TMP138:%.*]] = phi <4 x i32> [ [[TMP133]], [[PRED_LOAD_CONTINUE29]] ], [ [[TMP137]], [[PRED_LOAD_IF30]] ] +; CHECK-NEXT: [[TMP139:%.*]] = extractelement <4 x i1> [[TMP63]], i32 3 +; CHECK-NEXT: br i1 [[TMP139]], label [[PRED_LOAD_IF32:%.*]], label [[PRED_LOAD_CONTINUE33]] +; CHECK: pred.load.if32: +; CHECK-NEXT: [[TMP140:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP141:%.*]] = load i32, ptr [[TMP140]], align 4 +; CHECK-NEXT: [[TMP142:%.*]] = insertelement <4 x i32> [[TMP138]], i32 [[TMP141]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE33]] +; CHECK: pred.load.continue33: +; CHECK-NEXT: [[TMP143:%.*]] = phi <4 x i32> [ [[TMP138]], [[PRED_LOAD_CONTINUE31]] ], [ [[TMP142]], [[PRED_LOAD_IF32]] ] +; CHECK-NEXT: [[TMP144:%.*]] = xor <4 x i1> [[TMP39]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP145:%.*]] = xor <4 x i1> [[TMP47]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[TMP146:%.*]] = xor <4 x i1> [[TMP55]], <i1 true, i1 true, i1 true, i1 true> +; 
CHECK-NEXT: [[TMP147:%.*]] = xor <4 x i1> [[TMP63]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP83]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI34:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI35:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP123]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI36:%.*]] = select <4 x i1> [[TMP63]], <4 x i32> [[TMP143]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP148]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP149]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI34]] +; CHECK-NEXT: [[TMP150]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI35]] +; CHECK-NEXT: [[TMP151]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI36]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP152:%.*]] = icmp eq i64 [[INDEX_NEXT]], 48 +; CHECK-NEXT: br i1 [[TMP152]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP37]]) -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 52, 52 +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP149]], [[TMP148]] +; CHECK-NEXT: [[BIN_RDX37:%.*]] = add <4 x i32> [[TMP150]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX38:%.*]] = add <4 x i32> [[TMP151]], [[BIN_RDX37]] +; CHECK-NEXT: [[TMP153:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX38]]) +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 52, 48 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 104, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP153]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2907,7 +3309,7 @@ define i32 @neg_test_non_unit_stride_off_by_four_bytes(i64 %len, ptr %test_base) ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP37:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP153]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll index 60b7398e8d7d2..c718b8372d604 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll @@ -19,766 +19,784 @@ target triple = "x86_64-unknown-linux-gnu" define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, i32 %N) { ; O1-LABEL: @enabled( ; O1-NEXT: entry: -; O1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O1-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O1-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; 
O1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O1-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O1-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O1-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O1-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O1-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O1-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O1-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O1-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O1-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O1-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O1-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O1-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O1-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O1-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O1-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O1-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O1-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O1-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O1-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O1-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O1-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O1-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O1-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O1-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O1-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O1-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O1-NEXT: store <4 x i32> [[TMP12]], ptr 
[[TMP14]], align 4 ; O1-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O1-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O1-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O1-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O1-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O1-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O1-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O1-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O1-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O1-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O1-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O1-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O1-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O1-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O1-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O1-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O1-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O1-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O1-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O1-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O1-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O1-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O1-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O1-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O1-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O1-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O1-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O1-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O1-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O1-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O1-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O1-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O1-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O1-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O1-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O1-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O1-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O1-NEXT: [[TMP29:%.*]] = 
getelementptr inbounds i32, ptr [[B]], i64 44 +; O1-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O1-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O1-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O1-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O1-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O1-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O1-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O1-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O1-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O1-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O1-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O1-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O1-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O1-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O1-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O1-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O1-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O1-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O1-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O1-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O1-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O1-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O1-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O1-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O1-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O1-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O1-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O1-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O1-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O1-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O1-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O1-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O1-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O1-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O1-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O1-NEXT: ret i32 [[TMP46]] ; ; O2-LABEL: @enabled( ; O2-NEXT: entry: -; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x 
i32> poison, i32 [[N:%.*]], i64 0 +; O2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O2-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP13:%.*]] = add nsw 
<4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O2-NEXT: 
[[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O2-NEXT: ret i32 [[TMP46]] ; ; O3-LABEL: @enabled( ; 
O3-NEXT: entry: -; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O3-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O3-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O3-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O3-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O3-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O3-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O3-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O3-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O3-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O3-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O3-NEXT: 
[[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O3-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O3-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O3-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O3-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O3-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O3-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O3-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O3-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O3-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O3-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O3-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O3-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O3-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O3-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O3-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O3-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O3-NEXT: store <4 x i32> [[TMP29]], ptr 
[[TMP30]], align 4 -; O3-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O3-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O3-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O3-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O3-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O3-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O3-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O3-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O3-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O3-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O3-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O3-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O3-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O3-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O3-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O3-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O3-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O3-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O3-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O3-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 
+; O3-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3-NEXT: ret i32 [[TMP46]] ; ; O3DEFAULT-LABEL: @enabled( ; O3DEFAULT-NEXT: entry: -; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O3DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DEFAULT-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O3DEFAULT-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DEFAULT-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3DEFAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DEFAULT-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DEFAULT-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O3DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DEFAULT-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr 
[[A]], i64 16 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DEFAULT-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DEFAULT-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O3DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DEFAULT-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3DEFAULT-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DEFAULT-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O3DEFAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O3DEFAULT-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DEFAULT-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DEFAULT-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O3DEFAULT-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3DEFAULT-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DEFAULT-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DEFAULT-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; 
O3DEFAULT-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O3DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DEFAULT-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O3DEFAULT-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DEFAULT-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DEFAULT-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O3DEFAULT-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DEFAULT-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DEFAULT-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DEFAULT-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O3DEFAULT-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O3DEFAULT-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DEFAULT-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DEFAULT-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O3DEFAULT-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DEFAULT-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DEFAULT-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DEFAULT-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O3DEFAULT-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DEFAULT-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O3DEFAULT-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DEFAULT-NEXT: [[TMP41:%.*]] = add nsw 
<4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O3DEFAULT-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DEFAULT-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DEFAULT-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DEFAULT-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O3DEFAULT-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DEFAULT-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DEFAULT-NEXT: ret i32 [[TMP46]] ; ; Os-LABEL: @enabled( ; Os-NEXT: entry: -; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Os-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Os-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; Os-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Os-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Os-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Os-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; Os-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Os-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; Os-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; Os-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Os-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; Os-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; Os-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Os-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Os-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Os-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds i32, ptr [[B]], i64 12 +; Os-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Os-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; Os-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; Os-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; Os-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Os-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Os-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Os-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; Os-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Os-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Os-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Os-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; Os-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Os-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; Os-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; Os-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; Os-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; Os-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Os-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Os-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; Os-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Os-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Os-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Os-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; Os-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Os-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; Os-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; Os-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; Os-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Os-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Os-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; Os-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; Os-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; 
Os-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Os-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; Os-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Os-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Os-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; Os-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Os-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; Os-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; Os-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; Os-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Os-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Os-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; Os-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Os-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; Os-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Os-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; Os-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Os-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; Os-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; Os-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; Os-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; Os-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; Os-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; Os-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Os-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; Os-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; Os-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Os-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; Os-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Os-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; Os-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Os-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; Os-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; Os-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Os-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; Os-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; Os-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; Os-NEXT: store <4 x 
i32> [[TMP38]], ptr [[TMP39]], align 4 +; Os-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Os-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; Os-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Os-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; Os-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Os-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; Os-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Os-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Os-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; Os-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Os-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; Os-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; Os-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; Os-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Os-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Os-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Os-NEXT: ret i32 [[TMP46]] ; ; Oz-LABEL: @enabled( ; Oz-NEXT: entry: -; Oz-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Oz-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Oz-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; Oz-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; Oz-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; Oz-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Oz-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Oz-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Oz-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; Oz-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Oz-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; Oz-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Oz-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; Oz-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; Oz-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Oz-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; Oz-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; Oz-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; Oz-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Oz-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; Oz-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Oz-NEXT: 
[[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Oz-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; Oz-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Oz-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; Oz-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; Oz-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; Oz-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Oz-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; Oz-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Oz-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; Oz-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Oz-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; Oz-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Oz-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Oz-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; Oz-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Oz-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; Oz-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; Oz-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; Oz-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Oz-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; Oz-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Oz-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; Oz-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Oz-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; Oz-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Oz-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Oz-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; Oz-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Oz-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; Oz-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; Oz-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; Oz-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Oz-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; Oz-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], 
align 4 -; Oz-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; Oz-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Oz-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; Oz-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Oz-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Oz-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; Oz-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Oz-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; Oz-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; Oz-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; Oz-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Oz-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; Oz-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Oz-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; Oz-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Oz-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; Oz-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Oz-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; Oz-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; Oz-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; Oz-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; Oz-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; Oz-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; Oz-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; Oz-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; Oz-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Oz-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; Oz-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Oz-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; Oz-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Oz-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; Oz-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; Oz-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Oz-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], 
i64 48 +; Oz-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; Oz-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; Oz-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Oz-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; Oz-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Oz-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; Oz-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Oz-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; Oz-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Oz-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Oz-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; Oz-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Oz-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; Oz-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; Oz-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; Oz-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; Oz-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Oz-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Oz-NEXT: ret i32 [[TMP46]] ; ; O1VEC2-LABEL: @enabled( ; O1VEC2-NEXT: entry: -; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O1VEC2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O1VEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O1VEC2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O1VEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O1VEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O1VEC2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O1VEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP6:%.*]] = 
getelementptr inbounds i32, ptr [[A]], i64 8 -; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O1VEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O1VEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O1VEC2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O1VEC2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1VEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O1VEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O1VEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O1VEC2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O1VEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O1VEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O1VEC2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O1VEC2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O1VEC2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O1VEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O1VEC2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O1VEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O1VEC2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O1VEC2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O1VEC2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O1VEC2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O1VEC2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O1VEC2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O1VEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O1VEC2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O1VEC2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O1VEC2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O1VEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O1VEC2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O1VEC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O1VEC2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O1VEC2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP20:%.*]] = 
getelementptr inbounds i32, ptr [[A]], i64 24 +; O1VEC2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O1VEC2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O1VEC2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O1VEC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O1VEC2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O1VEC2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O1VEC2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O1VEC2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O1VEC2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O1VEC2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O1VEC2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O1VEC2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O1VEC2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O1VEC2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O1VEC2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O1VEC2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O1VEC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O1VEC2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O1VEC2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O1VEC2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O1VEC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O1VEC2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O1VEC2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O1VEC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O1VEC2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O1VEC2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O1VEC2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O1VEC2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O1VEC2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O1VEC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O1VEC2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O1VEC2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O1VEC2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O1VEC2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr 
[[B]], i64 52 -; O1VEC2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O1VEC2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O1VEC2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O1VEC2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O1VEC2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O1VEC2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O1VEC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O1VEC2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O1VEC2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O1VEC2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O1VEC2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O1VEC2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O1VEC2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O1VEC2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O1VEC2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O1VEC2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O1VEC2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O1VEC2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O1VEC2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O1VEC2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O1VEC2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O1VEC2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O1VEC2-NEXT: ret i32 [[TMP46]] ; ; OzVEC2-LABEL: @enabled( ; OzVEC2-NEXT: entry: -; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; OzVEC2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; OzVEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; OzVEC2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; OzVEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; OzVEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP0:%.*]] = 
getelementptr inbounds i32, ptr [[B]], i64 4 +; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; OzVEC2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; OzVEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; OzVEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; OzVEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; OzVEC2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; OzVEC2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; OzVEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; OzVEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; OzVEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; OzVEC2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; OzVEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; OzVEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; OzVEC2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; OzVEC2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; OzVEC2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; OzVEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; OzVEC2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; OzVEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; OzVEC2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; OzVEC2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; OzVEC2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; OzVEC2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; OzVEC2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; OzVEC2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; OzVEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; OzVEC2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr 
[[A]], i64 24 -; OzVEC2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; OzVEC2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; OzVEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; OzVEC2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; OzVEC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; OzVEC2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; OzVEC2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; OzVEC2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; OzVEC2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; OzVEC2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; OzVEC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; OzVEC2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; OzVEC2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; OzVEC2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; OzVEC2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; OzVEC2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; OzVEC2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; OzVEC2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; OzVEC2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; OzVEC2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; OzVEC2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; OzVEC2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; OzVEC2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; OzVEC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; OzVEC2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; OzVEC2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; OzVEC2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; OzVEC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; OzVEC2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; OzVEC2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; OzVEC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; OzVEC2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; OzVEC2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP32:%.*]] = 
getelementptr inbounds i32, ptr [[A]], i64 40 +; OzVEC2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; OzVEC2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; OzVEC2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; OzVEC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; OzVEC2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; OzVEC2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; OzVEC2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; OzVEC2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; OzVEC2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; OzVEC2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; OzVEC2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; OzVEC2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; OzVEC2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; OzVEC2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; OzVEC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; OzVEC2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; OzVEC2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; OzVEC2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; OzVEC2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; OzVEC2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; OzVEC2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; OzVEC2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; OzVEC2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; OzVEC2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; OzVEC2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; OzVEC2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; OzVEC2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; OzVEC2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; OzVEC2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; OzVEC2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; OzVEC2-NEXT: ret i32 [[TMP46]] ; ; O3DIS-LABEL: @enabled( ; O3DIS-NEXT: entry: -; O3DIS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DIS-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DIS-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> 
poison, <4 x i32> zeroinitializer +; O3DIS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O3DIS-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O3DIS-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DIS-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DIS-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O3DIS-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DIS-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O3DIS-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DIS-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O3DIS-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O3DIS-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DIS-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O3DIS-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DIS-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O3DIS-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3DIS-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O3DIS-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DIS-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DIS-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O3DIS-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DIS-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O3DIS-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O3DIS-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O3DIS-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DIS-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O3DIS-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DIS-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O3DIS-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DIS-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O3DIS-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DIS-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DIS-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O3DIS-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DIS-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP13:%.*]] = add 
nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O3DIS-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O3DIS-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O3DIS-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DIS-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O3DIS-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DIS-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O3DIS-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DIS-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O3DIS-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3DIS-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DIS-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O3DIS-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DIS-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O3DIS-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O3DIS-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O3DIS-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DIS-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O3DIS-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DIS-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O3DIS-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DIS-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O3DIS-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3DIS-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DIS-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O3DIS-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DIS-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O3DIS-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O3DIS-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O3DIS-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DIS-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O3DIS-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DIS-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O3DIS-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DIS-NEXT: [[TMP31:%.*]] 
= getelementptr inbounds i32, ptr [[B]], i64 44 -; O3DIS-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DIS-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DIS-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O3DIS-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DIS-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O3DIS-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O3DIS-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O3DIS-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DIS-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O3DIS-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DIS-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O3DIS-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DIS-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O3DIS-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DIS-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DIS-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O3DIS-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DIS-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O3DIS-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O3DIS-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O3DIS-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DIS-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O3DIS-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DIS-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O3DIS-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DIS-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O3DIS-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DIS-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DIS-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O3DIS-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DIS-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O3DIS-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O3DIS-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O3DIS-NEXT: [[TMP45:%.*]] = getelementptr 
inbounds i32, ptr [[A]], i64 60 -; O3DIS-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DIS-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DIS-NEXT: ret i32 [[TMP46]] ; @@ -821,341 +839,349 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; ; O2-LABEL: @nopragma( ; O2-NEXT: entry: -; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O2-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; 
O2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O2-NEXT: store <4 x i32> 
[[TMP25]], ptr [[TMP27]], align 4 ; O2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> 
[[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O2-NEXT: ret i32 [[TMP46]] ; ; O3-LABEL: @nopragma( ; O3-NEXT: entry: -; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O3-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O3-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O3-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O3-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O3-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O3-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O3-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; O3-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O3-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> 
[[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O3-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O3-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O3-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O3-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O3-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O3-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O3-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O3-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O3-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O3-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O3-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O3-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O3-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O3-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O3-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], 
align 4 ; O3-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O3-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O3-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O3-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O3-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O3-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O3-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O3-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O3-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O3-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O3-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O3-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O3-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O3-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O3-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O3-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O3-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O3-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O3-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3-NEXT: [[TMP41:%.*]] = 
getelementptr inbounds i32, ptr [[B]], i64 60 +; O3-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O3-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O3-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O3-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O3-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3-NEXT: ret i32 [[TMP46]] ; ; O3DEFAULT-LABEL: @nopragma( ; O3DEFAULT-NEXT: entry: -; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; O3DEFAULT-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; O3DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DEFAULT-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; O3DEFAULT-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DEFAULT-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3DEFAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DEFAULT-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DEFAULT-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; O3DEFAULT-NEXT: store <4 x i32> 
[[TMP6]], ptr [[TMP8]], align 4 ; O3DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DEFAULT-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DEFAULT-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DEFAULT-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; O3DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DEFAULT-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3DEFAULT-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DEFAULT-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; O3DEFAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; O3DEFAULT-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DEFAULT-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DEFAULT-NEXT: 
[[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; O3DEFAULT-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3DEFAULT-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DEFAULT-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DEFAULT-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; O3DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DEFAULT-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; O3DEFAULT-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DEFAULT-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DEFAULT-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; O3DEFAULT-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DEFAULT-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DEFAULT-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DEFAULT-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; O3DEFAULT-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; O3DEFAULT-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DEFAULT-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DEFAULT-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; O3DEFAULT-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DEFAULT-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DEFAULT-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DEFAULT-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; 
O3DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; O3DEFAULT-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DEFAULT-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; O3DEFAULT-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DEFAULT-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; O3DEFAULT-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DEFAULT-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DEFAULT-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DEFAULT-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; O3DEFAULT-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; O3DEFAULT-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DEFAULT-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DEFAULT-NEXT: ret i32 [[TMP46]] ; ; Os-LABEL: @nopragma( ; Os-NEXT: entry: -; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Os-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 +; Os-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer +; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 ; Os-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Os-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Os-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Os-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 -; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 4 +; Os-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Os-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; Os-NEXT: store <4 x i32> [[TMP1]], ptr [[A:%.*]], align 4 ; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 4 ; Os-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Os-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 8 -; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 
x i32>, ptr [[TMP4]], align 4 -; Os-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP6:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 -; Os-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Os-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 -; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Os-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Os-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 12 +; Os-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Os-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 8 +; Os-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 ; Os-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 12 -; Os-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Os-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Os-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 16 -; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Os-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 -; Os-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Os-NEXT: [[TMP13:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 -; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Os-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Os-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 20 +; Os-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Os-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP14:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 16 +; Os-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 ; Os-NEXT: [[TMP15:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 20 -; Os-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; Os-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Os-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 24 -; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Os-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 -; Os-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Os-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 -; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Os-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Os-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 28 +; Os-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Os-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP20:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 24 
+; Os-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 ; Os-NEXT: [[TMP21:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 28 -; Os-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Os-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Os-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 32 -; Os-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; Os-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP24:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 -; Os-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Os-NEXT: [[TMP25:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 -; Os-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Os-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Os-NEXT: [[TMP23:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 36 +; Os-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Os-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP26:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 32 +; Os-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 ; Os-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 36 -; Os-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Os-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Os-NEXT: [[TMP28:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 40 -; Os-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Os-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP30:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 -; Os-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Os-NEXT: [[TMP31:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 -; Os-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Os-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; Os-NEXT: [[TMP29:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 44 +; Os-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; Os-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 40 +; Os-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 ; Os-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 44 -; Os-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Os-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; Os-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 48 -; Os-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Os-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 -; Os-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Os-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 -; Os-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Os-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr 
[[TMP34]], align 4 +; Os-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 52 +; Os-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Os-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 48 +; Os-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 ; Os-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 52 -; Os-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; Os-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Os-NEXT: [[TMP40:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 56 -; Os-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Os-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP42:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 -; Os-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Os-NEXT: [[TMP43:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 -; Os-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Os-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Os-NEXT: [[TMP41:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 60 +; Os-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Os-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT3]] +; Os-NEXT: [[TMP44:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 56 +; Os-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 ; Os-NEXT: [[TMP45:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 60 -; Os-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Os-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Os-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Os-NEXT: ret i32 [[TMP46]] ; @@ -1182,20 +1208,30 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: vector.ph: ; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O1VEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 +; O1VEC2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer ; O1VEC2-NEXT: br label [[VECTOR_BODY:%.*]] ; O1VEC2: vector.body: ; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; O1VEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 -; O1VEC2-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 -; O1VEC2-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP5]], align 4 -; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; O1VEC2-NEXT: [[TMP6:%.*]] = icmp eq i64 
[[INDEX_NEXT]], 64 -; O1VEC2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; O1VEC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] +; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1VEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; O1VEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; O1VEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; O1VEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP10]], align 4 +; O1VEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP11]], align 4 +; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; O1VEC2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 +; O1VEC2-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; O1VEC2: middle.block: ; O1VEC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 64, 64 ; O1VEC2-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -1205,16 +1241,16 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: for.body: ; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[N]] +; O1VEC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[N]] ; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 ; O1VEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; O1VEC2: for.end: -; O1VEC2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 -; O1VEC2-NEXT: ret i32 [[TMP8]] +; O1VEC2-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +; O1VEC2-NEXT: ret i32 [[TMP14]] ; ; OzVEC2-LABEL: @nopragma( ; OzVEC2-NEXT: entry: @@ -1222,20 +1258,30 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: vector.ph: ; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; OzVEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer +; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i32> poison, i32 [[N]], i64 0 +; OzVEC2-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT2]], <4 x i32> poison, <4 x i32> zeroinitializer ; OzVEC2-NEXT: br label 
[[VECTOR_BODY:%.*]] ; OzVEC2: vector.body: ; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; OzVEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 -; OzVEC2-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 -; OzVEC2-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP5]], align 4 -; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; OzVEC2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 -; OzVEC2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; OzVEC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] +; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; OzVEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT3]] +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; OzVEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; OzVEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; OzVEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP10]], align 4 +; OzVEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP11]], align 4 +; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; OzVEC2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 +; OzVEC2-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OzVEC2: middle.block: ; OzVEC2-NEXT: [[CMP_N:%.*]] = icmp eq i64 64, 64 ; OzVEC2-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] @@ -1245,16 +1291,16 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: for.body: ; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[N]] +; OzVEC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[N]] ; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 ; OzVEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OzVEC2: for.end: -; OzVEC2-NEXT: 
[[TMP8:%.*]] = load i32, ptr [[A]], align 4 -; OzVEC2-NEXT: ret i32 [[TMP8]] +; OzVEC2-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +; OzVEC2-NEXT: ret i32 [[TMP14]] ; ; O3DIS-LABEL: @nopragma( ; O3DIS-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index e27e44b4fd91f..9d45121c0e0a8 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -18,7 +18,10 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP144:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP145:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP146:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP147:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -27,63 +30,180 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA:%.*]], i64 [[IDXPROM]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4, !tbaa [[TBAA1:![0-9]+]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP3]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP4]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP5]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP6]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP7]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP10]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP12]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP13]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP14]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP15]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP16]], align 4, !tbaa [[TBAA1]] -; 
CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP17]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <8 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <8 x i32> [[TMP26]], i32 [[TMP19]], i32 1 -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <8 x i32> [[TMP27]], i32 [[TMP20]], i32 2 -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <8 x i32> [[TMP28]], i32 [[TMP21]], i32 3 -; CHECK-NEXT: [[TMP30:%.*]] = insertelement <8 x i32> [[TMP29]], i32 [[TMP22]], i32 4 -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <8 x i32> [[TMP30]], i32 [[TMP23]], i32 5 -; CHECK-NEXT: [[TMP32:%.*]] = insertelement <8 x i32> [[TMP31]], i32 [[TMP24]], i32 6 -; CHECK-NEXT: [[TMP33:%.*]] = insertelement <8 x i32> [[TMP32]], i32 [[TMP25]], i32 7 -; CHECK-NEXT: [[TMP34:%.*]] = mul nsw <8 x i32> [[TMP33]], [[WIDE_LOAD]] -; CHECK-NEXT: [[TMP35:%.*]] = add <8 x i32> [[VEC_PHI]], -; CHECK-NEXT: [[TMP36]] = add <8 x i32> [[TMP35]], [[TMP34]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 -; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 17 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 18 +; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 19 +; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 20 +; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 21 +; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 22 +; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[INDEX]], 23 +; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], 24 +; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 25 +; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[INDEX]], 26 +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 27 +; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[INDEX]], 28 +; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 29 +; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 +; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA:%.*]], i64 [[IDXPROM]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP24]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 0 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP36]], align 4, !tbaa [[TBAA1:![0-9]+]] +; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 8 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 24 +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = 
load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP3]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP4]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP5]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP6]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP7]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP8]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP9]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP10]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP11]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP12]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP13]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP14]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP15]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP16]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP17]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP18]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP19]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP20]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP21]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP22]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP23]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP24]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP25]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP26]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP27]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP28]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP29]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP30]], i64 [[IDXPROM5]] +; 
CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP31]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP40]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP73:%.*]] = load i32, ptr [[TMP41]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP42]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP43]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[TMP44]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP45]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP46]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP47]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP80:%.*]] = insertelement <8 x i32> poison, i32 [[TMP72]], i32 0 +; CHECK-NEXT: [[TMP81:%.*]] = insertelement <8 x i32> [[TMP80]], i32 [[TMP73]], i32 1 +; CHECK-NEXT: [[TMP82:%.*]] = insertelement <8 x i32> [[TMP81]], i32 [[TMP74]], i32 2 +; CHECK-NEXT: [[TMP83:%.*]] = insertelement <8 x i32> [[TMP82]], i32 [[TMP75]], i32 3 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <8 x i32> [[TMP83]], i32 [[TMP76]], i32 4 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <8 x i32> [[TMP84]], i32 [[TMP77]], i32 5 +; CHECK-NEXT: [[TMP86:%.*]] = insertelement <8 x i32> [[TMP85]], i32 [[TMP78]], i32 6 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <8 x i32> [[TMP86]], i32 [[TMP79]], i32 7 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP48]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP49]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP50]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP51]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[TMP52]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP93:%.*]] = load i32, ptr [[TMP53]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP54]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP95:%.*]] = load i32, ptr [[TMP55]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP96:%.*]] = insertelement <8 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP97:%.*]] = insertelement <8 x i32> [[TMP96]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP98:%.*]] = insertelement <8 x i32> [[TMP97]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP99:%.*]] = insertelement <8 x i32> [[TMP98]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <8 x i32> [[TMP99]], i32 [[TMP92]], i32 4 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <8 x i32> [[TMP100]], i32 [[TMP93]], i32 5 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <8 x i32> [[TMP101]], i32 [[TMP94]], i32 6 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <8 x i32> [[TMP102]], i32 [[TMP95]], i32 7 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP56]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP57]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP58]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP59]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP108:%.*]] = load i32, ptr [[TMP60]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP109:%.*]] = load i32, ptr [[TMP61]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP110:%.*]] = load i32, ptr [[TMP62]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[TMP63]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP112:%.*]] = insertelement <8 x i32> poison, i32 [[TMP104]], i32 0 +; 
CHECK-NEXT: [[TMP113:%.*]] = insertelement <8 x i32> [[TMP112]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP114:%.*]] = insertelement <8 x i32> [[TMP113]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP115:%.*]] = insertelement <8 x i32> [[TMP114]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP116:%.*]] = insertelement <8 x i32> [[TMP115]], i32 [[TMP108]], i32 4 +; CHECK-NEXT: [[TMP117:%.*]] = insertelement <8 x i32> [[TMP116]], i32 [[TMP109]], i32 5 +; CHECK-NEXT: [[TMP118:%.*]] = insertelement <8 x i32> [[TMP117]], i32 [[TMP110]], i32 6 +; CHECK-NEXT: [[TMP119:%.*]] = insertelement <8 x i32> [[TMP118]], i32 [[TMP111]], i32 7 +; CHECK-NEXT: [[TMP120:%.*]] = load i32, ptr [[TMP64]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[TMP65]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP122:%.*]] = load i32, ptr [[TMP66]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP123:%.*]] = load i32, ptr [[TMP67]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP124:%.*]] = load i32, ptr [[TMP68]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP125:%.*]] = load i32, ptr [[TMP69]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[TMP70]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP127:%.*]] = load i32, ptr [[TMP71]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP128:%.*]] = insertelement <8 x i32> poison, i32 [[TMP120]], i32 0 +; CHECK-NEXT: [[TMP129:%.*]] = insertelement <8 x i32> [[TMP128]], i32 [[TMP121]], i32 1 +; CHECK-NEXT: [[TMP130:%.*]] = insertelement <8 x i32> [[TMP129]], i32 [[TMP122]], i32 2 +; CHECK-NEXT: [[TMP131:%.*]] = insertelement <8 x i32> [[TMP130]], i32 [[TMP123]], i32 3 +; CHECK-NEXT: [[TMP132:%.*]] = insertelement <8 x i32> [[TMP131]], i32 [[TMP124]], i32 4 +; CHECK-NEXT: [[TMP133:%.*]] = insertelement <8 x i32> [[TMP132]], i32 [[TMP125]], i32 5 +; CHECK-NEXT: [[TMP134:%.*]] = insertelement <8 x i32> [[TMP133]], i32 [[TMP126]], i32 6 +; CHECK-NEXT: [[TMP135:%.*]] = insertelement <8 x i32> [[TMP134]], i32 [[TMP127]], i32 7 +; CHECK-NEXT: [[TMP136:%.*]] = mul nsw <8 x i32> [[TMP87]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP137:%.*]] = mul nsw <8 x i32> [[TMP103]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP138:%.*]] = mul nsw <8 x i32> [[TMP119]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP139:%.*]] = mul nsw <8 x i32> [[TMP135]], [[WIDE_LOAD6]] +; CHECK-NEXT: [[TMP140:%.*]] = add <8 x i32> [[VEC_PHI]], <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> +; CHECK-NEXT: [[TMP141:%.*]] = add <8 x i32> [[VEC_PHI1]], <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> +; CHECK-NEXT: [[TMP142:%.*]] = add <8 x i32> [[VEC_PHI2]], <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> +; CHECK-NEXT: [[TMP143:%.*]] = add <8 x i32> [[VEC_PHI3]], <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4> +; CHECK-NEXT: [[TMP144]] = add <8 x i32> [[TMP140]], [[TMP136]] +; CHECK-NEXT: [[TMP145]] = add <8 x i32> [[TMP141]], [[TMP137]] +; CHECK-NEXT: [[TMP146]] = add <8 x i32> [[TMP142]], [[TMP138]] +; CHECK-NEXT: [[TMP147]] = add <8 x i32> [[TMP143]], [[TMP139]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-NEXT: [[TMP148:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 +; CHECK-NEXT: br i1 [[TMP148]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP36]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP145]], [[TMP144]] +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <8 x i32> [[TMP146]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <8 x i32> [[TMP147]], [[BIN_RDX7]] +; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 100, 96 ; CHECK-NEXT: br i1 [[CMP_N]], label 
[[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP38]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], [[FOR_BODY]] ], [ [[TMP38]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], [[FOR_BODY]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ADD7_LCSSA]] ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD7]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[TBAA1]] ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP40]], [[TMP39]] +; CHECK-NEXT: [[TMP151:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP151]], [[TMP150]] ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUM_015]], 4 ; CHECK-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; entry: %idxprom = sext i32 %i to i64 diff --git a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll index 290be569bc125..76230994ed87d 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll @@ -6,37 +6,284 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128" target triple = "x86_64-apple-macosx10.8.0" -; We don't unroll this loop because it has a small constant trip count. +; We don't unroll this loop because it has a small constant trip count +; for which generating a scalar epilogue is not profitable. ; -; CHECK-VECTOR-LABEL: @foo( +; CHECK-VECTOR-LABEL: @foo_trip_count_8( ; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; -; CHECK-SCALAR-LABEL: @foo( +; CHECK-SCALAR-LABEL: @foo_trip_count_8( ; CHECK-SCALAR: load i32, ptr ; CHECK-SCALAR-NOT: load i32, ptr ; CHECK-SCALAR: store i32 ; CHECK-SCALAR-NOT: store i32 ; CHECK-SCALAR: ret -define i32 @foo(ptr nocapture %A) nounwind uwtable ssp { - br label %1 +define void @foo_trip_count_8(ptr nocapture %A) nounwind uwtable ssp { +entry: + br label %for.body -;