diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp index 55466720aa11f..1a7b301c35f2b 100644 --- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp +++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp @@ -267,11 +267,6 @@ static cl::opt<bool> EnableMaskedInterleavedMemAccesses( "enable-masked-interleaved-mem-accesses", cl::init(false), cl::Hidden, cl::desc("Enable vectorization on masked interleaved memory accesses in a loop")); -static cl::opt<unsigned> TinyTripCountInterleaveThreshold( - "tiny-trip-count-interleave-threshold", cl::init(128), cl::Hidden, - cl::desc("We don't interleave loops with a estimated constant trip count " - "below this number")); - static cl::opt<unsigned> ForceTargetNumScalarRegs( "force-target-num-scalar-regs", cl::init(0), cl::Hidden, cl::desc("A flag that overrides the target's number of scalar registers.")); @@ -5348,14 +5343,6 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF, auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop); const bool HasReductions = !Legal->getReductionVars().empty(); - // Do not interleave loops with a relatively small known or estimated trip - // count. But we will interleave when InterleaveSmallLoopScalarReduction is - // enabled, and the code has scalar reductions(HasReductions && VF = 1), - // because with the above conditions interleaving can expose ILP and break - // cross iteration dependences for reductions. - if (BestKnownTC && (*BestKnownTC < TinyTripCountInterleaveThreshold) && - !(InterleaveSmallLoopScalarReduction && HasReductions && VF.isScalar())) - return 1; // If we did not calculate the cost for VF (because the user selected the VF) // then we calculate the cost of VF here. diff --git a/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll index d1cff14c3f4b7..27ca1a7541db6 100644 --- a/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll +++ b/llvm/test/Transforms/LoopDistribute/basic-with-memchecks.ll @@ -79,6 +79,8 @@ entry: ; VECTORIZE: mul <4 x i32> +; VECTORIZE: mul <4 x i32> +; VECTORIZE-NOT: mul <4 x i32> for.body: ; preds = %for.body, %entry %ind = phi i64 [ 0, %entry ], [ %add, %for.body ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll index 03f055413583f..9a7b9f570cf77 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll @@ -326,34 +326,40 @@ define void @trunc_invariant_sdiv_result(i32 %a, i32 %b, ptr noalias %src, ptr % ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i64 16 ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP3]], align 1 -; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> -; CHECK-NEXT: [[TMP5:%.*]] = mul <16 x i16> [[TMP2]], [[TMP4]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] -; CHECK-NEXT: store <16 x i16> [[TMP5]], ptr [[TMP6]], align 2 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 -; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 -; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label 
[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <16 x i8>, ptr [[TMP4]], align 1 +; CHECK-NEXT: [[TMP5:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i16> +; CHECK-NEXT: [[TMP6:%.*]] = zext <16 x i8> [[WIDE_LOAD1]] to <16 x i16> +; CHECK-NEXT: [[TMP7:%.*]] = mul <16 x i16> [[TMP2]], [[TMP5]] +; CHECK-NEXT: [[TMP8:%.*]] = mul <16 x i16> [[TMP2]], [[TMP6]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i64 32 +; CHECK-NEXT: store <16 x i16> [[TMP7]], ptr [[TMP9]], align 2 +; CHECK-NEXT: store <16 x i16> [[TMP8]], ptr [[TMP10]], align 2 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 +; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]] ; CHECK: vec.epilog.iter.check: ; CHECK-NEXT: br i1 false, label [[VEC_EPILOG_SCALAR_PH]], label [[VEC_EPILOG_PH]] ; CHECK: vec.epilog.ph: -; CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[INVAR_DIV]] to i16 -; CHECK-NEXT: [[TMP9:%.*]] = insertelement <4 x i16> poison, i16 [[TMP8]], i64 0 -; CHECK-NEXT: [[TMP10:%.*]] = shufflevector <4 x i16> [[TMP9]], <4 x i16> poison, <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP12:%.*]] = trunc i32 [[INVAR_DIV]] to i16 +; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i16> poison, i16 [[TMP12]], i64 0 +; CHECK-NEXT: [[TMP14:%.*]] = shufflevector <4 x i16> [[TMP13]], <4 x i16> poison, <4 x i32> zeroinitializer ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX3:%.*]] = phi i64 [ 96, [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT5:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX3]] -; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x i8>, ptr [[TMP11]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = zext <4 x i8> [[WIDE_LOAD4]] to <4 x i16> -; CHECK-NEXT: [[TMP13:%.*]] = mul <4 x i16> [[TMP10]], [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX3]] -; CHECK-NEXT: store <4 x i16> [[TMP13]], ptr [[TMP14]], align 2 -; CHECK-NEXT: [[INDEX_NEXT5]] = add nuw i64 [[INDEX3]], 4 -; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT5]], 100 -; CHECK-NEXT: br i1 [[TMP15]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] +; CHECK-NEXT: [[INDEX4:%.*]] = phi i64 [ 96, [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT6:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[INDEX4]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x i8>, ptr [[TMP15]], align 1 +; CHECK-NEXT: [[TMP16:%.*]] = zext <4 x i8> [[WIDE_LOAD5]] to <4 x i16> +; CHECK-NEXT: [[TMP17:%.*]] = mul <4 x i16> [[TMP14]], [[TMP16]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i16, ptr [[DST]], i64 [[INDEX4]] +; CHECK-NEXT: store <4 x i16> [[TMP17]], ptr [[TMP18]], align 2 +; CHECK-NEXT: [[INDEX_NEXT6]] = add nuw i64 [[INDEX4]], 4 +; CHECK-NEXT: [[TMP19:%.*]] = icmp eq i64 [[INDEX_NEXT6]], 100 +; CHECK-NEXT: br i1 [[TMP19]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] ; CHECK: 
vec.epilog.scalar.ph: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll index 691c0fc8facc4..95cbf121377f9 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_estimated_tc.ll @@ -1,6 +1,5 @@ -; RUN: opt < %s -tiny-trip-count-interleave-threshold=16 -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s -; RUN: opt < %s -tiny-trip-count-interleave-threshold=16 -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR -; TODO: remove -tiny-trip-count-interleave-threshold once the interleave threshold is removed +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll index 6ea0229ab8ea0..a3b45f7b5ca51 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/interleave_count_for_known_tc.ll @@ -1,6 +1,5 @@ -; RUN: opt < %s -tiny-trip-count-interleave-threshold=16 -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s -; RUN: opt < %s -tiny-trip-count-interleave-threshold=16 -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR -; TODO: remove -tiny-trip-count-interleave-threshold once the interleave threshold is removed +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S -pass-remarks=loop-vectorize -disable-output 2>&1 | FileCheck %s +; RUN: opt < %s -force-target-max-vector-interleave=8 -p loop-vectorize -S 2>&1 | FileCheck %s -check-prefix=CHECK-IR target triple = "aarch64-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll index 0fe8fa3f4154a..3217f508f0adc 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll @@ -12,25 +12,38 @@ define void @induction_i7(ptr %dst) #0 { ; CHECK-LABEL: @induction_i7( ; CHECK: vector.ph: ; CHECK: %ind.end = trunc i64 %n.vec to i7 -; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2 -; CHECK-NEXT: [[TMP4:%.*]] = call @llvm.experimental.stepvector.nxv2i8() -; CHECK: [[TMP5:%.*]] = trunc [[TMP4]] to -; CHECK-NEXT: [[TMP6:%.*]] = add [[TMP5]], zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = mul [[TMP6]], shufflevector ( insertelement ( poison, i7 1, i64 0), poison, zeroinitializer) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.experimental.stepvector.nxv2i8() +; CHECK-NEXT: [[TMP7:%.*]] = trunc [[TMP6]] to +; CHECK-NEXT: [[TMP8:%.*]] = add [[TMP7]], 
zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], shufflevector ( insertelement ( poison, i7 1, i64 0), poison, zeroinitializer) +; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP9]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ] -; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP11:%.*]] = add [[VEC_IND]], zeroinitializer -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP10]] -; CHECK-NEXT: [[EXT:%.+]] = zext [[TMP11]] to -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0 -; CHECK-NEXT: store [[EXT]], ptr [[TMP13]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], -; +; CHECK-NEXT: [[STEP_ADD:%.*]] = add [[VEC_IND]], [[DOTSPLAT:%.*]] +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = add [[VEC_IND]], zeroinitializer +; CHECK-NEXT: [[TMP20:%.*]] = add [[STEP_ADD]], zeroinitializer +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP23:%.*]] = zext [[TMP19]] to +; CHECK-NEXT: [[TMP24:%.*]] = zext [[TMP20]] to +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0 +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP27:%.*]] = mul i64 [[TMP26]], 2 +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP27]] +; CHECK-NEXT: store [[TMP23]], ptr [[TMP25]], align 8 +; CHECK-NEXT: store [[TMP24]], ptr [[TMP28]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[STEP_ADD]], [[DOTSPLAT]] + entry: br label %for.body @@ -59,24 +72,34 @@ define void @induction_i3_zext(ptr %dst) #0 { ; CHECK-LABEL: @induction_i3_zext( ; CHECK: vector.ph: ; CHECK: %ind.end = trunc i64 %n.vec to i3 -; CHECK-NEXT: [[TMP15:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP16:%.*]] = mul i64 [[TMP15]], 2 -; CHECK: [[TMP4:%.*]] = call @llvm.experimental.stepvector.nxv2i8() -; CHECK: [[TMP5:%.*]] = trunc [[TMP4]] to -; CHECK-NEXT: [[TMP6:%.*]] = add [[TMP5]], zeroinitializer -; CHECK-NEXT: [[TMP7:%.*]] = mul [[TMP6]], shufflevector ( insertelement ( poison, i3 1, i64 0), poison, zeroinitializer) -; CHECK-NEXT: [[INDUCTION:%.*]] = add zeroinitializer, [[TMP7]] +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4 +; CHECK-NEXT: [[TMP6:%.*]] = call @llvm.experimental.stepvector.nxv2i8() +; CHECK-NEXT: [[TMP7:%.*]] = trunc [[TMP6]] to +; CHECK-NEXT: [[TMP8:%.*]] = add [[TMP7]], zeroinitializer +; CHECK-NEXT: [[TMP9:%.*]] = mul [[TMP8]], shufflevector ( insertelement ( poison, i3 1, i64 0), poison, zeroinitializer) ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %vector.ph ], [ [[INDEX_NEXT:%.*]], %vector.body ] ; CHECK-NEXT: [[VEC_IND:%.*]] = phi [ [[INDUCTION]], %vector.ph ], [ [[VEC_IND_NEXT:%.*]], %vector.body ] -; 
CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP10:%.*]] = zext [[VEC_IND]] to -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i64, ptr [[DST:%.*]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP12]], i32 0 -; CHECK-NEXT: store [[TMP10]], ptr [[TMP13]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP16]] -; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[VEC_IND]], -; +; CHECK-NEXT: [[STEP_ADD:%.*]] = add [[VEC_IND]], [[DOTSPLAT]] +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 0 +; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP15:%.*]] = mul i64 [[TMP14]], 2 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP15]], 0 +; CHECK-NEXT: [[TMP17:%.*]] = mul i64 [[TMP16]], 1 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = zext [[VEC_IND]] to +; CHECK-NEXT: [[TMP20:%.*]] = zext [[STEP_ADD]] to +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[DST]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i32 0 +; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[TMP24]], 2 +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP21]], i64 [[TMP25]] +; CHECK-NEXT: store [[TMP19]], ptr [[TMP23]], align 8 +; CHECK-NEXT: store [[TMP20]], ptr [[TMP26]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] +; CHECK-NEXT: [[VEC_IND_NEXT]] = add [[STEP_ADD]], [[DOTSPLAT]] entry: br label %for.body diff --git a/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll b/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll index d469178fec0e7..d8535d510ca03 100644 --- a/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll +++ b/llvm/test/Transforms/LoopVectorize/SystemZ/zero_unroll.ll @@ -1,4 +1,4 @@ -; RUN: opt -S -passes=loop-vectorize -mtriple=s390x-linux-gnu -tiny-trip-count-interleave-threshold=4 -vectorizer-min-trip-count=8 < %s | FileCheck %s +; RUN: opt -S -passes=loop-vectorize -mtriple=s390x-linux-gnu -vectorizer-min-trip-count=8 < %s | FileCheck %s define i32 @main(i32 %arg, ptr nocapture readnone %arg1) #0 { ;CHECK: vector.body: diff --git a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll index fd826ce454ed8..87f525fbce17a 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/imprecise-through-phis.ll @@ -73,23 +73,33 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE: vector.body: ; SSE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[VEC_PHI:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] +; SSE-NEXT: [[VEC_PHI1:%.*]] = phi <2 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI3:%.*]], [[VECTOR_BODY]] ] ; SSE-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 -; SSE-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] -; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 0 -; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 8 -; SSE-NEXT: [[TMP3:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], -; SSE-NEXT: [[TMP5:%.*]] = xor <2 x i1> [[TMP3]], -; SSE-NEXT: [[TMP4:%.*]] = fadd fast <2 x double> [[VEC_PHI]], [[WIDE_LOAD]] 
-; SSE-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP3]], <2 x double> [[TMP4]], <2 x double> [[VEC_PHI]] -; SSE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 2 -; SSE-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 -; SSE-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; SSE-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 2 +; SSE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] +; SSE-NEXT: [[TMP3:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP1]] +; SSE-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[TMP2]], i32 0 +; SSE-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[TMP2]], i32 2 +; SSE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP4]], align 8 +; SSE-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x double>, ptr [[TMP5]], align 8 +; SSE-NEXT: [[TMP6:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD]], +; SSE-NEXT: [[TMP7:%.*]] = fcmp fast une <2 x double> [[WIDE_LOAD2]], +; SSE-NEXT: [[TMP8:%.*]] = xor <2 x i1> [[TMP6]], +; SSE-NEXT: [[TMP9:%.*]] = xor <2 x i1> [[TMP7]], +; SSE-NEXT: [[TMP10:%.*]] = fadd fast <2 x double> [[VEC_PHI]], [[WIDE_LOAD]] +; SSE-NEXT: [[TMP11:%.*]] = fadd fast <2 x double> [[VEC_PHI1]], [[WIDE_LOAD2]] +; SSE-NEXT: [[PREDPHI]] = select <2 x i1> [[TMP6]], <2 x double> [[TMP10]], <2 x double> [[VEC_PHI]] +; SSE-NEXT: [[PREDPHI3]] = select <2 x i1> [[TMP7]], <2 x double> [[TMP11]], <2 x double> [[VEC_PHI1]] +; SSE-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 +; SSE-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 +; SSE-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; SSE: middle.block: -; SSE-NEXT: [[TMP7:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[PREDPHI]]) +; SSE-NEXT: [[BIN_RDX:%.*]] = fadd fast <2 x double> [[PREDPHI3]], [[PREDPHI]] +; SSE-NEXT: [[TMP13:%.*]] = call fast double @llvm.vector.reduce.fadd.v2f64(double -0.000000e+00, <2 x double> [[BIN_RDX]]) ; SSE-NEXT: br i1 true, label [[DONE:%.*]], label [[SCALAR_PH]] ; SSE: scalar.ph: ; SSE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 32, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; SSE-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; SSE-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] ; SSE-NEXT: br label [[LOOP:%.*]] ; SSE: loop: ; SSE-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] @@ -107,9 +117,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; SSE-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] ; SSE-NEXT: [[I_NEXT]] = add i32 [[I]], 1 ; SSE-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP2:![0-9]+]] +; SSE-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP3:![0-9]+]] ; SSE: done: -; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; SSE-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP13]], [[MIDDLE_BLOCK]] ] ; SSE-NEXT: ret double [[TOT_NEXT_LCSSA]] ; ; AVX-LABEL: @sumIfVector( @@ -120,23 +130,53 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX: vector.body: ; AVX-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[VEC_PHI:%.*]] = phi <4 x double> [ 
zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI1:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI7:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI2:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI8:%.*]], [[VECTOR_BODY]] ] +; AVX-NEXT: [[VEC_PHI3:%.*]] = phi <4 x double> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI9:%.*]], [[VECTOR_BODY]] ] ; AVX-NEXT: [[TMP0:%.*]] = add i32 [[INDEX]], 0 -; AVX-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] -; AVX-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP1]], i32 0 -; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP2]], align 8 -; AVX-NEXT: [[TMP3:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD]], -; AVX-NEXT: [[TMP5:%.*]] = xor <4 x i1> [[TMP3]], -; AVX-NEXT: [[TMP4:%.*]] = fadd fast <4 x double> [[VEC_PHI]], [[WIDE_LOAD]] -; AVX-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP3]], <4 x double> [[TMP4]], <4 x double> [[VEC_PHI]] -; AVX-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; AVX-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 -; AVX-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; AVX-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 4 +; AVX-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 8 +; AVX-NEXT: [[TMP3:%.*]] = add i32 [[INDEX]], 12 +; AVX-NEXT: [[TMP4:%.*]] = getelementptr double, ptr [[ARR:%.*]], i32 [[TMP0]] +; AVX-NEXT: [[TMP5:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP1]] +; AVX-NEXT: [[TMP6:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP2]] +; AVX-NEXT: [[TMP7:%.*]] = getelementptr double, ptr [[ARR]], i32 [[TMP3]] +; AVX-NEXT: [[TMP8:%.*]] = getelementptr double, ptr [[TMP4]], i32 0 +; AVX-NEXT: [[TMP9:%.*]] = getelementptr double, ptr [[TMP4]], i32 4 +; AVX-NEXT: [[TMP10:%.*]] = getelementptr double, ptr [[TMP4]], i32 8 +; AVX-NEXT: [[TMP11:%.*]] = getelementptr double, ptr [[TMP4]], i32 12 +; AVX-NEXT: [[WIDE_LOAD:%.*]] = load <4 x double>, ptr [[TMP8]], align 8 +; AVX-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x double>, ptr [[TMP9]], align 8 +; AVX-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x double>, ptr [[TMP10]], align 8 +; AVX-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x double>, ptr [[TMP11]], align 8 +; AVX-NEXT: [[TMP12:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD]], +; AVX-NEXT: [[TMP13:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD4]], +; AVX-NEXT: [[TMP14:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD5]], +; AVX-NEXT: [[TMP15:%.*]] = fcmp fast une <4 x double> [[WIDE_LOAD6]], +; AVX-NEXT: [[TMP16:%.*]] = xor <4 x i1> [[TMP12]], +; AVX-NEXT: [[TMP17:%.*]] = xor <4 x i1> [[TMP13]], +; AVX-NEXT: [[TMP18:%.*]] = xor <4 x i1> [[TMP14]], +; AVX-NEXT: [[TMP19:%.*]] = xor <4 x i1> [[TMP15]], +; AVX-NEXT: [[TMP20:%.*]] = fadd fast <4 x double> [[VEC_PHI]], [[WIDE_LOAD]] +; AVX-NEXT: [[TMP21:%.*]] = fadd fast <4 x double> [[VEC_PHI1]], [[WIDE_LOAD4]] +; AVX-NEXT: [[TMP22:%.*]] = fadd fast <4 x double> [[VEC_PHI2]], [[WIDE_LOAD5]] +; AVX-NEXT: [[TMP23:%.*]] = fadd fast <4 x double> [[VEC_PHI3]], [[WIDE_LOAD6]] +; AVX-NEXT: [[PREDPHI]] = select <4 x i1> [[TMP12]], <4 x double> [[TMP20]], <4 x double> [[VEC_PHI]] +; AVX-NEXT: [[PREDPHI7]] = select <4 x i1> [[TMP13]], <4 x double> [[TMP21]], <4 x double> [[VEC_PHI1]] +; AVX-NEXT: [[PREDPHI8]] = select <4 x i1> [[TMP14]], <4 x double> [[TMP22]], <4 x double> [[VEC_PHI2]] +; AVX-NEXT: [[PREDPHI9]] = select <4 x i1> [[TMP15]], <4 x double> [[TMP23]], <4 x double> [[VEC_PHI3]] +; AVX-NEXT: [[INDEX_NEXT]] = add nuw 
i32 [[INDEX]], 16 +; AVX-NEXT: [[TMP24:%.*]] = icmp eq i32 [[INDEX_NEXT]], 32 +; AVX-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; AVX: middle.block: -; AVX-NEXT: [[TMP7:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[PREDPHI]]) +; AVX-NEXT: [[BIN_RDX:%.*]] = fadd fast <4 x double> [[PREDPHI7]], [[PREDPHI]] +; AVX-NEXT: [[BIN_RDX10:%.*]] = fadd fast <4 x double> [[PREDPHI8]], [[BIN_RDX]] +; AVX-NEXT: [[BIN_RDX11:%.*]] = fadd fast <4 x double> [[PREDPHI9]], [[BIN_RDX10]] +; AVX-NEXT: [[TMP25:%.*]] = call fast double @llvm.vector.reduce.fadd.v4f64(double -0.000000e+00, <4 x double> [[BIN_RDX11]]) ; AVX-NEXT: br i1 true, label [[DONE:%.*]], label [[SCALAR_PH]] ; AVX: scalar.ph: ; AVX-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ 32, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; AVX-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; AVX-NEXT: [[BC_MERGE_RDX:%.*]] = phi double [ 0.000000e+00, [[ENTRY]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] ; AVX-NEXT: br label [[LOOP:%.*]] ; AVX: loop: ; AVX-NEXT: [[I:%.*]] = phi i32 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[NEXT_ITER:%.*]] ] @@ -154,9 +194,9 @@ define double @sumIfVector(ptr nocapture readonly %arr) { ; AVX-NEXT: [[TOT_NEXT]] = phi double [ [[TOT]], [[NO_ADD]] ], [ [[TOT_NEW]], [[DO_ADD]] ] ; AVX-NEXT: [[I_NEXT]] = add i32 [[I]], 1 ; AVX-NEXT: [[AGAIN:%.*]] = icmp ult i32 [[I_NEXT]], 32 -; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP2:![0-9]+]] +; AVX-NEXT: br i1 [[AGAIN]], label [[LOOP]], label [[DONE]], !llvm.loop [[LOOP3:![0-9]+]] ; AVX: done: -; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP7]], [[MIDDLE_BLOCK]] ] +; AVX-NEXT: [[TOT_NEXT_LCSSA:%.*]] = phi double [ [[TOT_NEXT]], [[NEXT_ITER]] ], [ [[TMP25]], [[MIDDLE_BLOCK]] ] ; AVX-NEXT: ret double [[TOT_NEXT_LCSSA]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll b/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll index 4857d27df6331..93d4e4c6d1673 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/interleave_short_tc.ll @@ -1,18 +1,15 @@ ; Check that we won't interleave by more than half the "best known" estimated trip count. -; The loop is expected to be vectorized by 4 and interleaving suppresed due to -; short trip count which is controled by "tiny-trip-count-interleave-threshold". -; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s ; ; The loop is expected to be vectorized by 4 and computed interleaving factor is 1. ; Thus the resulting step is 4. -; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -tiny-trip-count-interleave-threshold=4 -S < %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -force-vector-width=4 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s ; The loop is expected to be vectorized by 2 and computed interleaving factor is 2. ; Thus the resulting step is 4. -; RUN: opt -passes=loop-vectorize -force-vector-width=2 -vectorizer-min-trip-count=4 -tiny-trip-count-interleave-threshold=4 -S < %s | FileCheck %s +; RUN: opt -passes=loop-vectorize -force-vector-width=2 -vectorizer-min-trip-count=4 -S < %s | FileCheck %s -; Check that we won't interleave by more than half the "best known" estimated trip count. 
+; Check that we won't interleave by more than "best known" estimated trip count. target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" target triple = "x86_64-unknown-linux-gnu" diff --git a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll index e30230120c44c..7159a54234b4b 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/limit-vf-by-tripcount.ll @@ -85,16 +85,16 @@ define void @test_tc_18(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 16, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX2]], 0 +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX1]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i8>, ptr [[TMP8]], align 64 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i32 0 -; CHECK-NEXT: store <2 x i8> [[WIDE_LOAD3]], ptr [[TMP10]], align 64 -; CHECK-NEXT: [[INDEX_NEXT4]] = add nuw i64 [[INDEX2]], 2 -; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 18 +; CHECK-NEXT: store <2 x i8> [[WIDE_LOAD2]], ptr [[TMP10]], align 64 +; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 18 ; CHECK-NEXT: br i1 [[TMP11]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 true, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -158,16 +158,16 @@ define void @test_tc_19(ptr noalias %src, ptr noalias %dst) { ; CHECK-NEXT: [[VEC_EPILOG_RESUME_VAL:%.*]] = phi i64 [ 16, [[VEC_EPILOG_ITER_CHECK]] ], [ 0, [[VECTOR_MAIN_LOOP_ITER_CHECK]] ] ; CHECK-NEXT: br label [[VEC_EPILOG_VECTOR_BODY:%.*]] ; CHECK: vec.epilog.vector.body: -; CHECK-NEXT: [[INDEX2:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT4:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX2]], 0 +; CHECK-NEXT: [[INDEX1:%.*]] = phi i64 [ [[VEC_EPILOG_RESUME_VAL]], [[VEC_EPILOG_PH]] ], [ [[INDEX_NEXT3:%.*]], [[VEC_EPILOG_VECTOR_BODY]] ] +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX1]], 0 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP7]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <2 x i8>, ptr [[TMP8]], align 64 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP6]] ; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP9]], i32 0 -; CHECK-NEXT: store <2 x i8> [[WIDE_LOAD3]], ptr [[TMP10]], align 64 -; CHECK-NEXT: 
[[INDEX_NEXT4]] = add nuw i64 [[INDEX2]], 2 -; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT4]], 18 +; CHECK-NEXT: store <2 x i8> [[WIDE_LOAD2]], ptr [[TMP10]], align 64 +; CHECK-NEXT: [[INDEX_NEXT3]] = add nuw i64 [[INDEX1]], 2 +; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT3]], 18 ; CHECK-NEXT: br i1 [[TMP11]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: vec.epilog.middle.block: ; CHECK-NEXT: br i1 false, label [[EXIT]], label [[VEC_EPILOG_SCALAR_PH]] @@ -212,19 +212,40 @@ define void @test_tc_20(ptr noalias %src, ptr noalias %dst) { ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i8, ptr [[TMP1]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP2]], align 64 -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP3]], i32 0 -; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP4]], align 64 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20 -; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 0 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i8>, ptr [[TMP8]], align 64 +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i8>, ptr [[TMP9]], align 64 +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x i8>, ptr [[TMP10]], align 64 +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i8>, ptr [[TMP11]], align 64 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[DST:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 0 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 4 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 8 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[TMP12]], i32 12 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD]], ptr [[TMP16]], align 64 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD1]], ptr [[TMP17]], align 64 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD2]], ptr [[TMP18]], align 64 +; CHECK-NEXT: store <4 x i8> [[WIDE_LOAD3]], ptr [[TMP19]], align 64 +; CHECK-NEXT: 
[[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP20:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP20]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br i1 true, label [[EXIT:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br i1 false, label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 20, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[I:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[I_NEXT:%.*]], [[LOOP]] ] diff --git a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll index 19ac52cf0d0b3..91b8e149487a8 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/load-deref-pred.ll @@ -2507,48 +2507,147 @@ define i32 @test_stride_three(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP116:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP117:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP118:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP119:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 3 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 6 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 9 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load 
i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 12 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 15 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 18 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 21 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 24 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 27 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 30 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 33 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 36 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 39 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 42 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 45 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = 
insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP64]], align 4 +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[TMP66]], align 4 +; CHECK-NEXT: [[TMP83:%.*]] = load i32, 
ptr [[TMP67]], align 4 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <4 x i32> poison, i32 [[TMP80]], i32 0 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i32> [[TMP84]], i32 [[TMP81]], i32 1 +; CHECK-NEXT: [[TMP86:%.*]] = insertelement <4 x i32> [[TMP85]], i32 [[TMP82]], i32 2 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP83]], i32 3 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP68]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP69]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP71]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP93:%.*]] = insertelement <4 x i32> [[TMP92]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP94:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP95:%.*]] = insertelement <4 x i32> [[TMP94]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP72]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = load i32, ptr [[TMP73]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[TMP74]], align 4 +; CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <4 x i32> poison, i32 [[TMP96]], i32 0 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP97]], i32 1 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP101]], i32 [[TMP98]], i32 2 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP99]], i32 3 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP76]], align 4 +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP77]], align 4 +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP78]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP79]], align 4 +; CHECK-NEXT: [[TMP108:%.*]] = insertelement <4 x i32> poison, i32 [[TMP104]], i32 0 +; CHECK-NEXT: [[TMP109:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP110:%.*]] = insertelement <4 x i32> [[TMP109]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP111:%.*]] = insertelement <4 x i32> [[TMP110]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP112:%.*]] = xor <4 x i1> [[TMP39]], +; CHECK-NEXT: [[TMP113:%.*]] = xor <4 x i1> [[TMP47]], +; CHECK-NEXT: [[TMP114:%.*]] = xor <4 x i1> [[TMP55]], +; CHECK-NEXT: [[TMP115:%.*]] = xor <4 x i1> [[TMP63]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP87]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI4:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP95]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI5:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI6:%.*]] = select <4 x i1> [[TMP63]], <4 x i32> [[TMP111]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP116]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP117]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI4]] +; CHECK-NEXT: [[TMP118]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI5]] +; CHECK-NEXT: [[TMP119]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP120:%.*]] = icmp eq i64 [[INDEX_NEXT]], 32 +; CHECK-NEXT: br i1 [[TMP120]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP30:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP117]], [[TMP116]] +; 
CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP118]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP119]], [[BIN_RDX7]] +; CHECK-NEXT: [[TMP121:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: br i1 false, label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2567,7 +2666,7 @@ define i32 @test_stride_three(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP31:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: @@ -2605,48 +2704,81 @@ define i32 @test_non_unit_stride_four(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP58:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP59:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 4 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 8 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 12 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; 
CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 16 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 20 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 24 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 28 +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP16:%.*]] = load i1, ptr [[TMP8]], align 1 +; CHECK-NEXT: [[TMP17:%.*]] = load i1, ptr [[TMP9]], align 1 +; CHECK-NEXT: [[TMP18:%.*]] = load i1, ptr [[TMP10]], align 1 +; CHECK-NEXT: [[TMP19:%.*]] = load i1, ptr [[TMP11]], align 1 +; CHECK-NEXT: [[TMP20:%.*]] = insertelement <4 x i1> poison, i1 [[TMP16]], i32 0 +; CHECK-NEXT: [[TMP21:%.*]] = insertelement <4 x i1> [[TMP20]], i1 [[TMP17]], i32 1 +; CHECK-NEXT: [[TMP22:%.*]] = insertelement <4 x i1> [[TMP21]], i1 [[TMP18]], i32 2 +; CHECK-NEXT: [[TMP23:%.*]] = insertelement <4 x i1> [[TMP22]], i1 [[TMP19]], i32 3 +; CHECK-NEXT: [[TMP24:%.*]] = load i1, ptr [[TMP12]], align 1 +; CHECK-NEXT: [[TMP25:%.*]] = load i1, ptr [[TMP13]], align 1 +; CHECK-NEXT: [[TMP26:%.*]] = load i1, ptr [[TMP14]], align 1 +; CHECK-NEXT: [[TMP27:%.*]] = load i1, ptr [[TMP15]], align 1 +; CHECK-NEXT: [[TMP28:%.*]] = insertelement <4 x i1> poison, i1 [[TMP24]], i32 0 +; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i1> [[TMP28]], i1 [[TMP25]], i32 1 +; CHECK-NEXT: [[TMP30:%.*]] = insertelement <4 x i1> [[TMP29]], i1 [[TMP26]], i32 2 +; CHECK-NEXT: [[TMP31:%.*]] = insertelement <4 x i1> [[TMP30]], i1 [[TMP27]], i32 3 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 
[[TMP4]] +; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[TMP32]], align 4 +; CHECK-NEXT: [[TMP41:%.*]] = load i32, ptr [[TMP33]], align 4 +; CHECK-NEXT: [[TMP42:%.*]] = load i32, ptr [[TMP34]], align 4 +; CHECK-NEXT: [[TMP43:%.*]] = load i32, ptr [[TMP35]], align 4 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i32> poison, i32 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i32> [[TMP44]], i32 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i32> [[TMP45]], i32 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i32> [[TMP46]], i32 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i32, ptr [[TMP36]], align 4 +; CHECK-NEXT: [[TMP49:%.*]] = load i32, ptr [[TMP37]], align 4 +; CHECK-NEXT: [[TMP50:%.*]] = load i32, ptr [[TMP38]], align 4 +; CHECK-NEXT: [[TMP51:%.*]] = load i32, ptr [[TMP39]], align 4 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i32> poison, i32 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i32> [[TMP52]], i32 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i32> [[TMP53]], i32 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i32> [[TMP54]], i32 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = xor <4 x i1> [[TMP23]], +; CHECK-NEXT: [[TMP57:%.*]] = xor <4 x i1> [[TMP31]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP23]], <4 x i32> [[TMP47]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI2:%.*]] = select <4 x i1> [[TMP31]], <4 x i32> [[TMP55]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP58]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP59]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI2]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP60:%.*]] = icmp eq i64 [[INDEX_NEXT]], 24 +; CHECK-NEXT: br i1 [[TMP60]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP32:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP59]], [[TMP58]] +; CHECK-NEXT: [[TMP61:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br i1 false, label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP61]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2665,7 +2797,7 @@ define i32 @test_non_unit_stride_four(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP33:![0-9]+]] ; CHECK: loop_exit: -; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP61]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]] ; entry: @@ -2703,48 +2835,147 @@ define i32 
@test_non_unit_stride_five(i64 %len, ptr %test_base) { ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP29:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP116:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP117:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP118:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP119:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 5 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 5 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 10 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 15 -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1 -; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1 -; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1 -; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1 -; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0 -; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1 -; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2 -; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3 -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP16]], align 4 -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP18]], align 4 -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP19]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> poison, i32 [[TMP20]], i32 0 -; CHECK-NEXT: [[TMP25:%.*]] = insertelement <4 x i32> [[TMP24]], i32 [[TMP21]], i32 1 -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP22]], i32 2 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <4 x i32> [[TMP26]], i32 [[TMP23]], i32 3 -; CHECK-NEXT: [[TMP28:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP27]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP29]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP30:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20 -; CHECK-NEXT: br i1 [[TMP30]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] +; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 20 +; CHECK-NEXT: [[TMP5:%.*]] = add i64 
[[OFFSET_IDX]], 25 +; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 30 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 35 +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 40 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 45 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 50 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 55 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 60 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 65 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 70 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 75 +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 
0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP72:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP73:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP74:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP76:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP77:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP80:%.*]] = load i32, ptr [[TMP64]], align 4 +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = load i32, ptr [[TMP66]], align 4 +; CHECK-NEXT: [[TMP83:%.*]] = load i32, ptr [[TMP67]], align 4 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <4 x i32> poison, i32 [[TMP80]], i32 0 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <4 x i32> [[TMP84]], i32 [[TMP81]], i32 1 +; CHECK-NEXT: [[TMP86:%.*]] = insertelement <4 x i32> [[TMP85]], i32 [[TMP82]], i32 2 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> [[TMP86]], i32 [[TMP83]], i32 3 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP68]], align 4 +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP69]], align 4 +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP71]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP93:%.*]] = insertelement <4 x i32> [[TMP92]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP94:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP95:%.*]] = insertelement <4 x i32> [[TMP94]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP72]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] 
= load i32, ptr [[TMP73]], align 4 +; CHECK-NEXT: [[TMP98:%.*]] = load i32, ptr [[TMP74]], align 4 +; CHECK-NEXT: [[TMP99:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <4 x i32> poison, i32 [[TMP96]], i32 0 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <4 x i32> [[TMP100]], i32 [[TMP97]], i32 1 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP101]], i32 [[TMP98]], i32 2 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <4 x i32> [[TMP102]], i32 [[TMP99]], i32 3 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP76]], align 4 +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP77]], align 4 +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP78]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP79]], align 4 +; CHECK-NEXT: [[TMP108:%.*]] = insertelement <4 x i32> poison, i32 [[TMP104]], i32 0 +; CHECK-NEXT: [[TMP109:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP110:%.*]] = insertelement <4 x i32> [[TMP109]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP111:%.*]] = insertelement <4 x i32> [[TMP110]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP112:%.*]] = xor <4 x i1> [[TMP39]], +; CHECK-NEXT: [[TMP113:%.*]] = xor <4 x i1> [[TMP47]], +; CHECK-NEXT: [[TMP114:%.*]] = xor <4 x i1> [[TMP55]], +; CHECK-NEXT: [[TMP115:%.*]] = xor <4 x i1> [[TMP63]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP87]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI4:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP95]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI5:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI6:%.*]] = select <4 x i1> [[TMP63]], <4 x i32> [[TMP111]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP116]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP117]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI4]] +; CHECK-NEXT: [[TMP118]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI5]] +; CHECK-NEXT: [[TMP119]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI6]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP120:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP120]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP34:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP31:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP29]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP117]], [[TMP116]] +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <4 x i32> [[TMP118]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <4 x i32> [[TMP119]], [[BIN_RDX7]] +; CHECK-NEXT: [[TMP121:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: br i1 false, label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 100, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 80, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ] @@ -2763,7 +2994,7 @@ define i32 @test_non_unit_stride_five(i64 %len, ptr %test_base) { ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label 
[[LOOP]], !llvm.loop [[LOOP35:![0-9]+]]
 ; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP31]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP121]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
 ;
 entry:
@@ -2800,73 +3031,244 @@ define i32 @neg_test_non_unit_stride_off_by_four_bytes(i64 %len, ptr %test_base)
 ; CHECK: vector.ph:
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
-; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP37:%.*]], [[PRED_LOAD_CONTINUE6]] ]
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE33:%.*]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP148:%.*]], [[PRED_LOAD_CONTINUE33]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP149:%.*]], [[PRED_LOAD_CONTINUE33]] ]
+; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP150:%.*]], [[PRED_LOAD_CONTINUE33]] ]
+; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <4 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP151:%.*]], [[PRED_LOAD_CONTINUE33]] ]
 ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 2
 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 4
 ; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]]
-; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]]
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]]
-; CHECK-NEXT: [[TMP8:%.*]] = load i1, ptr [[TMP4]], align 1
-; CHECK-NEXT: [[TMP9:%.*]] = load i1, ptr [[TMP5]], align 1
-; CHECK-NEXT: [[TMP10:%.*]] = load i1, ptr [[TMP6]], align 1
-; CHECK-NEXT: [[TMP11:%.*]] = load i1, ptr [[TMP7]], align 1
-; CHECK-NEXT: [[TMP12:%.*]] = insertelement <4 x i1> poison, i1 [[TMP8]], i32 0
-; CHECK-NEXT: [[TMP13:%.*]] = insertelement <4 x i1> [[TMP12]], i1 [[TMP9]], i32 1
-; CHECK-NEXT: [[TMP14:%.*]] = insertelement <4 x i1> [[TMP13]], i1 [[TMP10]], i32 2
-; CHECK-NEXT: [[TMP15:%.*]] = insertelement <4 x i1> [[TMP14]], i1 [[TMP11]], i32 3
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <4 x i1> [[TMP15]], i32 0
-; CHECK-NEXT: br i1 [[TMP16]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]]
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 10
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 12
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 14
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 18
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 20
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 22
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 24
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 26
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 28
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 30
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE:%.*]], i64
[[TMP0]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i1, ptr [[TEST_BASE]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP32:%.*]] = load i1, ptr [[TMP16]], align 1 +; CHECK-NEXT: [[TMP33:%.*]] = load i1, ptr [[TMP17]], align 1 +; CHECK-NEXT: [[TMP34:%.*]] = load i1, ptr [[TMP18]], align 1 +; CHECK-NEXT: [[TMP35:%.*]] = load i1, ptr [[TMP19]], align 1 +; CHECK-NEXT: [[TMP36:%.*]] = insertelement <4 x i1> poison, i1 [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = insertelement <4 x i1> [[TMP36]], i1 [[TMP33]], i32 1 +; CHECK-NEXT: [[TMP38:%.*]] = insertelement <4 x i1> [[TMP37]], i1 [[TMP34]], i32 2 +; CHECK-NEXT: [[TMP39:%.*]] = insertelement <4 x i1> [[TMP38]], i1 [[TMP35]], i32 3 +; CHECK-NEXT: [[TMP40:%.*]] = load i1, ptr [[TMP20]], align 1 +; CHECK-NEXT: [[TMP41:%.*]] = load i1, ptr [[TMP21]], align 1 +; CHECK-NEXT: [[TMP42:%.*]] = load i1, ptr [[TMP22]], align 1 +; CHECK-NEXT: [[TMP43:%.*]] = load i1, ptr [[TMP23]], align 1 +; CHECK-NEXT: [[TMP44:%.*]] = insertelement <4 x i1> poison, i1 [[TMP40]], i32 0 +; CHECK-NEXT: [[TMP45:%.*]] = insertelement <4 x i1> [[TMP44]], i1 [[TMP41]], i32 1 +; CHECK-NEXT: [[TMP46:%.*]] = insertelement <4 x i1> [[TMP45]], i1 [[TMP42]], i32 2 +; CHECK-NEXT: [[TMP47:%.*]] = insertelement <4 x i1> [[TMP46]], i1 [[TMP43]], i32 3 +; CHECK-NEXT: [[TMP48:%.*]] = load i1, ptr [[TMP24]], align 1 +; CHECK-NEXT: [[TMP49:%.*]] = load i1, ptr [[TMP25]], align 1 +; CHECK-NEXT: [[TMP50:%.*]] = load i1, ptr [[TMP26]], align 1 +; CHECK-NEXT: [[TMP51:%.*]] = load i1, ptr [[TMP27]], align 1 +; CHECK-NEXT: [[TMP52:%.*]] = insertelement <4 x i1> poison, i1 [[TMP48]], i32 0 +; CHECK-NEXT: [[TMP53:%.*]] = insertelement <4 x i1> [[TMP52]], i1 [[TMP49]], i32 1 +; CHECK-NEXT: [[TMP54:%.*]] = insertelement <4 x i1> [[TMP53]], i1 [[TMP50]], i32 2 +; CHECK-NEXT: [[TMP55:%.*]] = insertelement <4 x i1> [[TMP54]], i1 [[TMP51]], i32 3 +; CHECK-NEXT: [[TMP56:%.*]] = load i1, ptr [[TMP28]], align 1 +; CHECK-NEXT: [[TMP57:%.*]] = load i1, ptr [[TMP29]], align 1 +; CHECK-NEXT: [[TMP58:%.*]] = load i1, ptr [[TMP30]], align 1 +; CHECK-NEXT: [[TMP59:%.*]] = load i1, ptr [[TMP31]], align 1 +; CHECK-NEXT: [[TMP60:%.*]] = insertelement <4 x i1> poison, i1 [[TMP56]], i32 0 +; CHECK-NEXT: [[TMP61:%.*]] = insertelement <4 x i1> [[TMP60]], i1 [[TMP57]], i32 1 +; 
CHECK-NEXT: [[TMP62:%.*]] = insertelement <4 x i1> [[TMP61]], i1 [[TMP58]], i32 2 +; CHECK-NEXT: [[TMP63:%.*]] = insertelement <4 x i1> [[TMP62]], i1 [[TMP59]], i32 3 +; CHECK-NEXT: [[TMP64:%.*]] = extractelement <4 x i1> [[TMP39]], i32 0 +; CHECK-NEXT: br i1 [[TMP64]], label [[PRED_LOAD_IF:%.*]], label [[PRED_LOAD_CONTINUE:%.*]] ; CHECK: pred.load.if: -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP17]], align 4 -; CHECK-NEXT: [[TMP19:%.*]] = insertelement <4 x i32> poison, i32 [[TMP18]], i32 0 +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP66:%.*]] = load i32, ptr [[TMP65]], align 4 +; CHECK-NEXT: [[TMP67:%.*]] = insertelement <4 x i32> poison, i32 [[TMP66]], i32 0 ; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE]] ; CHECK: pred.load.continue: -; CHECK-NEXT: [[TMP20:%.*]] = phi <4 x i32> [ poison, [[VECTOR_BODY]] ], [ [[TMP19]], [[PRED_LOAD_IF]] ] -; CHECK-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP15]], i32 1 -; CHECK-NEXT: br i1 [[TMP21]], label [[PRED_LOAD_IF1:%.*]], label [[PRED_LOAD_CONTINUE2:%.*]] -; CHECK: pred.load.if1: -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP22]], align 4 -; CHECK-NEXT: [[TMP24:%.*]] = insertelement <4 x i32> [[TMP20]], i32 [[TMP23]], i32 1 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE2]] -; CHECK: pred.load.continue2: -; CHECK-NEXT: [[TMP25:%.*]] = phi <4 x i32> [ [[TMP20]], [[PRED_LOAD_CONTINUE]] ], [ [[TMP24]], [[PRED_LOAD_IF1]] ] -; CHECK-NEXT: [[TMP26:%.*]] = extractelement <4 x i1> [[TMP15]], i32 2 -; CHECK-NEXT: br i1 [[TMP26]], label [[PRED_LOAD_IF3:%.*]], label [[PRED_LOAD_CONTINUE4:%.*]] -; CHECK: pred.load.if3: -; CHECK-NEXT: [[TMP27:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] -; CHECK-NEXT: [[TMP28:%.*]] = load i32, ptr [[TMP27]], align 4 -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <4 x i32> [[TMP25]], i32 [[TMP28]], i32 2 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE4]] -; CHECK: pred.load.continue4: -; CHECK-NEXT: [[TMP30:%.*]] = phi <4 x i32> [ [[TMP25]], [[PRED_LOAD_CONTINUE2]] ], [ [[TMP29]], [[PRED_LOAD_IF3]] ] -; CHECK-NEXT: [[TMP31:%.*]] = extractelement <4 x i1> [[TMP15]], i32 3 -; CHECK-NEXT: br i1 [[TMP31]], label [[PRED_LOAD_IF5:%.*]], label [[PRED_LOAD_CONTINUE6]] -; CHECK: pred.load.if5: -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] -; CHECK-NEXT: [[TMP33:%.*]] = load i32, ptr [[TMP32]], align 4 -; CHECK-NEXT: [[TMP34:%.*]] = insertelement <4 x i32> [[TMP30]], i32 [[TMP33]], i32 3 -; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE6]] -; CHECK: pred.load.continue6: -; CHECK-NEXT: [[TMP35:%.*]] = phi <4 x i32> [ [[TMP30]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP34]], [[PRED_LOAD_IF5]] ] -; CHECK-NEXT: [[TMP36:%.*]] = xor <4 x i1> [[TMP15]], -; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP15]], <4 x i32> [[TMP35]], <4 x i32> zeroinitializer -; CHECK-NEXT: [[TMP37]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP38:%.*]] = icmp eq i64 [[INDEX_NEXT]], 52 -; CHECK-NEXT: br i1 [[TMP38]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]] +; CHECK-NEXT: [[TMP68:%.*]] = phi <4 x i32> [ poison, [[VECTOR_BODY]] ], [ [[TMP67]], [[PRED_LOAD_IF]] ] +; CHECK-NEXT: [[TMP69:%.*]] = extractelement <4 x i1> [[TMP39]], i32 1 +; CHECK-NEXT: br i1 
[[TMP69]], label [[PRED_LOAD_IF4:%.*]], label [[PRED_LOAD_CONTINUE5:%.*]] +; CHECK: pred.load.if4: +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP71:%.*]] = load i32, ptr [[TMP70]], align 4 +; CHECK-NEXT: [[TMP72:%.*]] = insertelement <4 x i32> [[TMP68]], i32 [[TMP71]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE5]] +; CHECK: pred.load.continue5: +; CHECK-NEXT: [[TMP73:%.*]] = phi <4 x i32> [ [[TMP68]], [[PRED_LOAD_CONTINUE]] ], [ [[TMP72]], [[PRED_LOAD_IF4]] ] +; CHECK-NEXT: [[TMP74:%.*]] = extractelement <4 x i1> [[TMP39]], i32 2 +; CHECK-NEXT: br i1 [[TMP74]], label [[PRED_LOAD_IF6:%.*]], label [[PRED_LOAD_CONTINUE7:%.*]] +; CHECK: pred.load.if6: +; CHECK-NEXT: [[TMP75:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP76:%.*]] = load i32, ptr [[TMP75]], align 4 +; CHECK-NEXT: [[TMP77:%.*]] = insertelement <4 x i32> [[TMP73]], i32 [[TMP76]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE7]] +; CHECK: pred.load.continue7: +; CHECK-NEXT: [[TMP78:%.*]] = phi <4 x i32> [ [[TMP73]], [[PRED_LOAD_CONTINUE5]] ], [ [[TMP77]], [[PRED_LOAD_IF6]] ] +; CHECK-NEXT: [[TMP79:%.*]] = extractelement <4 x i1> [[TMP39]], i32 3 +; CHECK-NEXT: br i1 [[TMP79]], label [[PRED_LOAD_IF8:%.*]], label [[PRED_LOAD_CONTINUE9:%.*]] +; CHECK: pred.load.if8: +; CHECK-NEXT: [[TMP80:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP81:%.*]] = load i32, ptr [[TMP80]], align 4 +; CHECK-NEXT: [[TMP82:%.*]] = insertelement <4 x i32> [[TMP78]], i32 [[TMP81]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE9]] +; CHECK: pred.load.continue9: +; CHECK-NEXT: [[TMP83:%.*]] = phi <4 x i32> [ [[TMP78]], [[PRED_LOAD_CONTINUE7]] ], [ [[TMP82]], [[PRED_LOAD_IF8]] ] +; CHECK-NEXT: [[TMP84:%.*]] = extractelement <4 x i1> [[TMP47]], i32 0 +; CHECK-NEXT: br i1 [[TMP84]], label [[PRED_LOAD_IF10:%.*]], label [[PRED_LOAD_CONTINUE11:%.*]] +; CHECK: pred.load.if10: +; CHECK-NEXT: [[TMP85:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP4]] +; CHECK-NEXT: [[TMP86:%.*]] = load i32, ptr [[TMP85]], align 4 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <4 x i32> poison, i32 [[TMP86]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE11]] +; CHECK: pred.load.continue11: +; CHECK-NEXT: [[TMP88:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE9]] ], [ [[TMP87]], [[PRED_LOAD_IF10]] ] +; CHECK-NEXT: [[TMP89:%.*]] = extractelement <4 x i1> [[TMP47]], i32 1 +; CHECK-NEXT: br i1 [[TMP89]], label [[PRED_LOAD_IF12:%.*]], label [[PRED_LOAD_CONTINUE13:%.*]] +; CHECK: pred.load.if12: +; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP5]] +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP90]], align 4 +; CHECK-NEXT: [[TMP92:%.*]] = insertelement <4 x i32> [[TMP88]], i32 [[TMP91]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE13]] +; CHECK: pred.load.continue13: +; CHECK-NEXT: [[TMP93:%.*]] = phi <4 x i32> [ [[TMP88]], [[PRED_LOAD_CONTINUE11]] ], [ [[TMP92]], [[PRED_LOAD_IF12]] ] +; CHECK-NEXT: [[TMP94:%.*]] = extractelement <4 x i1> [[TMP47]], i32 2 +; CHECK-NEXT: br i1 [[TMP94]], label [[PRED_LOAD_IF14:%.*]], label [[PRED_LOAD_CONTINUE15:%.*]] +; CHECK: pred.load.if14: +; CHECK-NEXT: [[TMP95:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP6]] +; CHECK-NEXT: [[TMP96:%.*]] = load i32, ptr [[TMP95]], align 4 +; CHECK-NEXT: [[TMP97:%.*]] = insertelement <4 x i32> [[TMP93]], i32 [[TMP96]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE15]] +; CHECK: 
pred.load.continue15: +; CHECK-NEXT: [[TMP98:%.*]] = phi <4 x i32> [ [[TMP93]], [[PRED_LOAD_CONTINUE13]] ], [ [[TMP97]], [[PRED_LOAD_IF14]] ] +; CHECK-NEXT: [[TMP99:%.*]] = extractelement <4 x i1> [[TMP47]], i32 3 +; CHECK-NEXT: br i1 [[TMP99]], label [[PRED_LOAD_IF16:%.*]], label [[PRED_LOAD_CONTINUE17:%.*]] +; CHECK: pred.load.if16: +; CHECK-NEXT: [[TMP100:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP101:%.*]] = load i32, ptr [[TMP100]], align 4 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <4 x i32> [[TMP98]], i32 [[TMP101]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE17]] +; CHECK: pred.load.continue17: +; CHECK-NEXT: [[TMP103:%.*]] = phi <4 x i32> [ [[TMP98]], [[PRED_LOAD_CONTINUE15]] ], [ [[TMP102]], [[PRED_LOAD_IF16]] ] +; CHECK-NEXT: [[TMP104:%.*]] = extractelement <4 x i1> [[TMP55]], i32 0 +; CHECK-NEXT: br i1 [[TMP104]], label [[PRED_LOAD_IF18:%.*]], label [[PRED_LOAD_CONTINUE19:%.*]] +; CHECK: pred.load.if18: +; CHECK-NEXT: [[TMP105:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP105]], align 4 +; CHECK-NEXT: [[TMP107:%.*]] = insertelement <4 x i32> poison, i32 [[TMP106]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE19]] +; CHECK: pred.load.continue19: +; CHECK-NEXT: [[TMP108:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE17]] ], [ [[TMP107]], [[PRED_LOAD_IF18]] ] +; CHECK-NEXT: [[TMP109:%.*]] = extractelement <4 x i1> [[TMP55]], i32 1 +; CHECK-NEXT: br i1 [[TMP109]], label [[PRED_LOAD_IF20:%.*]], label [[PRED_LOAD_CONTINUE21:%.*]] +; CHECK: pred.load.if20: +; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP9]] +; CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[TMP110]], align 4 +; CHECK-NEXT: [[TMP112:%.*]] = insertelement <4 x i32> [[TMP108]], i32 [[TMP111]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE21]] +; CHECK: pred.load.continue21: +; CHECK-NEXT: [[TMP113:%.*]] = phi <4 x i32> [ [[TMP108]], [[PRED_LOAD_CONTINUE19]] ], [ [[TMP112]], [[PRED_LOAD_IF20]] ] +; CHECK-NEXT: [[TMP114:%.*]] = extractelement <4 x i1> [[TMP55]], i32 2 +; CHECK-NEXT: br i1 [[TMP114]], label [[PRED_LOAD_IF22:%.*]], label [[PRED_LOAD_CONTINUE23:%.*]] +; CHECK: pred.load.if22: +; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP116:%.*]] = load i32, ptr [[TMP115]], align 4 +; CHECK-NEXT: [[TMP117:%.*]] = insertelement <4 x i32> [[TMP113]], i32 [[TMP116]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE23]] +; CHECK: pred.load.continue23: +; CHECK-NEXT: [[TMP118:%.*]] = phi <4 x i32> [ [[TMP113]], [[PRED_LOAD_CONTINUE21]] ], [ [[TMP117]], [[PRED_LOAD_IF22]] ] +; CHECK-NEXT: [[TMP119:%.*]] = extractelement <4 x i1> [[TMP55]], i32 3 +; CHECK-NEXT: br i1 [[TMP119]], label [[PRED_LOAD_IF24:%.*]], label [[PRED_LOAD_CONTINUE25:%.*]] +; CHECK: pred.load.if24: +; CHECK-NEXT: [[TMP120:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[TMP120]], align 4 +; CHECK-NEXT: [[TMP122:%.*]] = insertelement <4 x i32> [[TMP118]], i32 [[TMP121]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE25]] +; CHECK: pred.load.continue25: +; CHECK-NEXT: [[TMP123:%.*]] = phi <4 x i32> [ [[TMP118]], [[PRED_LOAD_CONTINUE23]] ], [ [[TMP122]], [[PRED_LOAD_IF24]] ] +; CHECK-NEXT: [[TMP124:%.*]] = extractelement <4 x i1> [[TMP63]], i32 0 +; CHECK-NEXT: br i1 [[TMP124]], label [[PRED_LOAD_IF26:%.*]], label [[PRED_LOAD_CONTINUE27:%.*]] +; CHECK: 
pred.load.if26: +; CHECK-NEXT: [[TMP125:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP12]] +; CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[TMP125]], align 4 +; CHECK-NEXT: [[TMP127:%.*]] = insertelement <4 x i32> poison, i32 [[TMP126]], i32 0 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE27]] +; CHECK: pred.load.continue27: +; CHECK-NEXT: [[TMP128:%.*]] = phi <4 x i32> [ poison, [[PRED_LOAD_CONTINUE25]] ], [ [[TMP127]], [[PRED_LOAD_IF26]] ] +; CHECK-NEXT: [[TMP129:%.*]] = extractelement <4 x i1> [[TMP63]], i32 1 +; CHECK-NEXT: br i1 [[TMP129]], label [[PRED_LOAD_IF28:%.*]], label [[PRED_LOAD_CONTINUE29:%.*]] +; CHECK: pred.load.if28: +; CHECK-NEXT: [[TMP130:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP13]] +; CHECK-NEXT: [[TMP131:%.*]] = load i32, ptr [[TMP130]], align 4 +; CHECK-NEXT: [[TMP132:%.*]] = insertelement <4 x i32> [[TMP128]], i32 [[TMP131]], i32 1 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE29]] +; CHECK: pred.load.continue29: +; CHECK-NEXT: [[TMP133:%.*]] = phi <4 x i32> [ [[TMP128]], [[PRED_LOAD_CONTINUE27]] ], [ [[TMP132]], [[PRED_LOAD_IF28]] ] +; CHECK-NEXT: [[TMP134:%.*]] = extractelement <4 x i1> [[TMP63]], i32 2 +; CHECK-NEXT: br i1 [[TMP134]], label [[PRED_LOAD_IF30:%.*]], label [[PRED_LOAD_CONTINUE31:%.*]] +; CHECK: pred.load.if30: +; CHECK-NEXT: [[TMP135:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP136:%.*]] = load i32, ptr [[TMP135]], align 4 +; CHECK-NEXT: [[TMP137:%.*]] = insertelement <4 x i32> [[TMP133]], i32 [[TMP136]], i32 2 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE31]] +; CHECK: pred.load.continue31: +; CHECK-NEXT: [[TMP138:%.*]] = phi <4 x i32> [ [[TMP133]], [[PRED_LOAD_CONTINUE29]] ], [ [[TMP137]], [[PRED_LOAD_IF30]] ] +; CHECK-NEXT: [[TMP139:%.*]] = extractelement <4 x i1> [[TMP63]], i32 3 +; CHECK-NEXT: br i1 [[TMP139]], label [[PRED_LOAD_IF32:%.*]], label [[PRED_LOAD_CONTINUE33]] +; CHECK: pred.load.if32: +; CHECK-NEXT: [[TMP140:%.*]] = getelementptr inbounds i32, ptr [[ALLOCA]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP141:%.*]] = load i32, ptr [[TMP140]], align 4 +; CHECK-NEXT: [[TMP142:%.*]] = insertelement <4 x i32> [[TMP138]], i32 [[TMP141]], i32 3 +; CHECK-NEXT: br label [[PRED_LOAD_CONTINUE33]] +; CHECK: pred.load.continue33: +; CHECK-NEXT: [[TMP143:%.*]] = phi <4 x i32> [ [[TMP138]], [[PRED_LOAD_CONTINUE31]] ], [ [[TMP142]], [[PRED_LOAD_IF32]] ] +; CHECK-NEXT: [[TMP144:%.*]] = xor <4 x i1> [[TMP39]], +; CHECK-NEXT: [[TMP145:%.*]] = xor <4 x i1> [[TMP47]], +; CHECK-NEXT: [[TMP146:%.*]] = xor <4 x i1> [[TMP55]], +; CHECK-NEXT: [[TMP147:%.*]] = xor <4 x i1> [[TMP63]], +; CHECK-NEXT: [[PREDPHI:%.*]] = select <4 x i1> [[TMP39]], <4 x i32> [[TMP83]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI34:%.*]] = select <4 x i1> [[TMP47]], <4 x i32> [[TMP103]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI35:%.*]] = select <4 x i1> [[TMP55]], <4 x i32> [[TMP123]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[PREDPHI36:%.*]] = select <4 x i1> [[TMP63]], <4 x i32> [[TMP143]], <4 x i32> zeroinitializer +; CHECK-NEXT: [[TMP148]] = add <4 x i32> [[VEC_PHI]], [[PREDPHI]] +; CHECK-NEXT: [[TMP149]] = add <4 x i32> [[VEC_PHI1]], [[PREDPHI34]] +; CHECK-NEXT: [[TMP150]] = add <4 x i32> [[VEC_PHI2]], [[PREDPHI35]] +; CHECK-NEXT: [[TMP151]] = add <4 x i32> [[VEC_PHI3]], [[PREDPHI36]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP152:%.*]] = icmp eq i64 [[INDEX_NEXT]], 48 +; CHECK-NEXT: br i1 [[TMP152]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop 
[[LOOP36:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[TMP39:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP37]])
-; CHECK-NEXT: br i1 true, label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP149]], [[TMP148]]
+; CHECK-NEXT: [[BIN_RDX37:%.*]] = add <4 x i32> [[TMP150]], [[BIN_RDX]]
+; CHECK-NEXT: [[BIN_RDX38:%.*]] = add <4 x i32> [[TMP151]], [[BIN_RDX37]]
+; CHECK-NEXT: [[TMP153:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX38]])
+; CHECK-NEXT: br i1 false, label [[LOOP_EXIT:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
-; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 104, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP153]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: br label [[LOOP:%.*]]
 ; CHECK: loop:
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[LATCH:%.*]] ]
@@ -2885,7 +3287,7 @@ define i32 @neg_test_non_unit_stride_off_by_four_bytes(i64 %len, ptr %test_base)
 ; CHECK-NEXT: [[EXIT:%.*]] = icmp ugt i64 [[IV]], 100
 ; CHECK-NEXT: br i1 [[EXIT]], label [[LOOP_EXIT]], label [[LOOP]], !llvm.loop [[LOOP37:![0-9]+]]
 ; CHECK: loop_exit:
-; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP39]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ACCUM_NEXT_LCSSA:%.*]] = phi i32 [ [[ACCUM_NEXT]], [[LATCH]] ], [ [[TMP153]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i32 [[ACCUM_NEXT_LCSSA]]
 ;
 entry:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
index cc36c2ba6f758..e76b8261515ed 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/metadata-enable.ll
@@ -21,84 +21,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b,
 ; O1-NEXT: entry:
 ; O1-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0
 ; O1-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; O1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; O1-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; O1-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4
-; O1-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16
-; O1-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4
-; O1-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]]
-; O1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16
+; O1-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16
+; O1-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4
+; O1-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
+; O1-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; O1-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
+; O1-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16
+; O1-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4
 ; O1-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4
 ; O1-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32
-; O1-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>,
ptr [[TMP4]], align 4 -; O1-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O1-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O1-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O1-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O1-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O1-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O1-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O1-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O1-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O1-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O1-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O1-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O1-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O1-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O1-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O1-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O1-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O1-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O1-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O1-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O1-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O1-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O1-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O1-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O1-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O1-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O1-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O1-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O1-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O1-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O1-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O1-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O1-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O1-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O1-NEXT: 
[[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O1-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O1-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O1-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O1-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O1-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O1-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O1-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O1-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O1-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O1-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O1-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O1-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O1-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O1-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O1-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O1-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O1-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O1-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O1-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O1-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O1-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O1-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O1-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O1-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O1-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O1-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O1-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O1-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O1-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O1-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O1-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O1-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O1-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O1-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O1-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O1-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O1-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O1-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 
208 +; O1-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O1-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O1-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O1-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O1-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O1-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O1-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O1-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O1-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O1-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O1-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O1-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O1-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O1-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O1-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O1-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O1-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O1-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O1-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O1-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O1-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O1-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O1-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O1-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O1-NEXT: ret i32 [[TMP46]] ; @@ -106,84 +106,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; O2-NEXT: entry: ; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O2-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 
-; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP20:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 96 ; O2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP35:%.*]] = 
getelementptr inbounds i8, ptr [[B]], i64 208 +; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O2-NEXT: ret i32 [[TMP46]] ; @@ -191,84 +191,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; O3-NEXT: entry: ; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O3-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O3-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O3-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O3-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O3-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3-NEXT: [[TMP4:%.*]] = 
getelementptr inbounds i8, ptr [[B]], i64 32 -; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O3-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O3-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O3-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O3-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O3-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O3-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O3-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O3-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O3-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] 
+; O3-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O3-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O3-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O3-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O3-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O3-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O3-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O3-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O3-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O3-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O3-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O3-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O3-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O3-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O3-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O3-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O3-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O3-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O3-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], 
[[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O3-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O3-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O3-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O3-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O3-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O3-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O3-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O3-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O3-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3-NEXT: ret i32 [[TMP46]] ; @@ -276,84 +276,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; O3DEFAULT-NEXT: entry: ; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O3DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O3DEFAULT-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DEFAULT-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr 
[[A:%.*]], i64 16 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O3DEFAULT-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DEFAULT-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3DEFAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DEFAULT-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DEFAULT-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O3DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DEFAULT-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DEFAULT-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DEFAULT-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O3DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DEFAULT-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 
4 -; O3DEFAULT-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DEFAULT-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O3DEFAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O3DEFAULT-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DEFAULT-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DEFAULT-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O3DEFAULT-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3DEFAULT-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DEFAULT-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O3DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DEFAULT-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O3DEFAULT-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DEFAULT-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DEFAULT-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O3DEFAULT-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DEFAULT-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DEFAULT-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O3DEFAULT-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], 
i64 176 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O3DEFAULT-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DEFAULT-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DEFAULT-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O3DEFAULT-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DEFAULT-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DEFAULT-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O3DEFAULT-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DEFAULT-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O3DEFAULT-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DEFAULT-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O3DEFAULT-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DEFAULT-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DEFAULT-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O3DEFAULT-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DEFAULT-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DEFAULT-NEXT: ret i32 [[TMP46]] ; @@ -361,84 +361,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; Os-NEXT: entry: ; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; Os-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> 
zeroinitializer -; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Os-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Os-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Os-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; Os-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; Os-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Os-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; Os-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; Os-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Os-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; Os-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; Os-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Os-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Os-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Os-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Os-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; Os-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; Os-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Os-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; Os-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Os-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Os-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; Os-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Os-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Os-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Os-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Os-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; Os-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; Os-NEXT: store <4 x i32> [[TMP14]], ptr 
[[TMP15]], align 4 +; Os-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; Os-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Os-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Os-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; Os-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Os-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Os-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Os-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Os-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; Os-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; Os-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Os-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; Os-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Os-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; Os-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; Os-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; Os-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Os-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; Os-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Os-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Os-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Os-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; Os-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; Os-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Os-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; Os-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Os-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; Os-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Os-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; Os-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Os-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; Os-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Os-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; Os-NEXT: [[WIDE_LOAD1_5:%.*]] = 
load <4 x i32>, ptr [[TMP29]], align 4 +; Os-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; Os-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; Os-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Os-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; Os-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; Os-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; Os-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Os-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; Os-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Os-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; Os-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Os-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; Os-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Os-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; Os-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; Os-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; Os-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; Os-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Os-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; Os-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Os-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; Os-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Os-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; Os-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Os-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Os-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Os-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; Os-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; Os-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Os-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; Os-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Os-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Os-NEXT: ret i32 [[TMP46]] ; @@ -446,84 +446,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; Oz-NEXT: entry: ; Oz-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; Oz-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> 
[[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; Oz-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Oz-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Oz-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Oz-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; Oz-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Oz-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; Oz-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; Oz-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; Oz-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Oz-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; Oz-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; Oz-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Oz-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; Oz-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; Oz-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; Oz-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Oz-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; Oz-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Oz-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; Oz-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Oz-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; Oz-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; Oz-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Oz-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; Oz-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Oz-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; Oz-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Oz-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; Oz-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Oz-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; Oz-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Oz-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; Oz-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], 
i64 80 -; Oz-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Oz-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; Oz-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Oz-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; Oz-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Oz-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; Oz-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Oz-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; Oz-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Oz-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; Oz-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; Oz-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Oz-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; Oz-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; Oz-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; Oz-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Oz-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; Oz-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Oz-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; Oz-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Oz-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; Oz-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; Oz-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Oz-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; Oz-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Oz-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; Oz-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Oz-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; Oz-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Oz-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; Oz-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr 
[[TMP28]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; Oz-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; Oz-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; Oz-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; Oz-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; Oz-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Oz-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; Oz-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Oz-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; Oz-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Oz-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; Oz-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Oz-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; Oz-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; Oz-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Oz-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; Oz-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Oz-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Oz-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; Oz-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Oz-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; Oz-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Oz-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; Oz-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Oz-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Oz-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; Oz-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; Oz-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; Oz-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; Oz-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Oz-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Oz-NEXT: ret i32 [[TMP46]] ; @@ -531,84 +531,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; O1VEC2-NEXT: entry: ; O1VEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; 
O1VEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O1VEC2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O1VEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O1VEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O1VEC2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O1VEC2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O1VEC2-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O1VEC2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O1VEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O1VEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O1VEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O1VEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O1VEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O1VEC2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O1VEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1VEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O1VEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O1VEC2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O1VEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O1VEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O1VEC2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O1VEC2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O1VEC2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O1VEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O1VEC2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O1VEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O1VEC2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], 
[[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O1VEC2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O1VEC2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O1VEC2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O1VEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O1VEC2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O1VEC2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O1VEC2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O1VEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O1VEC2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O1VEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O1VEC2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O1VEC2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O1VEC2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O1VEC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O1VEC2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O1VEC2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O1VEC2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O1VEC2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O1VEC2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O1VEC2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O1VEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O1VEC2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O1VEC2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O1VEC2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O1VEC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O1VEC2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O1VEC2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 
-; O1VEC2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O1VEC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O1VEC2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O1VEC2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O1VEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O1VEC2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O1VEC2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O1VEC2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O1VEC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O1VEC2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O1VEC2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O1VEC2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O1VEC2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O1VEC2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O1VEC2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O1VEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O1VEC2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O1VEC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O1VEC2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O1VEC2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O1VEC2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O1VEC2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O1VEC2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O1VEC2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O1VEC2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O1VEC2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O1VEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O1VEC2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP44:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 224 ; O1VEC2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O1VEC2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O1VEC2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O1VEC2-NEXT: ret i32 [[TMP46]] ; @@ -616,84 +616,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; OzVEC2-NEXT: entry: ; OzVEC2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; OzVEC2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; OzVEC2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; OzVEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; OzVEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; OzVEC2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; OzVEC2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; OzVEC2-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; OzVEC2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; OzVEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; OzVEC2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; OzVEC2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; OzVEC2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; OzVEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; OzVEC2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; OzVEC2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; OzVEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; OzVEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; OzVEC2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; OzVEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; OzVEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; OzVEC2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP12:%.*]] = getelementptr inbounds 
i8, ptr [[A]], i64 64 -; OzVEC2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; OzVEC2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; OzVEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; OzVEC2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; OzVEC2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; OzVEC2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; OzVEC2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; OzVEC2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; OzVEC2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; OzVEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; OzVEC2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; OzVEC2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; OzVEC2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; OzVEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; OzVEC2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; OzVEC2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; OzVEC2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; OzVEC2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; OzVEC2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; OzVEC2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; OzVEC2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; OzVEC2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; OzVEC2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; OzVEC2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; OzVEC2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; OzVEC2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; OzVEC2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; OzVEC2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP26:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 128 ; OzVEC2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; OzVEC2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; OzVEC2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; OzVEC2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; OzVEC2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; OzVEC2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; OzVEC2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; OzVEC2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; OzVEC2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; OzVEC2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; OzVEC2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; OzVEC2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; OzVEC2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; OzVEC2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; OzVEC2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; OzVEC2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; OzVEC2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; OzVEC2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; OzVEC2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; OzVEC2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; OzVEC2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; OzVEC2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; OzVEC2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; OzVEC2-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; OzVEC2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; OzVEC2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; OzVEC2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; OzVEC2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; OzVEC2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 
240 -; OzVEC2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; OzVEC2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; OzVEC2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; OzVEC2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; OzVEC2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; OzVEC2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; OzVEC2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; OzVEC2-NEXT: ret i32 [[TMP46]] ; @@ -701,84 +701,84 @@ define i32 @enabled(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, ; O3DIS-NEXT: entry: ; O3DIS-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O3DIS-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O3DIS-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DIS-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DIS-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O3DIS-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DIS-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O3DIS-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O3DIS-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DIS-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O3DIS-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O3DIS-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DIS-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O3DIS-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DIS-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O3DIS-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3DIS-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O3DIS-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DIS-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O3DIS-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DIS-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP8:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 32 ; O3DIS-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O3DIS-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DIS-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O3DIS-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DIS-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O3DIS-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DIS-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O3DIS-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DIS-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O3DIS-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DIS-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O3DIS-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O3DIS-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DIS-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O3DIS-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DIS-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O3DIS-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DIS-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O3DIS-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3DIS-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O3DIS-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DIS-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O3DIS-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O3DIS-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DIS-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O3DIS-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DIS-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O3DIS-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DIS-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O3DIS-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], 
align 4 -; O3DIS-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O3DIS-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DIS-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O3DIS-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O3DIS-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DIS-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O3DIS-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DIS-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O3DIS-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DIS-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O3DIS-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DIS-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O3DIS-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DIS-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O3DIS-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O3DIS-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DIS-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O3DIS-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DIS-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O3DIS-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DIS-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O3DIS-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DIS-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O3DIS-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DIS-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O3DIS-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O3DIS-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O3DIS-NEXT: store <4 x 
i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DIS-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O3DIS-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DIS-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DIS-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O3DIS-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DIS-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O3DIS-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DIS-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O3DIS-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DIS-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DIS-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O3DIS-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O3DIS-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O3DIS-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O3DIS-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DIS-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DIS-NEXT: ret i32 [[TMP46]] ; @@ -823,84 +823,84 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O2-NEXT: entry: ; O2-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O2-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O2-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O2-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O2-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O2-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O2-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O2-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O2-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O2-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O2-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O2-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP5:%.*]] = 
getelementptr inbounds i8, ptr [[B]], i64 48 +; O2-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O2-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O2-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O2-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O2-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O2-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O2-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O2-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O2-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O2-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O2-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O2-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O2-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O2-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O2-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O2-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O2-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O2-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O2-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O2-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O2-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O2-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O2-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O2-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O2-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O2-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O2-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O2-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O2-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O2-NEXT: 
[[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O2-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O2-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O2-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O2-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O2-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O2-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O2-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O2-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O2-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O2-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O2-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O2-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O2-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O2-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O2-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O2-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O2-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O2-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O2-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O2-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O2-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O2-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O2-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O2-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O2-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O2-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O2-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O2-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O2-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O2-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O2-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O2-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O2-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O2-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O2-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O2-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O2-NEXT: store <4 x 
i32> [[TMP38]], ptr [[TMP39]], align 4 +; O2-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O2-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O2-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O2-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O2-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O2-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O2-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O2-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O2-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O2-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O2-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O2-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O2-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O2-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O2-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O2-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O2-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O2-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O2-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O2-NEXT: ret i32 [[TMP46]] ; @@ -908,84 +908,84 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O3-NEXT: entry: ; O3-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O3-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O3-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O3-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O3-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O3-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O3-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O3-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; O3-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> 
[[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O3-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O3-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O3-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O3-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O3-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O3-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O3-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O3-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O3-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O3-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O3-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O3-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O3-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O3-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O3-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3-NEXT: [[TMP23:%.*]] = add nsw <4 x 
i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O3-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O3-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O3-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O3-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O3-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O3-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O3-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O3-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O3-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O3-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O3-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O3-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O3-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O3-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; O3-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O3-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O3-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O3-NEXT: [[TMP39:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 208 -; O3-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O3-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O3-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O3-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O3-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O3-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O3-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O3-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O3-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O3-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3-NEXT: ret i32 [[TMP46]] ; @@ -993,84 +993,84 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O3DEFAULT-NEXT: entry: ; O3DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; O3DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; O3DEFAULT-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 +; O3DEFAULT-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; O3DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; O3DEFAULT-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; O3DEFAULT-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; O3DEFAULT-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; O3DEFAULT-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP5]], ptr 
[[TMP6]], align 4 -; O3DEFAULT-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; O3DEFAULT-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; O3DEFAULT-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O3DEFAULT-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; O3DEFAULT-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; O3DEFAULT-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; O3DEFAULT-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; O3DEFAULT-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; O3DEFAULT-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; O3DEFAULT-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; O3DEFAULT-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; O3DEFAULT-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; O3DEFAULT-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; O3DEFAULT-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; O3DEFAULT-NEXT: [[TMP19:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 -; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; O3DEFAULT-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; O3DEFAULT-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; O3DEFAULT-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; 
O3DEFAULT-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; O3DEFAULT-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; O3DEFAULT-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; O3DEFAULT-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; O3DEFAULT-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; O3DEFAULT-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; O3DEFAULT-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; O3DEFAULT-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; O3DEFAULT-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; O3DEFAULT-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; O3DEFAULT-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; O3DEFAULT-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; O3DEFAULT-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; O3DEFAULT-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; O3DEFAULT-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; O3DEFAULT-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; O3DEFAULT-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; O3DEFAULT-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; O3DEFAULT-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; O3DEFAULT-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], align 4 ; O3DEFAULT-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; O3DEFAULT-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; O3DEFAULT-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP36:%.*]] = getelementptr 
inbounds i8, ptr [[A]], i64 192 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; O3DEFAULT-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; O3DEFAULT-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; O3DEFAULT-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; O3DEFAULT-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; O3DEFAULT-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; O3DEFAULT-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; O3DEFAULT-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; O3DEFAULT-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; O3DEFAULT-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; O3DEFAULT-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; O3DEFAULT-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; O3DEFAULT-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; O3DEFAULT-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; O3DEFAULT-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; O3DEFAULT-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; O3DEFAULT-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; O3DEFAULT-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; O3DEFAULT-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; O3DEFAULT-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; O3DEFAULT-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; O3DEFAULT-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; O3DEFAULT-NEXT: ret i32 [[TMP46]] ; @@ -1078,84 +1078,84 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; Os-NEXT: entry: ; Os-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[N:%.*]], i64 0 ; Os-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer -; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4 -; Os-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; Os-NEXT: store <4 x i32> [[TMP0]], ptr [[A:%.*]], align 4 -; Os-NEXT: [[TMP1:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 16 -; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP1]], align 4 -; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 16 
+; Os-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[B:%.*]], i64 16 +; Os-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[B]], align 4 +; Os-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4 +; Os-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[A:%.*]], i64 16 +; Os-NEXT: store <4 x i32> [[TMP1]], ptr [[A]], align 4 ; Os-NEXT: store <4 x i32> [[TMP2]], ptr [[TMP3]], align 4 ; Os-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 32 -; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 -; Os-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP6:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 -; Os-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP6]], align 4 -; Os-NEXT: [[TMP7:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 -; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP7]], align 4 -; Os-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 48 +; Os-NEXT: [[WIDE_LOAD_1:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; Os-NEXT: [[WIDE_LOAD1_1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; Os-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_1]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 32 ; Os-NEXT: [[TMP9:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 48 -; Os-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP9]], align 4 +; Os-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP8]], align 4 +; Os-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP9]], align 4 ; Os-NEXT: [[TMP10:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 64 -; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 -; Os-NEXT: [[TMP11:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 -; Os-NEXT: store <4 x i32> [[TMP11]], ptr [[TMP12]], align 4 -; Os-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 -; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP13]], align 4 -; Os-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP11:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 80 +; Os-NEXT: [[WIDE_LOAD_2:%.*]] = load <4 x i32>, ptr [[TMP10]], align 4 +; Os-NEXT: [[WIDE_LOAD1_2:%.*]] = load <4 x i32>, ptr [[TMP11]], align 4 +; Os-NEXT: [[TMP12:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP13:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_2]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 64 ; Os-NEXT: [[TMP15:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 80 -; Os-NEXT: store <4 x i32> [[TMP14]], ptr [[TMP15]], align 4 +; Os-NEXT: store <4 x i32> [[TMP12]], ptr [[TMP14]], align 4 +; Os-NEXT: store <4 x i32> [[TMP13]], ptr [[TMP15]], align 4 ; Os-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 96 -; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 -; Os-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 -; Os-NEXT: store <4 x i32> [[TMP17]], ptr [[TMP18]], align 4 -; Os-NEXT: [[TMP19:%.*]] 
= getelementptr inbounds i8, ptr [[B]], i64 112 -; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP19]], align 4 -; Os-NEXT: [[TMP20:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP17:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 112 +; Os-NEXT: [[WIDE_LOAD_3:%.*]] = load <4 x i32>, ptr [[TMP16]], align 4 +; Os-NEXT: [[WIDE_LOAD1_3:%.*]] = load <4 x i32>, ptr [[TMP17]], align 4 +; Os-NEXT: [[TMP18:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP19:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_3]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP20:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 96 ; Os-NEXT: [[TMP21:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 112 -; Os-NEXT: store <4 x i32> [[TMP20]], ptr [[TMP21]], align 4 +; Os-NEXT: store <4 x i32> [[TMP18]], ptr [[TMP20]], align 4 +; Os-NEXT: store <4 x i32> [[TMP19]], ptr [[TMP21]], align 4 ; Os-NEXT: [[TMP22:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 128 -; Os-NEXT: [[WIDE_LOAD_8:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 -; Os-NEXT: [[TMP23:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_8]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP24:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 -; Os-NEXT: store <4 x i32> [[TMP23]], ptr [[TMP24]], align 4 -; Os-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 -; Os-NEXT: [[WIDE_LOAD_9:%.*]] = load <4 x i32>, ptr [[TMP25]], align 4 -; Os-NEXT: [[TMP26:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_9]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 144 +; Os-NEXT: [[WIDE_LOAD_4:%.*]] = load <4 x i32>, ptr [[TMP22]], align 4 +; Os-NEXT: [[WIDE_LOAD1_4:%.*]] = load <4 x i32>, ptr [[TMP23]], align 4 +; Os-NEXT: [[TMP24:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP25:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_4]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP26:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 128 ; Os-NEXT: [[TMP27:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 144 -; Os-NEXT: store <4 x i32> [[TMP26]], ptr [[TMP27]], align 4 +; Os-NEXT: store <4 x i32> [[TMP24]], ptr [[TMP26]], align 4 +; Os-NEXT: store <4 x i32> [[TMP25]], ptr [[TMP27]], align 4 ; Os-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 160 -; Os-NEXT: [[WIDE_LOAD_10:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 -; Os-NEXT: [[TMP29:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_10]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP30:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 -; Os-NEXT: store <4 x i32> [[TMP29]], ptr [[TMP30]], align 4 -; Os-NEXT: [[TMP31:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 -; Os-NEXT: [[WIDE_LOAD_11:%.*]] = load <4 x i32>, ptr [[TMP31]], align 4 -; Os-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_11]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP29:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 176 +; Os-NEXT: [[WIDE_LOAD_5:%.*]] = load <4 x i32>, ptr [[TMP28]], align 4 +; Os-NEXT: [[WIDE_LOAD1_5:%.*]] = load <4 x i32>, ptr [[TMP29]], align 4 +; Os-NEXT: [[TMP30:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP31:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_5]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP32:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 160 ; Os-NEXT: [[TMP33:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 176 -; Os-NEXT: store <4 x i32> [[TMP32]], ptr [[TMP33]], align 4 +; Os-NEXT: store <4 x i32> [[TMP30]], ptr [[TMP32]], align 4 +; Os-NEXT: store <4 x i32> [[TMP31]], ptr [[TMP33]], 
align 4 ; Os-NEXT: [[TMP34:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 192 -; Os-NEXT: [[WIDE_LOAD_12:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 -; Os-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_12]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP36:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 -; Os-NEXT: store <4 x i32> [[TMP35]], ptr [[TMP36]], align 4 -; Os-NEXT: [[TMP37:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 -; Os-NEXT: [[WIDE_LOAD_13:%.*]] = load <4 x i32>, ptr [[TMP37]], align 4 -; Os-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_13]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP35:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 208 +; Os-NEXT: [[WIDE_LOAD_6:%.*]] = load <4 x i32>, ptr [[TMP34]], align 4 +; Os-NEXT: [[WIDE_LOAD1_6:%.*]] = load <4 x i32>, ptr [[TMP35]], align 4 +; Os-NEXT: [[TMP36:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP37:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_6]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP38:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 192 ; Os-NEXT: [[TMP39:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 208 -; Os-NEXT: store <4 x i32> [[TMP38]], ptr [[TMP39]], align 4 +; Os-NEXT: store <4 x i32> [[TMP36]], ptr [[TMP38]], align 4 +; Os-NEXT: store <4 x i32> [[TMP37]], ptr [[TMP39]], align 4 ; Os-NEXT: [[TMP40:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 224 -; Os-NEXT: [[WIDE_LOAD_14:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 -; Os-NEXT: [[TMP41:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_14]], [[BROADCAST_SPLAT]] -; Os-NEXT: [[TMP42:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 -; Os-NEXT: store <4 x i32> [[TMP41]], ptr [[TMP42]], align 4 -; Os-NEXT: [[TMP43:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 -; Os-NEXT: [[WIDE_LOAD_15:%.*]] = load <4 x i32>, ptr [[TMP43]], align 4 -; Os-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_15]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP41:%.*]] = getelementptr inbounds i8, ptr [[B]], i64 240 +; Os-NEXT: [[WIDE_LOAD_7:%.*]] = load <4 x i32>, ptr [[TMP40]], align 4 +; Os-NEXT: [[WIDE_LOAD1_7:%.*]] = load <4 x i32>, ptr [[TMP41]], align 4 +; Os-NEXT: [[TMP42:%.*]] = add nsw <4 x i32> [[WIDE_LOAD_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP43:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1_7]], [[BROADCAST_SPLAT]] +; Os-NEXT: [[TMP44:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 224 ; Os-NEXT: [[TMP45:%.*]] = getelementptr inbounds i8, ptr [[A]], i64 240 -; Os-NEXT: store <4 x i32> [[TMP44]], ptr [[TMP45]], align 4 +; Os-NEXT: store <4 x i32> [[TMP42]], ptr [[TMP44]], align 4 +; Os-NEXT: store <4 x i32> [[TMP43]], ptr [[TMP45]], align 4 ; Os-NEXT: [[TMP46:%.*]] = load i32, ptr [[A]], align 4 ; Os-NEXT: ret i32 [[TMP46]] ; @@ -1186,16 +1186,24 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: vector.body: ; O1VEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; O1VEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; O1VEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 -; O1VEC2-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 -; O1VEC2-NEXT: store <4 x i32> [[TMP3]], ptr 
[[TMP5]], align 4 -; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; O1VEC2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 -; O1VEC2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; O1VEC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; O1VEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; O1VEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] +; O1VEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; O1VEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; O1VEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; O1VEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; O1VEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; O1VEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; O1VEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; O1VEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; O1VEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP10]], align 4 +; O1VEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP11]], align 4 +; O1VEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; O1VEC2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 +; O1VEC2-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; O1VEC2: middle.block: ; O1VEC2-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; O1VEC2: scalar.ph: @@ -1204,16 +1212,16 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; O1VEC2: for.body: ; O1VEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; O1VEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; O1VEC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[N]] +; O1VEC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; O1VEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[N]] ; O1VEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; O1VEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; O1VEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; O1VEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 ; O1VEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; O1VEC2: for.end: -; O1VEC2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 -; O1VEC2-NEXT: ret i32 [[TMP8]] +; O1VEC2-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +; O1VEC2-NEXT: ret i32 [[TMP14]] ; ; OzVEC2-LABEL: @nopragma( ; OzVEC2-NEXT: entry: @@ -1225,16 +1233,24 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: vector.body: ; OzVEC2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; OzVEC2-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; OzVEC2-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] -; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[TMP1]], i32 0 -; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP2]], align 4 -; OzVEC2-NEXT: [[TMP3:%.*]] 
= add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] -; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] -; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP4]], i32 0 -; OzVEC2-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP5]], align 4 -; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; OzVEC2-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 -; OzVEC2-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] +; OzVEC2-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; OzVEC2-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B:%.*]], i64 [[TMP0]] +; OzVEC2-NEXT: [[TMP3:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[TMP1]] +; OzVEC2-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 0 +; OzVEC2-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP2]], i32 4 +; OzVEC2-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP4]], align 4 +; OzVEC2-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4 +; OzVEC2-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]] +; OzVEC2-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP0]] +; OzVEC2-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[TMP1]] +; OzVEC2-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 +; OzVEC2-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP10]], align 4 +; OzVEC2-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP11]], align 4 +; OzVEC2-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 +; OzVEC2-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 64 +; OzVEC2-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; OzVEC2: middle.block: ; OzVEC2-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; OzVEC2: scalar.ph: @@ -1243,16 +1259,16 @@ define i32 @nopragma(ptr noalias nocapture %a, ptr noalias nocapture readonly %b ; OzVEC2: for.body: ; OzVEC2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; OzVEC2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[INDVARS_IV]] -; OzVEC2-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 -; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[N]] +; OzVEC2-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX]], align 4 +; OzVEC2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[N]] ; OzVEC2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[A]], i64 [[INDVARS_IV]] ; OzVEC2-NEXT: store i32 [[ADD]], ptr [[ARRAYIDX2]], align 4 ; OzVEC2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; OzVEC2-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 64 ; OzVEC2-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; OzVEC2: for.end: -; OzVEC2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A]], align 4 -; OzVEC2-NEXT: ret i32 [[TMP8]] +; OzVEC2-NEXT: [[TMP14:%.*]] = load i32, ptr [[A]], align 4 +; OzVEC2-NEXT: ret i32 [[TMP14]] ; ; O3DIS-LABEL: @nopragma( ; O3DIS-NEXT: entry: diff --git a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll index 1917023394f0a..a72e158707265 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll +++ 
b/llvm/test/Transforms/LoopVectorize/X86/strided_load_cost.ll @@ -18,7 +18,10 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP36:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP144:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP145:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI2:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP146:%.*]], [[VECTOR_BODY]] ] +; CHECK-NEXT: [[VEC_PHI3:%.*]] = phi <8 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP147:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1 ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 2 @@ -27,62 +30,179 @@ define i32 @matrix_row_col(ptr nocapture readonly %data, i32 %i, i32 %j) local_u ; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[INDEX]], 5 ; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 6 ; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[INDEX]], 7 -; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA:%.*]], i64 [[IDXPROM]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds i32, ptr [[TMP8]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP9]], align 4, !tbaa [[TBAA1:![0-9]+]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP3]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP4]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP5]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP6]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP7]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP18:%.*]] = load i32, ptr [[TMP10]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP19:%.*]] = load i32, ptr [[TMP11]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP20:%.*]] = load i32, ptr [[TMP12]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP13]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TMP14]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP23:%.*]] = load i32, ptr [[TMP15]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP16]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP25:%.*]] = load i32, ptr [[TMP17]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[TMP26:%.*]] = insertelement <8 x i32> poison, i32 [[TMP18]], i32 0 -; CHECK-NEXT: [[TMP27:%.*]] = insertelement <8 x i32> [[TMP26]], i32 [[TMP19]], i32 1 -; CHECK-NEXT: [[TMP28:%.*]] = insertelement <8 x i32> [[TMP27]], i32 [[TMP20]], i32 2 -; CHECK-NEXT: [[TMP29:%.*]] = insertelement <8 x i32> [[TMP28]], i32 [[TMP21]], i32 3 -; CHECK-NEXT: [[TMP30:%.*]] = insertelement <8 x 
i32> [[TMP29]], i32 [[TMP22]], i32 4 -; CHECK-NEXT: [[TMP31:%.*]] = insertelement <8 x i32> [[TMP30]], i32 [[TMP23]], i32 5 -; CHECK-NEXT: [[TMP32:%.*]] = insertelement <8 x i32> [[TMP31]], i32 [[TMP24]], i32 6 -; CHECK-NEXT: [[TMP33:%.*]] = insertelement <8 x i32> [[TMP32]], i32 [[TMP25]], i32 7 -; CHECK-NEXT: [[TMP34:%.*]] = mul nsw <8 x i32> [[TMP33]], [[WIDE_LOAD]] -; CHECK-NEXT: [[TMP35:%.*]] = add <8 x i32> [[VEC_PHI]], -; CHECK-NEXT: [[TMP36]] = add <8 x i32> [[TMP35]], [[TMP34]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 -; CHECK-NEXT: [[TMP37:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 -; CHECK-NEXT: br i1 [[TMP37]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[INDEX]], 9 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[INDEX]], 10 +; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 11 +; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[INDEX]], 13 +; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[INDEX]], 14 +; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[INDEX]], 15 +; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP17:%.*]] = add i64 [[INDEX]], 17 +; CHECK-NEXT: [[TMP18:%.*]] = add i64 [[INDEX]], 18 +; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 19 +; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 20 +; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 21 +; CHECK-NEXT: [[TMP22:%.*]] = add i64 [[INDEX]], 22 +; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[INDEX]], 23 +; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], 24 +; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 25 +; CHECK-NEXT: [[TMP26:%.*]] = add i64 [[INDEX]], 26 +; CHECK-NEXT: [[TMP27:%.*]] = add i64 [[INDEX]], 27 +; CHECK-NEXT: [[TMP28:%.*]] = add i64 [[INDEX]], 28 +; CHECK-NEXT: [[TMP29:%.*]] = add i64 [[INDEX]], 29 +; CHECK-NEXT: [[TMP30:%.*]] = add i64 [[INDEX]], 30 +; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[INDEX]], 31 +; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA:%.*]], i64 [[IDXPROM]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP8]] +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP16]] +; CHECK-NEXT: [[TMP35:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[TMP24]] +; CHECK-NEXT: [[TMP36:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 0 +; CHECK-NEXT: [[TMP37:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 8 +; CHECK-NEXT: [[TMP38:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 16 +; CHECK-NEXT: [[TMP39:%.*]] = getelementptr inbounds i32, ptr [[TMP32]], i32 24 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[TMP36]], align 4, !tbaa [[TBAA1:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <8 x i32>, ptr [[TMP37]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <8 x i32>, ptr [[TMP38]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <8 x i32>, ptr [[TMP39]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP40:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP0]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP1]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP42:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP2]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], 
i64 [[TMP3]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP44:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP4]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP45:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP5]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP46:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP6]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP7]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP8]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP49:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP9]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP50:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP10]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP51:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP11]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP12]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP13]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP54:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP14]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP55:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP15]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP56:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP16]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP57:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP17]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP58:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP18]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP19]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP20]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP61:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP21]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP62:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP22]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP23]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP64:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP24]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP65:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP25]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP66:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP26]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP27]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP68:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP28]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP69:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP29]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP70:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP30]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP71:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[TMP31]], i64 [[IDXPROM5]] +; CHECK-NEXT: [[TMP72:%.*]] = load i32, ptr [[TMP40]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP73:%.*]] = load i32, ptr [[TMP41]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP74:%.*]] = load i32, ptr [[TMP42]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP75:%.*]] = load i32, ptr [[TMP43]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP76:%.*]] = load i32, 
ptr [[TMP44]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP77:%.*]] = load i32, ptr [[TMP45]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP78:%.*]] = load i32, ptr [[TMP46]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP79:%.*]] = load i32, ptr [[TMP47]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP80:%.*]] = insertelement <8 x i32> poison, i32 [[TMP72]], i32 0 +; CHECK-NEXT: [[TMP81:%.*]] = insertelement <8 x i32> [[TMP80]], i32 [[TMP73]], i32 1 +; CHECK-NEXT: [[TMP82:%.*]] = insertelement <8 x i32> [[TMP81]], i32 [[TMP74]], i32 2 +; CHECK-NEXT: [[TMP83:%.*]] = insertelement <8 x i32> [[TMP82]], i32 [[TMP75]], i32 3 +; CHECK-NEXT: [[TMP84:%.*]] = insertelement <8 x i32> [[TMP83]], i32 [[TMP76]], i32 4 +; CHECK-NEXT: [[TMP85:%.*]] = insertelement <8 x i32> [[TMP84]], i32 [[TMP77]], i32 5 +; CHECK-NEXT: [[TMP86:%.*]] = insertelement <8 x i32> [[TMP85]], i32 [[TMP78]], i32 6 +; CHECK-NEXT: [[TMP87:%.*]] = insertelement <8 x i32> [[TMP86]], i32 [[TMP79]], i32 7 +; CHECK-NEXT: [[TMP88:%.*]] = load i32, ptr [[TMP48]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP89:%.*]] = load i32, ptr [[TMP49]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP90:%.*]] = load i32, ptr [[TMP50]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP91:%.*]] = load i32, ptr [[TMP51]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP92:%.*]] = load i32, ptr [[TMP52]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP93:%.*]] = load i32, ptr [[TMP53]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP94:%.*]] = load i32, ptr [[TMP54]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP95:%.*]] = load i32, ptr [[TMP55]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP96:%.*]] = insertelement <8 x i32> poison, i32 [[TMP88]], i32 0 +; CHECK-NEXT: [[TMP97:%.*]] = insertelement <8 x i32> [[TMP96]], i32 [[TMP89]], i32 1 +; CHECK-NEXT: [[TMP98:%.*]] = insertelement <8 x i32> [[TMP97]], i32 [[TMP90]], i32 2 +; CHECK-NEXT: [[TMP99:%.*]] = insertelement <8 x i32> [[TMP98]], i32 [[TMP91]], i32 3 +; CHECK-NEXT: [[TMP100:%.*]] = insertelement <8 x i32> [[TMP99]], i32 [[TMP92]], i32 4 +; CHECK-NEXT: [[TMP101:%.*]] = insertelement <8 x i32> [[TMP100]], i32 [[TMP93]], i32 5 +; CHECK-NEXT: [[TMP102:%.*]] = insertelement <8 x i32> [[TMP101]], i32 [[TMP94]], i32 6 +; CHECK-NEXT: [[TMP103:%.*]] = insertelement <8 x i32> [[TMP102]], i32 [[TMP95]], i32 7 +; CHECK-NEXT: [[TMP104:%.*]] = load i32, ptr [[TMP56]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP105:%.*]] = load i32, ptr [[TMP57]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP106:%.*]] = load i32, ptr [[TMP58]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP107:%.*]] = load i32, ptr [[TMP59]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP108:%.*]] = load i32, ptr [[TMP60]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP109:%.*]] = load i32, ptr [[TMP61]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP110:%.*]] = load i32, ptr [[TMP62]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP111:%.*]] = load i32, ptr [[TMP63]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP112:%.*]] = insertelement <8 x i32> poison, i32 [[TMP104]], i32 0 +; CHECK-NEXT: [[TMP113:%.*]] = insertelement <8 x i32> [[TMP112]], i32 [[TMP105]], i32 1 +; CHECK-NEXT: [[TMP114:%.*]] = insertelement <8 x i32> [[TMP113]], i32 [[TMP106]], i32 2 +; CHECK-NEXT: [[TMP115:%.*]] = insertelement <8 x i32> [[TMP114]], i32 [[TMP107]], i32 3 +; CHECK-NEXT: [[TMP116:%.*]] = insertelement <8 x i32> [[TMP115]], i32 [[TMP108]], i32 4 +; CHECK-NEXT: [[TMP117:%.*]] = insertelement <8 x i32> [[TMP116]], i32 [[TMP109]], i32 5 +; CHECK-NEXT: [[TMP118:%.*]] = 
insertelement <8 x i32> [[TMP117]], i32 [[TMP110]], i32 6 +; CHECK-NEXT: [[TMP119:%.*]] = insertelement <8 x i32> [[TMP118]], i32 [[TMP111]], i32 7 +; CHECK-NEXT: [[TMP120:%.*]] = load i32, ptr [[TMP64]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP121:%.*]] = load i32, ptr [[TMP65]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP122:%.*]] = load i32, ptr [[TMP66]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP123:%.*]] = load i32, ptr [[TMP67]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP124:%.*]] = load i32, ptr [[TMP68]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP125:%.*]] = load i32, ptr [[TMP69]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP126:%.*]] = load i32, ptr [[TMP70]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP127:%.*]] = load i32, ptr [[TMP71]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP128:%.*]] = insertelement <8 x i32> poison, i32 [[TMP120]], i32 0 +; CHECK-NEXT: [[TMP129:%.*]] = insertelement <8 x i32> [[TMP128]], i32 [[TMP121]], i32 1 +; CHECK-NEXT: [[TMP130:%.*]] = insertelement <8 x i32> [[TMP129]], i32 [[TMP122]], i32 2 +; CHECK-NEXT: [[TMP131:%.*]] = insertelement <8 x i32> [[TMP130]], i32 [[TMP123]], i32 3 +; CHECK-NEXT: [[TMP132:%.*]] = insertelement <8 x i32> [[TMP131]], i32 [[TMP124]], i32 4 +; CHECK-NEXT: [[TMP133:%.*]] = insertelement <8 x i32> [[TMP132]], i32 [[TMP125]], i32 5 +; CHECK-NEXT: [[TMP134:%.*]] = insertelement <8 x i32> [[TMP133]], i32 [[TMP126]], i32 6 +; CHECK-NEXT: [[TMP135:%.*]] = insertelement <8 x i32> [[TMP134]], i32 [[TMP127]], i32 7 +; CHECK-NEXT: [[TMP136:%.*]] = mul nsw <8 x i32> [[TMP87]], [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP137:%.*]] = mul nsw <8 x i32> [[TMP103]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP138:%.*]] = mul nsw <8 x i32> [[TMP119]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP139:%.*]] = mul nsw <8 x i32> [[TMP135]], [[WIDE_LOAD6]] +; CHECK-NEXT: [[TMP140:%.*]] = add <8 x i32> [[VEC_PHI]], +; CHECK-NEXT: [[TMP141:%.*]] = add <8 x i32> [[VEC_PHI1]], +; CHECK-NEXT: [[TMP142:%.*]] = add <8 x i32> [[VEC_PHI2]], +; CHECK-NEXT: [[TMP143:%.*]] = add <8 x i32> [[VEC_PHI3]], +; CHECK-NEXT: [[TMP144]] = add <8 x i32> [[TMP140]], [[TMP136]] +; CHECK-NEXT: [[TMP145]] = add <8 x i32> [[TMP141]], [[TMP137]] +; CHECK-NEXT: [[TMP146]] = add <8 x i32> [[TMP142]], [[TMP138]] +; CHECK-NEXT: [[TMP147]] = add <8 x i32> [[TMP143]], [[TMP139]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32 +; CHECK-NEXT: [[TMP148:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96 +; CHECK-NEXT: br i1 [[TMP148]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[TMP38:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[TMP36]]) +; CHECK-NEXT: [[BIN_RDX:%.*]] = add <8 x i32> [[TMP145]], [[TMP144]] +; CHECK-NEXT: [[BIN_RDX7:%.*]] = add <8 x i32> [[TMP146]], [[BIN_RDX]] +; CHECK-NEXT: [[BIN_RDX8:%.*]] = add <8 x i32> [[TMP147]], [[BIN_RDX7]] +; CHECK-NEXT: [[TMP149:%.*]] = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> [[BIN_RDX8]]) ; CHECK-NEXT: br i1 false, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 96, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] -; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP38]], [[MIDDLE_BLOCK]] ] +; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], [[FOR_BODY]] ], [ [[TMP38]], [[MIDDLE_BLOCK]] ] +; 
CHECK-NEXT: [[ADD7_LCSSA:%.*]] = phi i32 [ [[ADD7:%.*]], [[FOR_BODY]] ], [ [[TMP149]], [[MIDDLE_BLOCK]] ] ; CHECK-NEXT: ret i32 [[ADD7_LCSSA]] ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM_015:%.*]] = phi i32 [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ], [ [[ADD7]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[IDXPROM]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP39:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[TMP150:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4, !tbaa [[TBAA1]] ; CHECK-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds [100 x i32], ptr [[DATA]], i64 [[INDVARS_IV]], i64 [[IDXPROM5]] -; CHECK-NEXT: [[TMP40:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA1]] -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP40]], [[TMP39]] +; CHECK-NEXT: [[TMP151:%.*]] = load i32, ptr [[ARRAYIDX6]], align 4, !tbaa [[TBAA1]] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP151]], [[TMP150]] ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUM_015]], 4 ; CHECK-NEXT: [[ADD7]] = add i32 [[ADD]], [[MUL]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100 -; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] +; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; entry: %idxprom = sext i32 %i to i64 diff --git a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll index 5b79d6af9ed9c..76230994ed87d 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/unroll-small-loops.ll @@ -41,14 +41,20 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop 4 times since TC being a multiple of VF means +; We should unroll this loop 4 times since TC being a multiple of VF means ; that the epilogue loop may not need to run, making it profitable for ; the vector loop to run even once ; ; CHECK-VECTOR-LABEL: @foo_trip_count_16( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; @@ -77,14 +83,20 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop twice since TC not being a multiple of VF may require -; the epilogue loop to run, making it profitable when the vector loop runs -; at least twice. 
+; We should unroll this loop four times since unrolling it twice +; will produce the same epilogue TC of 1, making a larger unroll count +; more profitable ; ; CHECK-VECTOR-LABEL: @foo_trip_count_17( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; @@ -113,15 +125,16 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop 4 times since TC being a multiple of VF means -; that the epilogue loop may not need to run, making it profitable for -; the vector loop to run even once. The IC is restricted to 4 since -; that is the maximum supported for the target. +; We should unroll this loop twice since unrolling four times will +; create an epilogue loop of TC 8, while unrolling it twice will +; eliminate the epilogue loop altogether ; ; CHECK-VECTOR-LABEL: @foo_trip_count_24( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; @@ -150,14 +163,16 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop twice since TC not being a multiple of VF may require +; We should unroll this loop twice since TC not being a multiple of VF may require ; the epilogue loop to run, making it profitable when the vector loop runs ; at least twice. ; ; CHECK-VECTOR-LABEL: @foo_trip_count_25( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; @@ -186,14 +201,20 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop 4 times since TC not being a multiple of VF may require +; We should unroll this loop 4 times since TC not being a multiple of VF may require ; the epilogue loop to run, making it profitable when the vector loop runs ; at least twice. ; ; CHECK-VECTOR-LABEL: @foo_trip_count_33( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; @@ -222,15 +243,21 @@ for.end: ; preds = %for.body ret void } -; TODO: We should unroll this loop 4 times since TC not being a multiple of VF may require +; We should unroll this loop 4 times since TC not being a multiple of VF may require ; the epilogue loop to run, making it profitable when the vector loop runs ; at least twice. The IC is restricted to 4 since that is the maximum supported ; for the target. 
; ; CHECK-VECTOR-LABEL: @foo_trip_count_101( ; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> +; CHECK-VECTOR: load <4 x i32> ; CHECK-VECTOR-NOT: load <4 x i32> ; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> +; CHECK-VECTOR: store <4 x i32> ; CHECK-VECTOR-NOT: store <4 x i32> ; CHECK-VECTOR: ret ; diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll index 3c2ea22e799dc..daa35d31f2e0c 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.small-tc.ll @@ -27,29 +27,56 @@ define void @vectorized(ptr noalias nocapture %A, ptr noalias nocapture readonly ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0 -; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP2]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] -; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP0]] -; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[TMP3]], i32 0 -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[TMP5:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD1]] -; CHECK-NEXT: store <4 x float> [[TMP5]], ptr [[TMP4]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4 -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], 20 -; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4 +; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[INDEX]], 8 +; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[INDEX]], 12 +; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds float, ptr [[B:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 0 +; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 4 +; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 8 +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds float, ptr [[TMP4]], i32 12 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !llvm.access.group [[ACC_GRP0:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x float>, ptr [[TMP9]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD2:%.*]] = load <4 x float>, ptr [[TMP10]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x float>, ptr [[TMP11]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[TMP0]] +; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP1]] +; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP2]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds float, 
ptr [[A]], i64 [[TMP3]] +; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 0 +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 4 +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 8 +; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds float, ptr [[TMP12]], i32 12 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <4 x float>, ptr [[TMP16]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <4 x float>, ptr [[TMP17]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[TMP18]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[TMP19]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[TMP20:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP21:%.*]] = fadd fast <4 x float> [[WIDE_LOAD1]], [[WIDE_LOAD5]] +; CHECK-NEXT: [[TMP22:%.*]] = fadd fast <4 x float> [[WIDE_LOAD2]], [[WIDE_LOAD6]] +; CHECK-NEXT: [[TMP23:%.*]] = fadd fast <4 x float> [[WIDE_LOAD3]], [[WIDE_LOAD7]] +; CHECK-NEXT: store <4 x float> [[TMP20]], ptr [[TMP16]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP21]], ptr [[TMP17]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP22]], ptr [[TMP18]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: store <4 x float> [[TMP23]], ptr [[TMP19]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16 +; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i64 [[INDEX_NEXT]], 16 +; CHECK-NEXT: br i1 [[TMP24]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP1:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]] +; CHECK-NEXT: br i1 false, label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: -; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 20, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] +; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 16, [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: br label [[FOR_BODY:%.*]] ; CHECK: for.body: ; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP7:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[TMP25:%.*]] = load float, ptr [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[INDVARS_IV]] -; CHECK-NEXT: [[TMP8:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] -; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP7]], [[TMP8]] +; CHECK-NEXT: [[TMP26:%.*]] = load float, ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] +; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP25]], [[TMP26]] ; CHECK-NEXT: store float [[ADD]], ptr [[ARRAYIDX2]], align 4, !llvm.access.group [[ACC_GRP0]] ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 20 diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-loopid-dbg.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-loopid-dbg.ll index 4dfbdf1adee2c..d774f778b7fdc 100644 --- 
a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-loopid-dbg.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks-loopid-dbg.ll @@ -6,7 +6,7 @@ ; DEBUG-OUTPUT-NOT: .loc ; DEBUG-OUTPUT-NOT: {{.*}}.debug_info -; VECTORIZED: remark: vectorization-remarks.c:17:8: vectorized loop (vectorization width: 4, interleaved count: 1) +; VECTORIZED: remark: vectorization-remarks.c:17:8: vectorized loop (vectorization width: 4, interleaved count: 2) ; UNROLLED: remark: vectorization-remarks.c:17:8: interleaved loop (interleaved count: 4) ; NONE: remark: vectorization-remarks.c:17:8: loop not vectorized: vectorization and interleaving are explicitly disabled, or the loop has already been vectorized diff --git a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll index 63ed666eb6243..f0b960c640562 100644 --- a/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll +++ b/llvm/test/Transforms/LoopVectorize/X86/vectorization-remarks.ll @@ -6,7 +6,7 @@ ; DEBUG-OUTPUT-NOT: .loc ; DEBUG-OUTPUT-NOT: {{.*}}.debug_info -; VECTORIZED: remark: vectorization-remarks.c:17:8: vectorized loop (vectorization width: 4, interleaved count: 1) +; VECTORIZED: remark: vectorization-remarks.c:17:8: vectorized loop (vectorization width: 4, interleaved count: 2) ; UNROLLED: remark: vectorization-remarks.c:17:8: interleaved loop (interleaved count: 4) ; NONE: remark: vectorization-remarks.c:17:8: loop not vectorized: vectorization and interleaving are explicitly disabled, or the loop has already been vectorized diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll index 7b9f3a2e13a64..d18b207e87076 100644 --- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll +++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll @@ -22,62 +22,62 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT27]] ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[DCT]], align 2, !alias.scope !0, !noalias !3 -; CHECK-NEXT: [[TMP0:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32> -; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[WIDE_LOAD28:%.*]] = load <8 x i16>, ptr [[BIAS]], align 2, !alias.scope !6 -; CHECK-NEXT: [[TMP2:%.*]] = zext <8 x i16> [[WIDE_LOAD28]] to <8 x i32> -; CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <8 x i16>, ptr [[MF]], align 2, !alias.scope !7 -; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[WIDE_LOAD29]] to <8 x i32> -; CHECK-NEXT: [[TMP4:%.*]] = sub nsw <8 x i32> [[TMP2]], [[TMP0]] -; CHECK-NEXT: [[TMP5:%.*]] = mul <8 x i32> [[TMP4]], [[TMP3]] -; CHECK-NEXT: [[TMP6:%.*]] = lshr <8 x i32> [[TMP5]], -; CHECK-NEXT: [[TMP7:%.*]] = trunc <8 x i32> [[TMP6]] to <8 x i16> -; CHECK-NEXT: [[TMP8:%.*]] = sub <8 x i16> zeroinitializer, [[TMP7]] -; CHECK-NEXT: [[TMP9:%.*]] = add nuw nsw <8 x i32> [[TMP2]], [[TMP0]] -; CHECK-NEXT: [[TMP10:%.*]] = mul <8 x i32> [[TMP9]], [[TMP3]] -; CHECK-NEXT: [[TMP11:%.*]] = lshr <8 x i32> [[TMP10]], -; CHECK-NEXT: [[TMP12:%.*]] = trunc <8 x i32> [[TMP11]] to <8 x i16> -; CHECK-NEXT: [[PREDPHI:%.*]] = select <8 x i1> [[TMP1]], <8 x i16> [[TMP12]], <8 x i16> [[TMP8]] -; CHECK-NEXT: store <8 x i16> [[PREDPHI]], ptr [[DCT]], align 2, !alias.scope !0, 
!noalias !3 -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 16 -; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <8 x i16>, ptr [[TMP13]], align 2, !alias.scope !0, !noalias !3 -; CHECK-NEXT: [[TMP14:%.*]] = sext <8 x i16> [[WIDE_LOAD_1]] to <8 x i32> -; CHECK-NEXT: [[TMP15:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD_1]], zeroinitializer -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 16 -; CHECK-NEXT: [[WIDE_LOAD28_1:%.*]] = load <8 x i16>, ptr [[TMP16]], align 2, !alias.scope !6 -; CHECK-NEXT: [[TMP17:%.*]] = zext <8 x i16> [[WIDE_LOAD28_1]] to <8 x i32> -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 16 -; CHECK-NEXT: [[WIDE_LOAD29_1:%.*]] = load <8 x i16>, ptr [[TMP18]], align 2, !alias.scope !7 -; CHECK-NEXT: [[TMP19:%.*]] = zext <8 x i16> [[WIDE_LOAD29_1]] to <8 x i32> -; CHECK-NEXT: [[TMP20:%.*]] = sub nsw <8 x i32> [[TMP17]], [[TMP14]] -; CHECK-NEXT: [[TMP21:%.*]] = mul <8 x i32> [[TMP20]], [[TMP19]] -; CHECK-NEXT: [[TMP22:%.*]] = lshr <8 x i32> [[TMP21]], -; CHECK-NEXT: [[TMP23:%.*]] = trunc <8 x i32> [[TMP22]] to <8 x i16> -; CHECK-NEXT: [[TMP24:%.*]] = sub <8 x i16> zeroinitializer, [[TMP23]] -; CHECK-NEXT: [[TMP25:%.*]] = add nuw nsw <8 x i32> [[TMP17]], [[TMP14]] -; CHECK-NEXT: [[TMP26:%.*]] = mul <8 x i32> [[TMP25]], [[TMP19]] -; CHECK-NEXT: [[TMP27:%.*]] = lshr <8 x i32> [[TMP26]], -; CHECK-NEXT: [[TMP28:%.*]] = trunc <8 x i32> [[TMP27]] to <8 x i16> -; CHECK-NEXT: [[PREDPHI_1:%.*]] = select <8 x i1> [[TMP15]], <8 x i16> [[TMP28]], <8 x i16> [[TMP24]] -; CHECK-NEXT: store <8 x i16> [[PREDPHI_1]], ptr [[TMP13]], align 2, !alias.scope !0, !noalias !3 -; CHECK-NEXT: [[TMP29:%.*]] = or <8 x i16> [[PREDPHI]], [[PREDPHI_1]] -; CHECK-NEXT: [[TMP30:%.*]] = sext <8 x i16> [[TMP29]] to <8 x i32> -; CHECK-NEXT: [[TMP31:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[TMP30]]) +; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 16 +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[DCT]], align 2, !alias.scope [[META0:![0-9]+]], !noalias [[META3:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD29:%.*]] = load <8 x i16>, ptr [[TMP0]], align 2, !alias.scope [[META0]], !noalias [[META3]] +; CHECK-NEXT: [[TMP1:%.*]] = sext <8 x i16> [[WIDE_LOAD]] to <8 x i32> +; CHECK-NEXT: [[TMP2:%.*]] = sext <8 x i16> [[WIDE_LOAD29]] to <8 x i32> +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD29]], zeroinitializer +; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 16 +; CHECK-NEXT: [[WIDE_LOAD30:%.*]] = load <8 x i16>, ptr [[BIAS]], align 2, !alias.scope [[META6:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD31:%.*]] = load <8 x i16>, ptr [[TMP5]], align 2, !alias.scope [[META6]] +; CHECK-NEXT: [[TMP6:%.*]] = zext <8 x i16> [[WIDE_LOAD30]] to <8 x i32> +; CHECK-NEXT: [[TMP7:%.*]] = zext <8 x i16> [[WIDE_LOAD31]] to <8 x i32> +; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 16 +; CHECK-NEXT: [[WIDE_LOAD32:%.*]] = load <8 x i16>, ptr [[MF]], align 2, !alias.scope [[META7:![0-9]+]] +; CHECK-NEXT: [[WIDE_LOAD33:%.*]] = load <8 x i16>, ptr [[TMP8]], align 2, !alias.scope [[META7]] +; CHECK-NEXT: [[TMP9:%.*]] = zext <8 x i16> [[WIDE_LOAD32]] to <8 x i32> +; CHECK-NEXT: [[TMP10:%.*]] = zext <8 x i16> [[WIDE_LOAD33]] to <8 x i32> +; CHECK-NEXT: [[TMP11:%.*]] = sub nsw <8 x i32> [[TMP6]], [[TMP1]] +; CHECK-NEXT: [[TMP12:%.*]] = sub nsw <8 x i32> [[TMP7]], [[TMP2]] +; CHECK-NEXT: [[TMP13:%.*]] 
= mul <8 x i32> [[TMP11]], [[TMP9]] +; CHECK-NEXT: [[TMP14:%.*]] = mul <8 x i32> [[TMP12]], [[TMP10]] +; CHECK-NEXT: [[TMP15:%.*]] = lshr <8 x i32> [[TMP13]], +; CHECK-NEXT: [[TMP16:%.*]] = lshr <8 x i32> [[TMP14]], +; CHECK-NEXT: [[TMP17:%.*]] = trunc <8 x i32> [[TMP15]] to <8 x i16> +; CHECK-NEXT: [[TMP18:%.*]] = trunc <8 x i32> [[TMP16]] to <8 x i16> +; CHECK-NEXT: [[TMP19:%.*]] = sub <8 x i16> zeroinitializer, [[TMP17]] +; CHECK-NEXT: [[TMP20:%.*]] = sub <8 x i16> zeroinitializer, [[TMP18]] +; CHECK-NEXT: [[TMP21:%.*]] = add nuw nsw <8 x i32> [[TMP6]], [[TMP1]] +; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw <8 x i32> [[TMP7]], [[TMP2]] +; CHECK-NEXT: [[TMP23:%.*]] = mul <8 x i32> [[TMP21]], [[TMP9]] +; CHECK-NEXT: [[TMP24:%.*]] = mul <8 x i32> [[TMP22]], [[TMP10]] +; CHECK-NEXT: [[TMP25:%.*]] = lshr <8 x i32> [[TMP23]], +; CHECK-NEXT: [[TMP26:%.*]] = lshr <8 x i32> [[TMP24]], +; CHECK-NEXT: [[TMP27:%.*]] = trunc <8 x i32> [[TMP25]] to <8 x i16> +; CHECK-NEXT: [[TMP28:%.*]] = trunc <8 x i32> [[TMP26]] to <8 x i16> +; CHECK-NEXT: [[PREDPHI:%.*]] = select <8 x i1> [[TMP3]], <8 x i16> [[TMP27]], <8 x i16> [[TMP19]] +; CHECK-NEXT: [[PREDPHI34:%.*]] = select <8 x i1> [[TMP4]], <8 x i16> [[TMP28]], <8 x i16> [[TMP20]] +; CHECK-NEXT: store <8 x i16> [[PREDPHI]], ptr [[DCT]], align 2, !alias.scope [[META0]], !noalias [[META3]] +; CHECK-NEXT: store <8 x i16> [[PREDPHI34]], ptr [[TMP0]], align 2, !alias.scope [[META0]], !noalias [[META3]] +; CHECK-NEXT: [[BIN_RDX35:%.*]] = or <8 x i16> [[PREDPHI34]], [[PREDPHI]] +; CHECK-NEXT: [[BIN_RDX:%.*]] = sext <8 x i16> [[BIN_RDX35]] to <8 x i32> +; CHECK-NEXT: [[TMP29:%.*]] = tail call i32 @llvm.vector.reduce.or.v8i32(<8 x i32> [[BIN_RDX]]) ; CHECK-NEXT: br label [[FOR_COND_CLEANUP:%.*]] ; CHECK: for.cond.cleanup: -; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[TMP31]], [[VECTOR_BODY]] ], [ [[OR_15:%.*]], [[IF_END_15:%.*]] ] +; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[TMP29]], [[VECTOR_BODY]] ], [ [[OR_15:%.*]], [[IF_END_15:%.*]] ] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[OR_LCSSA]], 0 ; CHECK-NEXT: [[LNOT_EXT:%.*]] = zext i1 [[TOBOOL]] to i32 ; CHECK-NEXT: ret i32 [[LNOT_EXT]] ; CHECK: for.body: -; CHECK-NEXT: [[TMP32:%.*]] = load i16, ptr [[DCT]], align 2 -; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP32]] to i32 -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP32]], 0 -; CHECK-NEXT: [[TMP33:%.*]] = load i16, ptr [[BIAS]], align 2 -; CHECK-NEXT: [[CONV5:%.*]] = zext i16 [[TMP33]] to i32 -; CHECK-NEXT: [[TMP34:%.*]] = load i16, ptr [[MF]], align 2 -; CHECK-NEXT: [[CONV11:%.*]] = zext i16 [[TMP34]] to i32 +; CHECK-NEXT: [[TMP30:%.*]] = load i16, ptr [[DCT]], align 2 +; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[TMP30]] to i32 +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP30]], 0 +; CHECK-NEXT: [[TMP31:%.*]] = load i16, ptr [[BIAS]], align 2 +; CHECK-NEXT: [[CONV5:%.*]] = zext i16 [[TMP31]] to i32 +; CHECK-NEXT: [[TMP32:%.*]] = load i16, ptr [[MF]], align 2 +; CHECK-NEXT: [[CONV11:%.*]] = zext i16 [[TMP32]] to i32 ; CHECK-NEXT: br i1 [[CMP1]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] ; CHECK: if.then: ; CHECK-NEXT: [[ADD:%.*]] = add nuw nsw i32 [[CONV5]], [[CONV]] @@ -89,29 +89,29 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK-NEXT: [[ADD21:%.*]] = sub nsw i32 [[CONV5]], [[CONV]] ; CHECK-NEXT: [[MUL25:%.*]] = mul i32 [[ADD21]], [[CONV11]] ; CHECK-NEXT: [[SHR26:%.*]] = lshr i32 [[MUL25]], 16 -; CHECK-NEXT: [[TMP35:%.*]] = trunc i32 [[SHR26]] to i16 -; CHECK-NEXT: [[CONV28:%.*]] = sub i16 0, [[TMP35]] +; CHECK-NEXT: 
[[TMP33:%.*]] = trunc i32 [[SHR26]] to i16 +; CHECK-NEXT: [[CONV28:%.*]] = sub i16 0, [[TMP33]] ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: [[STOREMERGE:%.*]] = phi i16 [ [[CONV28]], [[IF_ELSE]] ], [ [[CONV12]], [[IF_THEN]] ] ; CHECK-NEXT: store i16 [[STOREMERGE]], ptr [[DCT]], align 2 ; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 2 -; CHECK-NEXT: [[TMP36:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 2 -; CHECK-NEXT: [[CONV_1:%.*]] = sext i16 [[TMP36]] to i32 -; CHECK-NEXT: [[CMP1_1:%.*]] = icmp sgt i16 [[TMP36]], 0 +; CHECK-NEXT: [[TMP34:%.*]] = load i16, ptr [[ARRAYIDX_1]], align 2 +; CHECK-NEXT: [[CONV_1:%.*]] = sext i16 [[TMP34]] to i32 +; CHECK-NEXT: [[CMP1_1:%.*]] = icmp sgt i16 [[TMP34]], 0 ; CHECK-NEXT: [[ARRAYIDX4_1:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 2 -; CHECK-NEXT: [[TMP37:%.*]] = load i16, ptr [[ARRAYIDX4_1]], align 2 -; CHECK-NEXT: [[CONV5_1:%.*]] = zext i16 [[TMP37]] to i32 +; CHECK-NEXT: [[TMP35:%.*]] = load i16, ptr [[ARRAYIDX4_1]], align 2 +; CHECK-NEXT: [[CONV5_1:%.*]] = zext i16 [[TMP35]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_1:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 2 -; CHECK-NEXT: [[TMP38:%.*]] = load i16, ptr [[ARRAYIDX10_1]], align 2 -; CHECK-NEXT: [[CONV11_1:%.*]] = zext i16 [[TMP38]] to i32 +; CHECK-NEXT: [[TMP36:%.*]] = load i16, ptr [[ARRAYIDX10_1]], align 2 +; CHECK-NEXT: [[CONV11_1:%.*]] = zext i16 [[TMP36]] to i32 ; CHECK-NEXT: br i1 [[CMP1_1]], label [[IF_THEN_1:%.*]], label [[IF_ELSE_1:%.*]] ; CHECK: if.else.1: ; CHECK-NEXT: [[ADD21_1:%.*]] = sub nsw i32 [[CONV5_1]], [[CONV_1]] ; CHECK-NEXT: [[MUL25_1:%.*]] = mul i32 [[ADD21_1]], [[CONV11_1]] ; CHECK-NEXT: [[SHR26_1:%.*]] = lshr i32 [[MUL25_1]], 16 -; CHECK-NEXT: [[TMP39:%.*]] = trunc i32 [[SHR26_1]] to i16 -; CHECK-NEXT: [[CONV28_1:%.*]] = sub i16 0, [[TMP39]] +; CHECK-NEXT: [[TMP37:%.*]] = trunc i32 [[SHR26_1]] to i16 +; CHECK-NEXT: [[CONV28_1:%.*]] = sub i16 0, [[TMP37]] ; CHECK-NEXT: br label [[IF_END_1:%.*]] ; CHECK: if.then.1: ; CHECK-NEXT: [[ADD_1:%.*]] = add nuw nsw i32 [[CONV5_1]], [[CONV_1]] @@ -122,24 +122,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.1: ; CHECK-NEXT: [[STOREMERGE_1:%.*]] = phi i16 [ [[CONV28_1]], [[IF_ELSE_1]] ], [ [[CONV12_1]], [[IF_THEN_1]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_1]], ptr [[ARRAYIDX_1]], align 2 -; CHECK-NEXT: [[OR_131:%.*]] = or i16 [[STOREMERGE]], [[STOREMERGE_1]] +; CHECK-NEXT: [[OR_137:%.*]] = or i16 [[STOREMERGE]], [[STOREMERGE_1]] ; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 4 -; CHECK-NEXT: [[TMP40:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 2 -; CHECK-NEXT: [[CONV_2:%.*]] = sext i16 [[TMP40]] to i32 -; CHECK-NEXT: [[CMP1_2:%.*]] = icmp sgt i16 [[TMP40]], 0 +; CHECK-NEXT: [[TMP38:%.*]] = load i16, ptr [[ARRAYIDX_2]], align 2 +; CHECK-NEXT: [[CONV_2:%.*]] = sext i16 [[TMP38]] to i32 +; CHECK-NEXT: [[CMP1_2:%.*]] = icmp sgt i16 [[TMP38]], 0 ; CHECK-NEXT: [[ARRAYIDX4_2:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 4 -; CHECK-NEXT: [[TMP41:%.*]] = load i16, ptr [[ARRAYIDX4_2]], align 2 -; CHECK-NEXT: [[CONV5_2:%.*]] = zext i16 [[TMP41]] to i32 +; CHECK-NEXT: [[TMP39:%.*]] = load i16, ptr [[ARRAYIDX4_2]], align 2 +; CHECK-NEXT: [[CONV5_2:%.*]] = zext i16 [[TMP39]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_2:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 4 -; CHECK-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX10_2]], align 2 -; CHECK-NEXT: [[CONV11_2:%.*]] = zext i16 [[TMP42]] to i32 +; 
CHECK-NEXT: [[TMP40:%.*]] = load i16, ptr [[ARRAYIDX10_2]], align 2 +; CHECK-NEXT: [[CONV11_2:%.*]] = zext i16 [[TMP40]] to i32 ; CHECK-NEXT: br i1 [[CMP1_2]], label [[IF_THEN_2:%.*]], label [[IF_ELSE_2:%.*]] ; CHECK: if.else.2: ; CHECK-NEXT: [[ADD21_2:%.*]] = sub nsw i32 [[CONV5_2]], [[CONV_2]] ; CHECK-NEXT: [[MUL25_2:%.*]] = mul i32 [[ADD21_2]], [[CONV11_2]] ; CHECK-NEXT: [[SHR26_2:%.*]] = lshr i32 [[MUL25_2]], 16 -; CHECK-NEXT: [[TMP43:%.*]] = trunc i32 [[SHR26_2]] to i16 -; CHECK-NEXT: [[CONV28_2:%.*]] = sub i16 0, [[TMP43]] +; CHECK-NEXT: [[TMP41:%.*]] = trunc i32 [[SHR26_2]] to i16 +; CHECK-NEXT: [[CONV28_2:%.*]] = sub i16 0, [[TMP41]] ; CHECK-NEXT: br label [[IF_END_2:%.*]] ; CHECK: if.then.2: ; CHECK-NEXT: [[ADD_2:%.*]] = add nuw nsw i32 [[CONV5_2]], [[CONV_2]] @@ -150,24 +150,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.2: ; CHECK-NEXT: [[STOREMERGE_2:%.*]] = phi i16 [ [[CONV28_2]], [[IF_ELSE_2]] ], [ [[CONV12_2]], [[IF_THEN_2]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_2]], ptr [[ARRAYIDX_2]], align 2 -; CHECK-NEXT: [[OR_232:%.*]] = or i16 [[OR_131]], [[STOREMERGE_2]] +; CHECK-NEXT: [[OR_238:%.*]] = or i16 [[OR_137]], [[STOREMERGE_2]] ; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 6 -; CHECK-NEXT: [[TMP44:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 2 -; CHECK-NEXT: [[CONV_3:%.*]] = sext i16 [[TMP44]] to i32 -; CHECK-NEXT: [[CMP1_3:%.*]] = icmp sgt i16 [[TMP44]], 0 +; CHECK-NEXT: [[TMP42:%.*]] = load i16, ptr [[ARRAYIDX_3]], align 2 +; CHECK-NEXT: [[CONV_3:%.*]] = sext i16 [[TMP42]] to i32 +; CHECK-NEXT: [[CMP1_3:%.*]] = icmp sgt i16 [[TMP42]], 0 ; CHECK-NEXT: [[ARRAYIDX4_3:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 6 -; CHECK-NEXT: [[TMP45:%.*]] = load i16, ptr [[ARRAYIDX4_3]], align 2 -; CHECK-NEXT: [[CONV5_3:%.*]] = zext i16 [[TMP45]] to i32 +; CHECK-NEXT: [[TMP43:%.*]] = load i16, ptr [[ARRAYIDX4_3]], align 2 +; CHECK-NEXT: [[CONV5_3:%.*]] = zext i16 [[TMP43]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_3:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 6 -; CHECK-NEXT: [[TMP46:%.*]] = load i16, ptr [[ARRAYIDX10_3]], align 2 -; CHECK-NEXT: [[CONV11_3:%.*]] = zext i16 [[TMP46]] to i32 +; CHECK-NEXT: [[TMP44:%.*]] = load i16, ptr [[ARRAYIDX10_3]], align 2 +; CHECK-NEXT: [[CONV11_3:%.*]] = zext i16 [[TMP44]] to i32 ; CHECK-NEXT: br i1 [[CMP1_3]], label [[IF_THEN_3:%.*]], label [[IF_ELSE_3:%.*]] ; CHECK: if.else.3: ; CHECK-NEXT: [[ADD21_3:%.*]] = sub nsw i32 [[CONV5_3]], [[CONV_3]] ; CHECK-NEXT: [[MUL25_3:%.*]] = mul i32 [[ADD21_3]], [[CONV11_3]] ; CHECK-NEXT: [[SHR26_3:%.*]] = lshr i32 [[MUL25_3]], 16 -; CHECK-NEXT: [[TMP47:%.*]] = trunc i32 [[SHR26_3]] to i16 -; CHECK-NEXT: [[CONV28_3:%.*]] = sub i16 0, [[TMP47]] +; CHECK-NEXT: [[TMP45:%.*]] = trunc i32 [[SHR26_3]] to i16 +; CHECK-NEXT: [[CONV28_3:%.*]] = sub i16 0, [[TMP45]] ; CHECK-NEXT: br label [[IF_END_3:%.*]] ; CHECK: if.then.3: ; CHECK-NEXT: [[ADD_3:%.*]] = add nuw nsw i32 [[CONV5_3]], [[CONV_3]] @@ -178,24 +178,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.3: ; CHECK-NEXT: [[STOREMERGE_3:%.*]] = phi i16 [ [[CONV28_3]], [[IF_ELSE_3]] ], [ [[CONV12_3]], [[IF_THEN_3]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_3]], ptr [[ARRAYIDX_3]], align 2 -; CHECK-NEXT: [[OR_333:%.*]] = or i16 [[OR_232]], [[STOREMERGE_3]] +; CHECK-NEXT: [[OR_339:%.*]] = or i16 [[OR_238]], [[STOREMERGE_3]] ; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 8 -; CHECK-NEXT: [[TMP48:%.*]] = load 
i16, ptr [[ARRAYIDX_4]], align 2 -; CHECK-NEXT: [[CONV_4:%.*]] = sext i16 [[TMP48]] to i32 -; CHECK-NEXT: [[CMP1_4:%.*]] = icmp sgt i16 [[TMP48]], 0 +; CHECK-NEXT: [[TMP46:%.*]] = load i16, ptr [[ARRAYIDX_4]], align 2 +; CHECK-NEXT: [[CONV_4:%.*]] = sext i16 [[TMP46]] to i32 +; CHECK-NEXT: [[CMP1_4:%.*]] = icmp sgt i16 [[TMP46]], 0 ; CHECK-NEXT: [[ARRAYIDX4_4:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 8 -; CHECK-NEXT: [[TMP49:%.*]] = load i16, ptr [[ARRAYIDX4_4]], align 2 -; CHECK-NEXT: [[CONV5_4:%.*]] = zext i16 [[TMP49]] to i32 +; CHECK-NEXT: [[TMP47:%.*]] = load i16, ptr [[ARRAYIDX4_4]], align 2 +; CHECK-NEXT: [[CONV5_4:%.*]] = zext i16 [[TMP47]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_4:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 8 -; CHECK-NEXT: [[TMP50:%.*]] = load i16, ptr [[ARRAYIDX10_4]], align 2 -; CHECK-NEXT: [[CONV11_4:%.*]] = zext i16 [[TMP50]] to i32 +; CHECK-NEXT: [[TMP48:%.*]] = load i16, ptr [[ARRAYIDX10_4]], align 2 +; CHECK-NEXT: [[CONV11_4:%.*]] = zext i16 [[TMP48]] to i32 ; CHECK-NEXT: br i1 [[CMP1_4]], label [[IF_THEN_4:%.*]], label [[IF_ELSE_4:%.*]] ; CHECK: if.else.4: ; CHECK-NEXT: [[ADD21_4:%.*]] = sub nsw i32 [[CONV5_4]], [[CONV_4]] ; CHECK-NEXT: [[MUL25_4:%.*]] = mul i32 [[ADD21_4]], [[CONV11_4]] ; CHECK-NEXT: [[SHR26_4:%.*]] = lshr i32 [[MUL25_4]], 16 -; CHECK-NEXT: [[TMP51:%.*]] = trunc i32 [[SHR26_4]] to i16 -; CHECK-NEXT: [[CONV28_4:%.*]] = sub i16 0, [[TMP51]] +; CHECK-NEXT: [[TMP49:%.*]] = trunc i32 [[SHR26_4]] to i16 +; CHECK-NEXT: [[CONV28_4:%.*]] = sub i16 0, [[TMP49]] ; CHECK-NEXT: br label [[IF_END_4:%.*]] ; CHECK: if.then.4: ; CHECK-NEXT: [[ADD_4:%.*]] = add nuw nsw i32 [[CONV5_4]], [[CONV_4]] @@ -206,24 +206,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.4: ; CHECK-NEXT: [[STOREMERGE_4:%.*]] = phi i16 [ [[CONV28_4]], [[IF_ELSE_4]] ], [ [[CONV12_4]], [[IF_THEN_4]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_4]], ptr [[ARRAYIDX_4]], align 2 -; CHECK-NEXT: [[OR_434:%.*]] = or i16 [[OR_333]], [[STOREMERGE_4]] +; CHECK-NEXT: [[OR_440:%.*]] = or i16 [[OR_339]], [[STOREMERGE_4]] ; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 10 -; CHECK-NEXT: [[TMP52:%.*]] = load i16, ptr [[ARRAYIDX_5]], align 2 -; CHECK-NEXT: [[CONV_5:%.*]] = sext i16 [[TMP52]] to i32 -; CHECK-NEXT: [[CMP1_5:%.*]] = icmp sgt i16 [[TMP52]], 0 +; CHECK-NEXT: [[TMP50:%.*]] = load i16, ptr [[ARRAYIDX_5]], align 2 +; CHECK-NEXT: [[CONV_5:%.*]] = sext i16 [[TMP50]] to i32 +; CHECK-NEXT: [[CMP1_5:%.*]] = icmp sgt i16 [[TMP50]], 0 ; CHECK-NEXT: [[ARRAYIDX4_5:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 10 -; CHECK-NEXT: [[TMP53:%.*]] = load i16, ptr [[ARRAYIDX4_5]], align 2 -; CHECK-NEXT: [[CONV5_5:%.*]] = zext i16 [[TMP53]] to i32 +; CHECK-NEXT: [[TMP51:%.*]] = load i16, ptr [[ARRAYIDX4_5]], align 2 +; CHECK-NEXT: [[CONV5_5:%.*]] = zext i16 [[TMP51]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_5:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 10 -; CHECK-NEXT: [[TMP54:%.*]] = load i16, ptr [[ARRAYIDX10_5]], align 2 -; CHECK-NEXT: [[CONV11_5:%.*]] = zext i16 [[TMP54]] to i32 +; CHECK-NEXT: [[TMP52:%.*]] = load i16, ptr [[ARRAYIDX10_5]], align 2 +; CHECK-NEXT: [[CONV11_5:%.*]] = zext i16 [[TMP52]] to i32 ; CHECK-NEXT: br i1 [[CMP1_5]], label [[IF_THEN_5:%.*]], label [[IF_ELSE_5:%.*]] ; CHECK: if.else.5: ; CHECK-NEXT: [[ADD21_5:%.*]] = sub nsw i32 [[CONV5_5]], [[CONV_5]] ; CHECK-NEXT: [[MUL25_5:%.*]] = mul i32 [[ADD21_5]], [[CONV11_5]] ; CHECK-NEXT: [[SHR26_5:%.*]] = lshr i32 [[MUL25_5]], 16 -; CHECK-NEXT: 
[[TMP55:%.*]] = trunc i32 [[SHR26_5]] to i16 -; CHECK-NEXT: [[CONV28_5:%.*]] = sub i16 0, [[TMP55]] +; CHECK-NEXT: [[TMP53:%.*]] = trunc i32 [[SHR26_5]] to i16 +; CHECK-NEXT: [[CONV28_5:%.*]] = sub i16 0, [[TMP53]] ; CHECK-NEXT: br label [[IF_END_5:%.*]] ; CHECK: if.then.5: ; CHECK-NEXT: [[ADD_5:%.*]] = add nuw nsw i32 [[CONV5_5]], [[CONV_5]] @@ -234,24 +234,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.5: ; CHECK-NEXT: [[STOREMERGE_5:%.*]] = phi i16 [ [[CONV28_5]], [[IF_ELSE_5]] ], [ [[CONV12_5]], [[IF_THEN_5]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_5]], ptr [[ARRAYIDX_5]], align 2 -; CHECK-NEXT: [[OR_535:%.*]] = or i16 [[OR_434]], [[STOREMERGE_5]] +; CHECK-NEXT: [[OR_541:%.*]] = or i16 [[OR_440]], [[STOREMERGE_5]] ; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 12 -; CHECK-NEXT: [[TMP56:%.*]] = load i16, ptr [[ARRAYIDX_6]], align 2 -; CHECK-NEXT: [[CONV_6:%.*]] = sext i16 [[TMP56]] to i32 -; CHECK-NEXT: [[CMP1_6:%.*]] = icmp sgt i16 [[TMP56]], 0 +; CHECK-NEXT: [[TMP54:%.*]] = load i16, ptr [[ARRAYIDX_6]], align 2 +; CHECK-NEXT: [[CONV_6:%.*]] = sext i16 [[TMP54]] to i32 +; CHECK-NEXT: [[CMP1_6:%.*]] = icmp sgt i16 [[TMP54]], 0 ; CHECK-NEXT: [[ARRAYIDX4_6:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 12 -; CHECK-NEXT: [[TMP57:%.*]] = load i16, ptr [[ARRAYIDX4_6]], align 2 -; CHECK-NEXT: [[CONV5_6:%.*]] = zext i16 [[TMP57]] to i32 +; CHECK-NEXT: [[TMP55:%.*]] = load i16, ptr [[ARRAYIDX4_6]], align 2 +; CHECK-NEXT: [[CONV5_6:%.*]] = zext i16 [[TMP55]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_6:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 12 -; CHECK-NEXT: [[TMP58:%.*]] = load i16, ptr [[ARRAYIDX10_6]], align 2 -; CHECK-NEXT: [[CONV11_6:%.*]] = zext i16 [[TMP58]] to i32 +; CHECK-NEXT: [[TMP56:%.*]] = load i16, ptr [[ARRAYIDX10_6]], align 2 +; CHECK-NEXT: [[CONV11_6:%.*]] = zext i16 [[TMP56]] to i32 ; CHECK-NEXT: br i1 [[CMP1_6]], label [[IF_THEN_6:%.*]], label [[IF_ELSE_6:%.*]] ; CHECK: if.else.6: ; CHECK-NEXT: [[ADD21_6:%.*]] = sub nsw i32 [[CONV5_6]], [[CONV_6]] ; CHECK-NEXT: [[MUL25_6:%.*]] = mul i32 [[ADD21_6]], [[CONV11_6]] ; CHECK-NEXT: [[SHR26_6:%.*]] = lshr i32 [[MUL25_6]], 16 -; CHECK-NEXT: [[TMP59:%.*]] = trunc i32 [[SHR26_6]] to i16 -; CHECK-NEXT: [[CONV28_6:%.*]] = sub i16 0, [[TMP59]] +; CHECK-NEXT: [[TMP57:%.*]] = trunc i32 [[SHR26_6]] to i16 +; CHECK-NEXT: [[CONV28_6:%.*]] = sub i16 0, [[TMP57]] ; CHECK-NEXT: br label [[IF_END_6:%.*]] ; CHECK: if.then.6: ; CHECK-NEXT: [[ADD_6:%.*]] = add nuw nsw i32 [[CONV5_6]], [[CONV_6]] @@ -262,24 +262,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.6: ; CHECK-NEXT: [[STOREMERGE_6:%.*]] = phi i16 [ [[CONV28_6]], [[IF_ELSE_6]] ], [ [[CONV12_6]], [[IF_THEN_6]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_6]], ptr [[ARRAYIDX_6]], align 2 -; CHECK-NEXT: [[OR_636:%.*]] = or i16 [[OR_535]], [[STOREMERGE_6]] +; CHECK-NEXT: [[OR_642:%.*]] = or i16 [[OR_541]], [[STOREMERGE_6]] ; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 14 -; CHECK-NEXT: [[TMP60:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2 -; CHECK-NEXT: [[CONV_7:%.*]] = sext i16 [[TMP60]] to i32 -; CHECK-NEXT: [[CMP1_7:%.*]] = icmp sgt i16 [[TMP60]], 0 +; CHECK-NEXT: [[TMP58:%.*]] = load i16, ptr [[ARRAYIDX_7]], align 2 +; CHECK-NEXT: [[CONV_7:%.*]] = sext i16 [[TMP58]] to i32 +; CHECK-NEXT: [[CMP1_7:%.*]] = icmp sgt i16 [[TMP58]], 0 ; CHECK-NEXT: [[ARRAYIDX4_7:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 14 -; 
CHECK-NEXT: [[TMP61:%.*]] = load i16, ptr [[ARRAYIDX4_7]], align 2 -; CHECK-NEXT: [[CONV5_7:%.*]] = zext i16 [[TMP61]] to i32 +; CHECK-NEXT: [[TMP59:%.*]] = load i16, ptr [[ARRAYIDX4_7]], align 2 +; CHECK-NEXT: [[CONV5_7:%.*]] = zext i16 [[TMP59]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_7:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 14 -; CHECK-NEXT: [[TMP62:%.*]] = load i16, ptr [[ARRAYIDX10_7]], align 2 -; CHECK-NEXT: [[CONV11_7:%.*]] = zext i16 [[TMP62]] to i32 +; CHECK-NEXT: [[TMP60:%.*]] = load i16, ptr [[ARRAYIDX10_7]], align 2 +; CHECK-NEXT: [[CONV11_7:%.*]] = zext i16 [[TMP60]] to i32 ; CHECK-NEXT: br i1 [[CMP1_7]], label [[IF_THEN_7:%.*]], label [[IF_ELSE_7:%.*]] ; CHECK: if.else.7: ; CHECK-NEXT: [[ADD21_7:%.*]] = sub nsw i32 [[CONV5_7]], [[CONV_7]] ; CHECK-NEXT: [[MUL25_7:%.*]] = mul i32 [[ADD21_7]], [[CONV11_7]] ; CHECK-NEXT: [[SHR26_7:%.*]] = lshr i32 [[MUL25_7]], 16 -; CHECK-NEXT: [[TMP63:%.*]] = trunc i32 [[SHR26_7]] to i16 -; CHECK-NEXT: [[CONV28_7:%.*]] = sub i16 0, [[TMP63]] +; CHECK-NEXT: [[TMP61:%.*]] = trunc i32 [[SHR26_7]] to i16 +; CHECK-NEXT: [[CONV28_7:%.*]] = sub i16 0, [[TMP61]] ; CHECK-NEXT: br label [[IF_END_7:%.*]] ; CHECK: if.then.7: ; CHECK-NEXT: [[ADD_7:%.*]] = add nuw nsw i32 [[CONV5_7]], [[CONV_7]] @@ -290,24 +290,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.7: ; CHECK-NEXT: [[STOREMERGE_7:%.*]] = phi i16 [ [[CONV28_7]], [[IF_ELSE_7]] ], [ [[CONV12_7]], [[IF_THEN_7]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_7]], ptr [[ARRAYIDX_7]], align 2 -; CHECK-NEXT: [[OR_737:%.*]] = or i16 [[OR_636]], [[STOREMERGE_7]] +; CHECK-NEXT: [[OR_743:%.*]] = or i16 [[OR_642]], [[STOREMERGE_7]] ; CHECK-NEXT: [[ARRAYIDX_8:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 16 -; CHECK-NEXT: [[TMP64:%.*]] = load i16, ptr [[ARRAYIDX_8]], align 2 -; CHECK-NEXT: [[CONV_8:%.*]] = sext i16 [[TMP64]] to i32 -; CHECK-NEXT: [[CMP1_8:%.*]] = icmp sgt i16 [[TMP64]], 0 +; CHECK-NEXT: [[TMP62:%.*]] = load i16, ptr [[ARRAYIDX_8]], align 2 +; CHECK-NEXT: [[CONV_8:%.*]] = sext i16 [[TMP62]] to i32 +; CHECK-NEXT: [[CMP1_8:%.*]] = icmp sgt i16 [[TMP62]], 0 ; CHECK-NEXT: [[ARRAYIDX4_8:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 16 -; CHECK-NEXT: [[TMP65:%.*]] = load i16, ptr [[ARRAYIDX4_8]], align 2 -; CHECK-NEXT: [[CONV5_8:%.*]] = zext i16 [[TMP65]] to i32 +; CHECK-NEXT: [[TMP63:%.*]] = load i16, ptr [[ARRAYIDX4_8]], align 2 +; CHECK-NEXT: [[CONV5_8:%.*]] = zext i16 [[TMP63]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_8:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 16 -; CHECK-NEXT: [[TMP66:%.*]] = load i16, ptr [[ARRAYIDX10_8]], align 2 -; CHECK-NEXT: [[CONV11_8:%.*]] = zext i16 [[TMP66]] to i32 +; CHECK-NEXT: [[TMP64:%.*]] = load i16, ptr [[ARRAYIDX10_8]], align 2 +; CHECK-NEXT: [[CONV11_8:%.*]] = zext i16 [[TMP64]] to i32 ; CHECK-NEXT: br i1 [[CMP1_8]], label [[IF_THEN_8:%.*]], label [[IF_ELSE_8:%.*]] ; CHECK: if.else.8: ; CHECK-NEXT: [[ADD21_8:%.*]] = sub nsw i32 [[CONV5_8]], [[CONV_8]] ; CHECK-NEXT: [[MUL25_8:%.*]] = mul i32 [[ADD21_8]], [[CONV11_8]] ; CHECK-NEXT: [[SHR26_8:%.*]] = lshr i32 [[MUL25_8]], 16 -; CHECK-NEXT: [[TMP67:%.*]] = trunc i32 [[SHR26_8]] to i16 -; CHECK-NEXT: [[CONV28_8:%.*]] = sub i16 0, [[TMP67]] +; CHECK-NEXT: [[TMP65:%.*]] = trunc i32 [[SHR26_8]] to i16 +; CHECK-NEXT: [[CONV28_8:%.*]] = sub i16 0, [[TMP65]] ; CHECK-NEXT: br label [[IF_END_8:%.*]] ; CHECK: if.then.8: ; CHECK-NEXT: [[ADD_8:%.*]] = add nuw nsw i32 [[CONV5_8]], [[CONV_8]] @@ -318,24 +318,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef 
%mf, ptr noundef %bias) { ; CHECK: if.end.8: ; CHECK-NEXT: [[STOREMERGE_8:%.*]] = phi i16 [ [[CONV28_8]], [[IF_ELSE_8]] ], [ [[CONV12_8]], [[IF_THEN_8]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_8]], ptr [[ARRAYIDX_8]], align 2 -; CHECK-NEXT: [[OR_838:%.*]] = or i16 [[OR_737]], [[STOREMERGE_8]] +; CHECK-NEXT: [[OR_844:%.*]] = or i16 [[OR_743]], [[STOREMERGE_8]] ; CHECK-NEXT: [[ARRAYIDX_9:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 18 -; CHECK-NEXT: [[TMP68:%.*]] = load i16, ptr [[ARRAYIDX_9]], align 2 -; CHECK-NEXT: [[CONV_9:%.*]] = sext i16 [[TMP68]] to i32 -; CHECK-NEXT: [[CMP1_9:%.*]] = icmp sgt i16 [[TMP68]], 0 +; CHECK-NEXT: [[TMP66:%.*]] = load i16, ptr [[ARRAYIDX_9]], align 2 +; CHECK-NEXT: [[CONV_9:%.*]] = sext i16 [[TMP66]] to i32 +; CHECK-NEXT: [[CMP1_9:%.*]] = icmp sgt i16 [[TMP66]], 0 ; CHECK-NEXT: [[ARRAYIDX4_9:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 18 -; CHECK-NEXT: [[TMP69:%.*]] = load i16, ptr [[ARRAYIDX4_9]], align 2 -; CHECK-NEXT: [[CONV5_9:%.*]] = zext i16 [[TMP69]] to i32 +; CHECK-NEXT: [[TMP67:%.*]] = load i16, ptr [[ARRAYIDX4_9]], align 2 +; CHECK-NEXT: [[CONV5_9:%.*]] = zext i16 [[TMP67]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_9:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 18 -; CHECK-NEXT: [[TMP70:%.*]] = load i16, ptr [[ARRAYIDX10_9]], align 2 -; CHECK-NEXT: [[CONV11_9:%.*]] = zext i16 [[TMP70]] to i32 +; CHECK-NEXT: [[TMP68:%.*]] = load i16, ptr [[ARRAYIDX10_9]], align 2 +; CHECK-NEXT: [[CONV11_9:%.*]] = zext i16 [[TMP68]] to i32 ; CHECK-NEXT: br i1 [[CMP1_9]], label [[IF_THEN_9:%.*]], label [[IF_ELSE_9:%.*]] ; CHECK: if.else.9: ; CHECK-NEXT: [[ADD21_9:%.*]] = sub nsw i32 [[CONV5_9]], [[CONV_9]] ; CHECK-NEXT: [[MUL25_9:%.*]] = mul i32 [[ADD21_9]], [[CONV11_9]] ; CHECK-NEXT: [[SHR26_9:%.*]] = lshr i32 [[MUL25_9]], 16 -; CHECK-NEXT: [[TMP71:%.*]] = trunc i32 [[SHR26_9]] to i16 -; CHECK-NEXT: [[CONV28_9:%.*]] = sub i16 0, [[TMP71]] +; CHECK-NEXT: [[TMP69:%.*]] = trunc i32 [[SHR26_9]] to i16 +; CHECK-NEXT: [[CONV28_9:%.*]] = sub i16 0, [[TMP69]] ; CHECK-NEXT: br label [[IF_END_9:%.*]] ; CHECK: if.then.9: ; CHECK-NEXT: [[ADD_9:%.*]] = add nuw nsw i32 [[CONV5_9]], [[CONV_9]] @@ -346,24 +346,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.9: ; CHECK-NEXT: [[STOREMERGE_9:%.*]] = phi i16 [ [[CONV28_9]], [[IF_ELSE_9]] ], [ [[CONV12_9]], [[IF_THEN_9]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_9]], ptr [[ARRAYIDX_9]], align 2 -; CHECK-NEXT: [[OR_939:%.*]] = or i16 [[OR_838]], [[STOREMERGE_9]] +; CHECK-NEXT: [[OR_945:%.*]] = or i16 [[OR_844]], [[STOREMERGE_9]] ; CHECK-NEXT: [[ARRAYIDX_10:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 20 -; CHECK-NEXT: [[TMP72:%.*]] = load i16, ptr [[ARRAYIDX_10]], align 2 -; CHECK-NEXT: [[CONV_10:%.*]] = sext i16 [[TMP72]] to i32 -; CHECK-NEXT: [[CMP1_10:%.*]] = icmp sgt i16 [[TMP72]], 0 +; CHECK-NEXT: [[TMP70:%.*]] = load i16, ptr [[ARRAYIDX_10]], align 2 +; CHECK-NEXT: [[CONV_10:%.*]] = sext i16 [[TMP70]] to i32 +; CHECK-NEXT: [[CMP1_10:%.*]] = icmp sgt i16 [[TMP70]], 0 ; CHECK-NEXT: [[ARRAYIDX4_10:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 20 -; CHECK-NEXT: [[TMP73:%.*]] = load i16, ptr [[ARRAYIDX4_10]], align 2 -; CHECK-NEXT: [[CONV5_10:%.*]] = zext i16 [[TMP73]] to i32 +; CHECK-NEXT: [[TMP71:%.*]] = load i16, ptr [[ARRAYIDX4_10]], align 2 +; CHECK-NEXT: [[CONV5_10:%.*]] = zext i16 [[TMP71]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_10:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 20 -; CHECK-NEXT: [[TMP74:%.*]] = load i16, ptr [[ARRAYIDX10_10]], align 2 -; 
CHECK-NEXT: [[CONV11_10:%.*]] = zext i16 [[TMP74]] to i32 +; CHECK-NEXT: [[TMP72:%.*]] = load i16, ptr [[ARRAYIDX10_10]], align 2 +; CHECK-NEXT: [[CONV11_10:%.*]] = zext i16 [[TMP72]] to i32 ; CHECK-NEXT: br i1 [[CMP1_10]], label [[IF_THEN_10:%.*]], label [[IF_ELSE_10:%.*]] ; CHECK: if.else.10: ; CHECK-NEXT: [[ADD21_10:%.*]] = sub nsw i32 [[CONV5_10]], [[CONV_10]] ; CHECK-NEXT: [[MUL25_10:%.*]] = mul i32 [[ADD21_10]], [[CONV11_10]] ; CHECK-NEXT: [[SHR26_10:%.*]] = lshr i32 [[MUL25_10]], 16 -; CHECK-NEXT: [[TMP75:%.*]] = trunc i32 [[SHR26_10]] to i16 -; CHECK-NEXT: [[CONV28_10:%.*]] = sub i16 0, [[TMP75]] +; CHECK-NEXT: [[TMP73:%.*]] = trunc i32 [[SHR26_10]] to i16 +; CHECK-NEXT: [[CONV28_10:%.*]] = sub i16 0, [[TMP73]] ; CHECK-NEXT: br label [[IF_END_10:%.*]] ; CHECK: if.then.10: ; CHECK-NEXT: [[ADD_10:%.*]] = add nuw nsw i32 [[CONV5_10]], [[CONV_10]] @@ -374,24 +374,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.10: ; CHECK-NEXT: [[STOREMERGE_10:%.*]] = phi i16 [ [[CONV28_10]], [[IF_ELSE_10]] ], [ [[CONV12_10]], [[IF_THEN_10]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_10]], ptr [[ARRAYIDX_10]], align 2 -; CHECK-NEXT: [[OR_1040:%.*]] = or i16 [[OR_939]], [[STOREMERGE_10]] +; CHECK-NEXT: [[OR_1046:%.*]] = or i16 [[OR_945]], [[STOREMERGE_10]] ; CHECK-NEXT: [[ARRAYIDX_11:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 22 -; CHECK-NEXT: [[TMP76:%.*]] = load i16, ptr [[ARRAYIDX_11]], align 2 -; CHECK-NEXT: [[CONV_11:%.*]] = sext i16 [[TMP76]] to i32 -; CHECK-NEXT: [[CMP1_11:%.*]] = icmp sgt i16 [[TMP76]], 0 +; CHECK-NEXT: [[TMP74:%.*]] = load i16, ptr [[ARRAYIDX_11]], align 2 +; CHECK-NEXT: [[CONV_11:%.*]] = sext i16 [[TMP74]] to i32 +; CHECK-NEXT: [[CMP1_11:%.*]] = icmp sgt i16 [[TMP74]], 0 ; CHECK-NEXT: [[ARRAYIDX4_11:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 22 -; CHECK-NEXT: [[TMP77:%.*]] = load i16, ptr [[ARRAYIDX4_11]], align 2 -; CHECK-NEXT: [[CONV5_11:%.*]] = zext i16 [[TMP77]] to i32 +; CHECK-NEXT: [[TMP75:%.*]] = load i16, ptr [[ARRAYIDX4_11]], align 2 +; CHECK-NEXT: [[CONV5_11:%.*]] = zext i16 [[TMP75]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_11:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 22 -; CHECK-NEXT: [[TMP78:%.*]] = load i16, ptr [[ARRAYIDX10_11]], align 2 -; CHECK-NEXT: [[CONV11_11:%.*]] = zext i16 [[TMP78]] to i32 +; CHECK-NEXT: [[TMP76:%.*]] = load i16, ptr [[ARRAYIDX10_11]], align 2 +; CHECK-NEXT: [[CONV11_11:%.*]] = zext i16 [[TMP76]] to i32 ; CHECK-NEXT: br i1 [[CMP1_11]], label [[IF_THEN_11:%.*]], label [[IF_ELSE_11:%.*]] ; CHECK: if.else.11: ; CHECK-NEXT: [[ADD21_11:%.*]] = sub nsw i32 [[CONV5_11]], [[CONV_11]] ; CHECK-NEXT: [[MUL25_11:%.*]] = mul i32 [[ADD21_11]], [[CONV11_11]] ; CHECK-NEXT: [[SHR26_11:%.*]] = lshr i32 [[MUL25_11]], 16 -; CHECK-NEXT: [[TMP79:%.*]] = trunc i32 [[SHR26_11]] to i16 -; CHECK-NEXT: [[CONV28_11:%.*]] = sub i16 0, [[TMP79]] +; CHECK-NEXT: [[TMP77:%.*]] = trunc i32 [[SHR26_11]] to i16 +; CHECK-NEXT: [[CONV28_11:%.*]] = sub i16 0, [[TMP77]] ; CHECK-NEXT: br label [[IF_END_11:%.*]] ; CHECK: if.then.11: ; CHECK-NEXT: [[ADD_11:%.*]] = add nuw nsw i32 [[CONV5_11]], [[CONV_11]] @@ -402,24 +402,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.11: ; CHECK-NEXT: [[STOREMERGE_11:%.*]] = phi i16 [ [[CONV28_11]], [[IF_ELSE_11]] ], [ [[CONV12_11]], [[IF_THEN_11]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_11]], ptr [[ARRAYIDX_11]], align 2 -; CHECK-NEXT: [[OR_1141:%.*]] = or i16 [[OR_1040]], [[STOREMERGE_11]] +; CHECK-NEXT: [[OR_1147:%.*]] = or 
i16 [[OR_1046]], [[STOREMERGE_11]] ; CHECK-NEXT: [[ARRAYIDX_12:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 24 -; CHECK-NEXT: [[TMP80:%.*]] = load i16, ptr [[ARRAYIDX_12]], align 2 -; CHECK-NEXT: [[CONV_12:%.*]] = sext i16 [[TMP80]] to i32 -; CHECK-NEXT: [[CMP1_12:%.*]] = icmp sgt i16 [[TMP80]], 0 +; CHECK-NEXT: [[TMP78:%.*]] = load i16, ptr [[ARRAYIDX_12]], align 2 +; CHECK-NEXT: [[CONV_12:%.*]] = sext i16 [[TMP78]] to i32 +; CHECK-NEXT: [[CMP1_12:%.*]] = icmp sgt i16 [[TMP78]], 0 ; CHECK-NEXT: [[ARRAYIDX4_12:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 24 -; CHECK-NEXT: [[TMP81:%.*]] = load i16, ptr [[ARRAYIDX4_12]], align 2 -; CHECK-NEXT: [[CONV5_12:%.*]] = zext i16 [[TMP81]] to i32 +; CHECK-NEXT: [[TMP79:%.*]] = load i16, ptr [[ARRAYIDX4_12]], align 2 +; CHECK-NEXT: [[CONV5_12:%.*]] = zext i16 [[TMP79]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_12:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 24 -; CHECK-NEXT: [[TMP82:%.*]] = load i16, ptr [[ARRAYIDX10_12]], align 2 -; CHECK-NEXT: [[CONV11_12:%.*]] = zext i16 [[TMP82]] to i32 +; CHECK-NEXT: [[TMP80:%.*]] = load i16, ptr [[ARRAYIDX10_12]], align 2 +; CHECK-NEXT: [[CONV11_12:%.*]] = zext i16 [[TMP80]] to i32 ; CHECK-NEXT: br i1 [[CMP1_12]], label [[IF_THEN_12:%.*]], label [[IF_ELSE_12:%.*]] ; CHECK: if.else.12: ; CHECK-NEXT: [[ADD21_12:%.*]] = sub nsw i32 [[CONV5_12]], [[CONV_12]] ; CHECK-NEXT: [[MUL25_12:%.*]] = mul i32 [[ADD21_12]], [[CONV11_12]] ; CHECK-NEXT: [[SHR26_12:%.*]] = lshr i32 [[MUL25_12]], 16 -; CHECK-NEXT: [[TMP83:%.*]] = trunc i32 [[SHR26_12]] to i16 -; CHECK-NEXT: [[CONV28_12:%.*]] = sub i16 0, [[TMP83]] +; CHECK-NEXT: [[TMP81:%.*]] = trunc i32 [[SHR26_12]] to i16 +; CHECK-NEXT: [[CONV28_12:%.*]] = sub i16 0, [[TMP81]] ; CHECK-NEXT: br label [[IF_END_12:%.*]] ; CHECK: if.then.12: ; CHECK-NEXT: [[ADD_12:%.*]] = add nuw nsw i32 [[CONV5_12]], [[CONV_12]] @@ -430,24 +430,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.12: ; CHECK-NEXT: [[STOREMERGE_12:%.*]] = phi i16 [ [[CONV28_12]], [[IF_ELSE_12]] ], [ [[CONV12_12]], [[IF_THEN_12]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_12]], ptr [[ARRAYIDX_12]], align 2 -; CHECK-NEXT: [[OR_1242:%.*]] = or i16 [[OR_1141]], [[STOREMERGE_12]] +; CHECK-NEXT: [[OR_1248:%.*]] = or i16 [[OR_1147]], [[STOREMERGE_12]] ; CHECK-NEXT: [[ARRAYIDX_13:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 26 -; CHECK-NEXT: [[TMP84:%.*]] = load i16, ptr [[ARRAYIDX_13]], align 2 -; CHECK-NEXT: [[CONV_13:%.*]] = sext i16 [[TMP84]] to i32 -; CHECK-NEXT: [[CMP1_13:%.*]] = icmp sgt i16 [[TMP84]], 0 +; CHECK-NEXT: [[TMP82:%.*]] = load i16, ptr [[ARRAYIDX_13]], align 2 +; CHECK-NEXT: [[CONV_13:%.*]] = sext i16 [[TMP82]] to i32 +; CHECK-NEXT: [[CMP1_13:%.*]] = icmp sgt i16 [[TMP82]], 0 ; CHECK-NEXT: [[ARRAYIDX4_13:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 26 -; CHECK-NEXT: [[TMP85:%.*]] = load i16, ptr [[ARRAYIDX4_13]], align 2 -; CHECK-NEXT: [[CONV5_13:%.*]] = zext i16 [[TMP85]] to i32 +; CHECK-NEXT: [[TMP83:%.*]] = load i16, ptr [[ARRAYIDX4_13]], align 2 +; CHECK-NEXT: [[CONV5_13:%.*]] = zext i16 [[TMP83]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_13:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 26 -; CHECK-NEXT: [[TMP86:%.*]] = load i16, ptr [[ARRAYIDX10_13]], align 2 -; CHECK-NEXT: [[CONV11_13:%.*]] = zext i16 [[TMP86]] to i32 +; CHECK-NEXT: [[TMP84:%.*]] = load i16, ptr [[ARRAYIDX10_13]], align 2 +; CHECK-NEXT: [[CONV11_13:%.*]] = zext i16 [[TMP84]] to i32 ; CHECK-NEXT: br i1 [[CMP1_13]], label [[IF_THEN_13:%.*]], label [[IF_ELSE_13:%.*]] ; 
CHECK: if.else.13: ; CHECK-NEXT: [[ADD21_13:%.*]] = sub nsw i32 [[CONV5_13]], [[CONV_13]] ; CHECK-NEXT: [[MUL25_13:%.*]] = mul i32 [[ADD21_13]], [[CONV11_13]] ; CHECK-NEXT: [[SHR26_13:%.*]] = lshr i32 [[MUL25_13]], 16 -; CHECK-NEXT: [[TMP87:%.*]] = trunc i32 [[SHR26_13]] to i16 -; CHECK-NEXT: [[CONV28_13:%.*]] = sub i16 0, [[TMP87]] +; CHECK-NEXT: [[TMP85:%.*]] = trunc i32 [[SHR26_13]] to i16 +; CHECK-NEXT: [[CONV28_13:%.*]] = sub i16 0, [[TMP85]] ; CHECK-NEXT: br label [[IF_END_13:%.*]] ; CHECK: if.then.13: ; CHECK-NEXT: [[ADD_13:%.*]] = add nuw nsw i32 [[CONV5_13]], [[CONV_13]] @@ -458,24 +458,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.13: ; CHECK-NEXT: [[STOREMERGE_13:%.*]] = phi i16 [ [[CONV28_13]], [[IF_ELSE_13]] ], [ [[CONV12_13]], [[IF_THEN_13]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_13]], ptr [[ARRAYIDX_13]], align 2 -; CHECK-NEXT: [[OR_1343:%.*]] = or i16 [[OR_1242]], [[STOREMERGE_13]] +; CHECK-NEXT: [[OR_1349:%.*]] = or i16 [[OR_1248]], [[STOREMERGE_13]] ; CHECK-NEXT: [[ARRAYIDX_14:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 28 -; CHECK-NEXT: [[TMP88:%.*]] = load i16, ptr [[ARRAYIDX_14]], align 2 -; CHECK-NEXT: [[CONV_14:%.*]] = sext i16 [[TMP88]] to i32 -; CHECK-NEXT: [[CMP1_14:%.*]] = icmp sgt i16 [[TMP88]], 0 +; CHECK-NEXT: [[TMP86:%.*]] = load i16, ptr [[ARRAYIDX_14]], align 2 +; CHECK-NEXT: [[CONV_14:%.*]] = sext i16 [[TMP86]] to i32 +; CHECK-NEXT: [[CMP1_14:%.*]] = icmp sgt i16 [[TMP86]], 0 ; CHECK-NEXT: [[ARRAYIDX4_14:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 28 -; CHECK-NEXT: [[TMP89:%.*]] = load i16, ptr [[ARRAYIDX4_14]], align 2 -; CHECK-NEXT: [[CONV5_14:%.*]] = zext i16 [[TMP89]] to i32 +; CHECK-NEXT: [[TMP87:%.*]] = load i16, ptr [[ARRAYIDX4_14]], align 2 +; CHECK-NEXT: [[CONV5_14:%.*]] = zext i16 [[TMP87]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_14:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 28 -; CHECK-NEXT: [[TMP90:%.*]] = load i16, ptr [[ARRAYIDX10_14]], align 2 -; CHECK-NEXT: [[CONV11_14:%.*]] = zext i16 [[TMP90]] to i32 +; CHECK-NEXT: [[TMP88:%.*]] = load i16, ptr [[ARRAYIDX10_14]], align 2 +; CHECK-NEXT: [[CONV11_14:%.*]] = zext i16 [[TMP88]] to i32 ; CHECK-NEXT: br i1 [[CMP1_14]], label [[IF_THEN_14:%.*]], label [[IF_ELSE_14:%.*]] ; CHECK: if.else.14: ; CHECK-NEXT: [[ADD21_14:%.*]] = sub nsw i32 [[CONV5_14]], [[CONV_14]] ; CHECK-NEXT: [[MUL25_14:%.*]] = mul i32 [[ADD21_14]], [[CONV11_14]] ; CHECK-NEXT: [[SHR26_14:%.*]] = lshr i32 [[MUL25_14]], 16 -; CHECK-NEXT: [[TMP91:%.*]] = trunc i32 [[SHR26_14]] to i16 -; CHECK-NEXT: [[CONV28_14:%.*]] = sub i16 0, [[TMP91]] +; CHECK-NEXT: [[TMP89:%.*]] = trunc i32 [[SHR26_14]] to i16 +; CHECK-NEXT: [[CONV28_14:%.*]] = sub i16 0, [[TMP89]] ; CHECK-NEXT: br label [[IF_END_14:%.*]] ; CHECK: if.then.14: ; CHECK-NEXT: [[ADD_14:%.*]] = add nuw nsw i32 [[CONV5_14]], [[CONV_14]] @@ -486,24 +486,24 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.14: ; CHECK-NEXT: [[STOREMERGE_14:%.*]] = phi i16 [ [[CONV28_14]], [[IF_ELSE_14]] ], [ [[CONV12_14]], [[IF_THEN_14]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_14]], ptr [[ARRAYIDX_14]], align 2 -; CHECK-NEXT: [[OR_1444:%.*]] = or i16 [[OR_1343]], [[STOREMERGE_14]] +; CHECK-NEXT: [[OR_1450:%.*]] = or i16 [[OR_1349]], [[STOREMERGE_14]] ; CHECK-NEXT: [[ARRAYIDX_15:%.*]] = getelementptr inbounds i8, ptr [[DCT]], i64 30 -; CHECK-NEXT: [[TMP92:%.*]] = load i16, ptr [[ARRAYIDX_15]], align 2 -; CHECK-NEXT: [[CONV_15:%.*]] = sext i16 [[TMP92]] to i32 -; CHECK-NEXT: 
[[CMP1_15:%.*]] = icmp sgt i16 [[TMP92]], 0 +; CHECK-NEXT: [[TMP90:%.*]] = load i16, ptr [[ARRAYIDX_15]], align 2 +; CHECK-NEXT: [[CONV_15:%.*]] = sext i16 [[TMP90]] to i32 +; CHECK-NEXT: [[CMP1_15:%.*]] = icmp sgt i16 [[TMP90]], 0 ; CHECK-NEXT: [[ARRAYIDX4_15:%.*]] = getelementptr inbounds i8, ptr [[BIAS]], i64 30 -; CHECK-NEXT: [[TMP93:%.*]] = load i16, ptr [[ARRAYIDX4_15]], align 2 -; CHECK-NEXT: [[CONV5_15:%.*]] = zext i16 [[TMP93]] to i32 +; CHECK-NEXT: [[TMP91:%.*]] = load i16, ptr [[ARRAYIDX4_15]], align 2 +; CHECK-NEXT: [[CONV5_15:%.*]] = zext i16 [[TMP91]] to i32 ; CHECK-NEXT: [[ARRAYIDX10_15:%.*]] = getelementptr inbounds i8, ptr [[MF]], i64 30 -; CHECK-NEXT: [[TMP94:%.*]] = load i16, ptr [[ARRAYIDX10_15]], align 2 -; CHECK-NEXT: [[CONV11_15:%.*]] = zext i16 [[TMP94]] to i32 +; CHECK-NEXT: [[TMP92:%.*]] = load i16, ptr [[ARRAYIDX10_15]], align 2 +; CHECK-NEXT: [[CONV11_15:%.*]] = zext i16 [[TMP92]] to i32 ; CHECK-NEXT: br i1 [[CMP1_15]], label [[IF_THEN_15:%.*]], label [[IF_ELSE_15:%.*]] ; CHECK: if.else.15: ; CHECK-NEXT: [[ADD21_15:%.*]] = sub nsw i32 [[CONV5_15]], [[CONV_15]] ; CHECK-NEXT: [[MUL25_15:%.*]] = mul i32 [[ADD21_15]], [[CONV11_15]] ; CHECK-NEXT: [[SHR26_15:%.*]] = lshr i32 [[MUL25_15]], 16 -; CHECK-NEXT: [[TMP95:%.*]] = trunc i32 [[SHR26_15]] to i16 -; CHECK-NEXT: [[CONV28_15:%.*]] = sub i16 0, [[TMP95]] +; CHECK-NEXT: [[TMP93:%.*]] = trunc i32 [[SHR26_15]] to i16 +; CHECK-NEXT: [[CONV28_15:%.*]] = sub i16 0, [[TMP93]] ; CHECK-NEXT: br label [[IF_END_15]] ; CHECK: if.then.15: ; CHECK-NEXT: [[ADD_15:%.*]] = add nuw nsw i32 [[CONV5_15]], [[CONV_15]] @@ -514,8 +514,8 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) { ; CHECK: if.end.15: ; CHECK-NEXT: [[STOREMERGE_15:%.*]] = phi i16 [ [[CONV28_15]], [[IF_ELSE_15]] ], [ [[CONV12_15]], [[IF_THEN_15]] ] ; CHECK-NEXT: store i16 [[STOREMERGE_15]], ptr [[ARRAYIDX_15]], align 2 -; CHECK-NEXT: [[OR_1545:%.*]] = or i16 [[OR_1444]], [[STOREMERGE_15]] -; CHECK-NEXT: [[OR_15]] = sext i16 [[OR_1545]] to i32 +; CHECK-NEXT: [[OR_1551:%.*]] = or i16 [[OR_1450]], [[STOREMERGE_15]] +; CHECK-NEXT: [[OR_15]] = sext i16 [[OR_1551]] to i32 ; CHECK-NEXT: br label [[FOR_COND_CLEANUP]] ; entry: diff --git a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll index fd6bb61d0369d..741e3ad4f7b98 100644 --- a/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll +++ b/llvm/test/Transforms/PhaseOrdering/X86/excessive-unrolling.ll @@ -13,124 +13,124 @@ define void @test_known_trip_count() { ; CHECK-LABEL: @test_known_trip_count( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr @b, align 16 -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x double>, ptr @c, align 16 -; CHECK-NEXT: [[TMP0:%.*]] = fadd <2 x double> [[WIDE_LOAD]], [[WIDE_LOAD3]] +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 2), align 16 +; CHECK-NEXT: [[WIDE_LOAD4:%.*]] = load <2 x double>, ptr @c, align 16 +; CHECK-NEXT: [[WIDE_LOAD5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 2), align 16 +; CHECK-NEXT: [[TMP0:%.*]] = fadd <2 x double> [[WIDE_LOAD]], [[WIDE_LOAD4]] +; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x double> [[WIDE_LOAD3]], [[WIDE_LOAD5]] ; CHECK-NEXT: store <2 x double> [[TMP0]], ptr @a, align 16 -; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 2), align 16 -; 
CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 2), align 16 -; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x double> [[WIDE_LOAD_1]], [[WIDE_LOAD3_1]] ; CHECK-NEXT: store <2 x double> [[TMP1]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 2), align 16 -; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 4), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 4), align 16 -; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[WIDE_LOAD_2]], [[WIDE_LOAD3_2]] +; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 4), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 6), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 4), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_1:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 6), align 16 +; CHECK-NEXT: [[TMP2:%.*]] = fadd <2 x double> [[WIDE_LOAD_1]], [[WIDE_LOAD4_1]] +; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[WIDE_LOAD3_1]], [[WIDE_LOAD5_1]] ; CHECK-NEXT: store <2 x double> [[TMP2]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 4), align 16 -; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 6), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 6), align 16 -; CHECK-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[WIDE_LOAD_3]], [[WIDE_LOAD3_3]] ; CHECK-NEXT: store <2 x double> [[TMP3]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 6), align 16 -; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 8), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 8), align 16 -; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD_4]], [[WIDE_LOAD3_4]] +; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 8), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 10), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 8), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_2:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 10), align 16 +; CHECK-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD_2]], [[WIDE_LOAD4_2]] +; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD3_2]], [[WIDE_LOAD5_2]] ; CHECK-NEXT: store <2 x double> [[TMP4]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 8), align 16 -; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 10), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 10), align 16 -; CHECK-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD_5]], [[WIDE_LOAD3_5]] ; CHECK-NEXT: store <2 x double> [[TMP5]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, 
i64 10), align 16 -; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 12), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 12), align 16 -; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[WIDE_LOAD_6]], [[WIDE_LOAD3_6]] +; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 12), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 14), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 12), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_3:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 14), align 16 +; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[WIDE_LOAD_3]], [[WIDE_LOAD4_3]] +; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[WIDE_LOAD3_3]], [[WIDE_LOAD5_3]] ; CHECK-NEXT: store <2 x double> [[TMP6]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 12), align 16 -; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 14), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 14), align 16 -; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[WIDE_LOAD_7]], [[WIDE_LOAD3_7]] ; CHECK-NEXT: store <2 x double> [[TMP7]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 14), align 16 -; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 16), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 16), align 16 -; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[WIDE_LOAD_8]], [[WIDE_LOAD3_8]] +; CHECK-NEXT: [[WIDE_LOAD_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 16), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 18), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 16), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_4:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 18), align 16 +; CHECK-NEXT: [[TMP8:%.*]] = fadd <2 x double> [[WIDE_LOAD_4]], [[WIDE_LOAD4_4]] +; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[WIDE_LOAD3_4]], [[WIDE_LOAD5_4]] ; CHECK-NEXT: store <2 x double> [[TMP8]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 16), align 16 -; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 18), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 18), align 16 -; CHECK-NEXT: [[TMP9:%.*]] = fadd <2 x double> [[WIDE_LOAD_9]], [[WIDE_LOAD3_9]] ; CHECK-NEXT: store <2 x double> [[TMP9]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 18), align 16 -; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 20), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 20), align 16 -; 
CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[WIDE_LOAD_10]], [[WIDE_LOAD3_10]] +; CHECK-NEXT: [[WIDE_LOAD_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 20), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 22), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 20), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_5:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 22), align 16 +; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[WIDE_LOAD_5]], [[WIDE_LOAD4_5]] +; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[WIDE_LOAD3_5]], [[WIDE_LOAD5_5]] ; CHECK-NEXT: store <2 x double> [[TMP10]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 20), align 16 -; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 22), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 22), align 16 -; CHECK-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[WIDE_LOAD_11]], [[WIDE_LOAD3_11]] ; CHECK-NEXT: store <2 x double> [[TMP11]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 22), align 16 -; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 24), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 24), align 16 -; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[WIDE_LOAD_12]], [[WIDE_LOAD3_12]] +; CHECK-NEXT: [[WIDE_LOAD_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 24), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 26), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 24), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_6:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 26), align 16 +; CHECK-NEXT: [[TMP12:%.*]] = fadd <2 x double> [[WIDE_LOAD_6]], [[WIDE_LOAD4_6]] +; CHECK-NEXT: [[TMP13:%.*]] = fadd <2 x double> [[WIDE_LOAD3_6]], [[WIDE_LOAD5_6]] ; CHECK-NEXT: store <2 x double> [[TMP12]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 24), align 16 -; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 26), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 26), align 16 -; CHECK-NEXT: [[TMP13:%.*]] = fadd <2 x double> [[WIDE_LOAD_13]], [[WIDE_LOAD3_13]] ; CHECK-NEXT: store <2 x double> [[TMP13]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 26), align 16 -; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 28), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 28), align 16 -; CHECK-NEXT: [[TMP14:%.*]] = fadd <2 x double> [[WIDE_LOAD_14]], [[WIDE_LOAD3_14]] +; CHECK-NEXT: [[WIDE_LOAD_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 28), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_7:%.*]] = load <2 x double>, 
ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 30), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 28), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_7:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 30), align 16 +; CHECK-NEXT: [[TMP14:%.*]] = fadd <2 x double> [[WIDE_LOAD_7]], [[WIDE_LOAD4_7]] +; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x double> [[WIDE_LOAD3_7]], [[WIDE_LOAD5_7]] ; CHECK-NEXT: store <2 x double> [[TMP14]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 28), align 16 -; CHECK-NEXT: [[WIDE_LOAD_15:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 30), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_15:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 30), align 16 -; CHECK-NEXT: [[TMP15:%.*]] = fadd <2 x double> [[WIDE_LOAD_15]], [[WIDE_LOAD3_15]] ; CHECK-NEXT: store <2 x double> [[TMP15]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 30), align 16 -; CHECK-NEXT: [[WIDE_LOAD_16:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 32), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_16:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 32), align 16 -; CHECK-NEXT: [[TMP16:%.*]] = fadd <2 x double> [[WIDE_LOAD_16]], [[WIDE_LOAD3_16]] +; CHECK-NEXT: [[WIDE_LOAD_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 32), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 34), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 32), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_8:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 34), align 16 +; CHECK-NEXT: [[TMP16:%.*]] = fadd <2 x double> [[WIDE_LOAD_8]], [[WIDE_LOAD4_8]] +; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x double> [[WIDE_LOAD3_8]], [[WIDE_LOAD5_8]] ; CHECK-NEXT: store <2 x double> [[TMP16]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 32), align 16 -; CHECK-NEXT: [[WIDE_LOAD_17:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 34), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_17:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 34), align 16 -; CHECK-NEXT: [[TMP17:%.*]] = fadd <2 x double> [[WIDE_LOAD_17]], [[WIDE_LOAD3_17]] ; CHECK-NEXT: store <2 x double> [[TMP17]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 34), align 16 -; CHECK-NEXT: [[WIDE_LOAD_18:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 36), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_18:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 36), align 16 -; CHECK-NEXT: [[TMP18:%.*]] = fadd <2 x double> [[WIDE_LOAD_18]], [[WIDE_LOAD3_18]] +; CHECK-NEXT: [[WIDE_LOAD_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 36), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 38), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_9:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 36), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_9:%.*]] = load <2 x double>, ptr 
getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 38), align 16 +; CHECK-NEXT: [[TMP18:%.*]] = fadd <2 x double> [[WIDE_LOAD_9]], [[WIDE_LOAD4_9]] +; CHECK-NEXT: [[TMP19:%.*]] = fadd <2 x double> [[WIDE_LOAD3_9]], [[WIDE_LOAD5_9]] ; CHECK-NEXT: store <2 x double> [[TMP18]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 36), align 16 -; CHECK-NEXT: [[WIDE_LOAD_19:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 38), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_19:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 38), align 16 -; CHECK-NEXT: [[TMP19:%.*]] = fadd <2 x double> [[WIDE_LOAD_19]], [[WIDE_LOAD3_19]] ; CHECK-NEXT: store <2 x double> [[TMP19]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 38), align 16 -; CHECK-NEXT: [[WIDE_LOAD_20:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 40), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_20:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 40), align 16 -; CHECK-NEXT: [[TMP20:%.*]] = fadd <2 x double> [[WIDE_LOAD_20]], [[WIDE_LOAD3_20]] +; CHECK-NEXT: [[WIDE_LOAD_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 40), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 42), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 40), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_10:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 42), align 16 +; CHECK-NEXT: [[TMP20:%.*]] = fadd <2 x double> [[WIDE_LOAD_10]], [[WIDE_LOAD4_10]] +; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[WIDE_LOAD3_10]], [[WIDE_LOAD5_10]] ; CHECK-NEXT: store <2 x double> [[TMP20]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 40), align 16 -; CHECK-NEXT: [[WIDE_LOAD_21:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 42), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_21:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 42), align 16 -; CHECK-NEXT: [[TMP21:%.*]] = fadd <2 x double> [[WIDE_LOAD_21]], [[WIDE_LOAD3_21]] ; CHECK-NEXT: store <2 x double> [[TMP21]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 42), align 16 -; CHECK-NEXT: [[WIDE_LOAD_22:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 44), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_22:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 44), align 16 -; CHECK-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[WIDE_LOAD_22]], [[WIDE_LOAD3_22]] +; CHECK-NEXT: [[WIDE_LOAD_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 44), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 46), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 44), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_11:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 46), align 16 +; CHECK-NEXT: [[TMP22:%.*]] = fadd <2 x double> [[WIDE_LOAD_11]], [[WIDE_LOAD4_11]] +; CHECK-NEXT: [[TMP23:%.*]] = fadd <2 x double> [[WIDE_LOAD3_11]], [[WIDE_LOAD5_11]] ; CHECK-NEXT: 
store <2 x double> [[TMP22]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 44), align 16 -; CHECK-NEXT: [[WIDE_LOAD_23:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 46), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_23:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 46), align 16 -; CHECK-NEXT: [[TMP23:%.*]] = fadd <2 x double> [[WIDE_LOAD_23]], [[WIDE_LOAD3_23]] ; CHECK-NEXT: store <2 x double> [[TMP23]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 46), align 16 -; CHECK-NEXT: [[WIDE_LOAD_24:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 48), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_24:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 48), align 16 -; CHECK-NEXT: [[TMP24:%.*]] = fadd <2 x double> [[WIDE_LOAD_24]], [[WIDE_LOAD3_24]] +; CHECK-NEXT: [[WIDE_LOAD_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 48), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 50), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 48), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_12:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 50), align 16 +; CHECK-NEXT: [[TMP24:%.*]] = fadd <2 x double> [[WIDE_LOAD_12]], [[WIDE_LOAD4_12]] +; CHECK-NEXT: [[TMP25:%.*]] = fadd <2 x double> [[WIDE_LOAD3_12]], [[WIDE_LOAD5_12]] ; CHECK-NEXT: store <2 x double> [[TMP24]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 48), align 16 -; CHECK-NEXT: [[WIDE_LOAD_25:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 50), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_25:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 50), align 16 -; CHECK-NEXT: [[TMP25:%.*]] = fadd <2 x double> [[WIDE_LOAD_25]], [[WIDE_LOAD3_25]] ; CHECK-NEXT: store <2 x double> [[TMP25]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 50), align 16 -; CHECK-NEXT: [[WIDE_LOAD_26:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 52), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_26:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 52), align 16 -; CHECK-NEXT: [[TMP26:%.*]] = fadd <2 x double> [[WIDE_LOAD_26]], [[WIDE_LOAD3_26]] +; CHECK-NEXT: [[WIDE_LOAD_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 52), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 54), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 52), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_13:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 54), align 16 +; CHECK-NEXT: [[TMP26:%.*]] = fadd <2 x double> [[WIDE_LOAD_13]], [[WIDE_LOAD4_13]] +; CHECK-NEXT: [[TMP27:%.*]] = fadd <2 x double> [[WIDE_LOAD3_13]], [[WIDE_LOAD5_13]] ; CHECK-NEXT: store <2 x double> [[TMP26]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 52), align 16 -; CHECK-NEXT: [[WIDE_LOAD_27:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 54), align 16 -; CHECK-NEXT: 
[[WIDE_LOAD3_27:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 54), align 16 -; CHECK-NEXT: [[TMP27:%.*]] = fadd <2 x double> [[WIDE_LOAD_27]], [[WIDE_LOAD3_27]] ; CHECK-NEXT: store <2 x double> [[TMP27]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 54), align 16 -; CHECK-NEXT: [[WIDE_LOAD_28:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 56), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_28:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 56), align 16 -; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[WIDE_LOAD_28]], [[WIDE_LOAD3_28]] +; CHECK-NEXT: [[WIDE_LOAD_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 0, i64 56), align 16 +; CHECK-NEXT: [[WIDE_LOAD3_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 1, i64 0), align 16 +; CHECK-NEXT: [[WIDE_LOAD4_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 0, i64 56), align 16 +; CHECK-NEXT: [[WIDE_LOAD5_14:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 1, i64 0), align 16 +; CHECK-NEXT: [[TMP28:%.*]] = fadd <2 x double> [[WIDE_LOAD_14]], [[WIDE_LOAD4_14]] +; CHECK-NEXT: [[TMP29:%.*]] = fadd <2 x double> [[WIDE_LOAD3_14]], [[WIDE_LOAD5_14]] ; CHECK-NEXT: store <2 x double> [[TMP28]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 0, i64 56), align 16 -; CHECK-NEXT: [[WIDE_LOAD_29:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @b, i64 1, i64 0), align 16 -; CHECK-NEXT: [[WIDE_LOAD3_29:%.*]] = load <2 x double>, ptr getelementptr inbounds ([58 x double], ptr @c, i64 1, i64 0), align 16 -; CHECK-NEXT: [[TMP29:%.*]] = fadd <2 x double> [[WIDE_LOAD_29]], [[WIDE_LOAD3_29]] ; CHECK-NEXT: store <2 x double> [[TMP29]], ptr getelementptr inbounds ([58 x double], ptr @a, i64 1, i64 0), align 16 ; CHECK-NEXT: [[TMP30:%.*]] = load double, ptr getelementptr inbounds ([58 x double], ptr @b, i64 1, i64 2), align 16 ; CHECK-NEXT: [[TMP31:%.*]] = load double, ptr getelementptr inbounds ([58 x double], ptr @c, i64 1, i64 2), align 16