diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1-bfloat.c
index cbc645d429e5c..aaf4e652cd145 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1-bfloat.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1-bfloat.c
@@ -44,7 +44,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP4]]
@@ -54,7 +54,7 @@ svbfloat16_t test_svld1_bf16(svbool_t pg, const bfloat16_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x bfloat> @llvm.masked.load.nxv8bf16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x bfloat> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 8 x bfloat> [[TMP4]]
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1.c
index 0c5ab6c9aea9f..276ef64736bc3 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1.c
@@ -209,7 +209,7 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
@@ -218,7 +218,7 @@ svfloat64_t test_svld1_f64(svbool_t pg, const float64_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
@@ -233,7 +233,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
@@ -243,7 +243,7 @@ svint8_t test_svld1_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) MODE_
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
@@ -258,7 +258,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
@@ -268,7 +268,7 @@ svint16_t test_svld1_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) MO
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
@@ -283,7 +283,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
@@ -293,7 +293,7 @@ svint32_t test_svld1_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) MO
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
@@ -307,7 +307,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
@@ -316,7 +316,7 @@ svint64_t test_svld1_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) MO
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 16 x i8> [[TMP3]]
@@ -331,7 +331,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
@@ -341,7 +341,7 @@ svuint8_t test_svld1_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) MOD
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 8 x i16> [[TMP4]]
@@ -356,7 +356,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
@@ -366,7 +366,7 @@ svuint16_t test_svld1_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP4]]
@@ -381,7 +381,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
@@ -391,7 +391,7 @@ svuint32_t test_svld1_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i64> @llvm.masked.load.nxv2i64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 2 x i64> [[TMP4]]
@@ -406,7 +406,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
@@ -416,7 +416,7 @@ svuint64_t test_svld1_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.masked.load.nxv8f16.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
@@ -431,7 +431,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 4 x float> [[TMP4]]
@@ -441,7 +441,7 @@ svfloat16_t test_svld1_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x float> @llvm.masked.load.nxv4f32.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 4 x float> [[TMP4]]
@@ -456,7 +456,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
 // CHECK-NEXT:    ret <vscale x 2 x double> [[TMP4]]
@@ -466,7 +466,7 @@ svfloat32_t test_svld1_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x double> @llvm.masked.load.nxv2f64.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> zeroinitializer)
 // CPP-CHECK-NEXT:    ret <vscale x 2 x double> [[TMP4]]
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sb.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sb.c
index 59d1e103db389..2757f2873cc83 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sb.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sb.c
@@ -141,7 +141,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -152,7 +152,7 @@ svuint64_t test_svld1sb_u64(svbool_t pg, const int8_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -168,7 +168,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -179,7 +179,7 @@ svint16_t test_svld1sb_vnum_s16(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -195,7 +195,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -206,7 +206,7 @@ svint32_t test_svld1sb_vnum_s32(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -222,7 +222,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -233,7 +233,7 @@ svint64_t test_svld1sb_vnum_s64(svbool_t pg, const int8_t *base, int64_t vnum) M
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -249,7 +249,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -260,7 +260,7 @@ svuint16_t test_svld1sb_vnum_u16(svbool_t pg, const int8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -276,7 +276,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -287,7 +287,7 @@ svuint32_t test_svld1sb_vnum_u32(svbool_t pg, const int8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -298,7 +298,7 @@ svuint64_t test_svld1sb_vnum_u64(svbool_t pg, const int8_t *base, int64_t vnum)
   return svld1sb_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1sb_gather_u32base_s32(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sh.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sh.c
index 1df3f6adbc1c6..dbc762fb8632a 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sh.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sh.c
@@ -103,7 +103,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -114,7 +114,7 @@ svuint64_t test_svld1sh_u64(svbool_t pg, const int16_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -130,7 +130,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -141,7 +141,7 @@ svint32_t test_svld1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -157,7 +157,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -168,7 +168,7 @@ svint64_t test_svld1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -184,7 +184,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -195,7 +195,7 @@ svuint32_t test_svld1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -206,7 +206,7 @@ svuint64_t test_svld1sh_vnum_u64(svbool_t pg, const int16_t *base, int64_t vnum)
   return svld1sh_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1sh_gather_u32base_s32(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sw.c
index e7d77e62d44c1..575d2141d2815 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sw.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1sw.c
@@ -65,7 +65,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -76,7 +76,7 @@ svuint64_t test_svld1sw_u64(svbool_t pg, const int32_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -92,7 +92,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -103,7 +103,7 @@ svint64_t test_svld1sw_vnum_s64(svbool_t pg, const int32_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = sext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -114,7 +114,7 @@ svuint64_t test_svld1sw_vnum_u64(svbool_t pg, const int32_t *base, int64_t vnum
   return svld1sw_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1sw_gather_u64base_s64(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1ub.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1ub.c
index 31906b4e5f646..07e88152a6f53 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1ub.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1ub.c
@@ -141,7 +141,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -152,7 +152,7 @@ svuint64_t test_svld1ub_u64(svbool_t pg, const uint8_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -168,7 +168,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -179,7 +179,7 @@ svint16_t test_svld1ub_vnum_s16(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -195,7 +195,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -206,7 +206,7 @@ svint32_t test_svld1ub_vnum_s32(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -222,7 +222,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -233,7 +233,7 @@ svint64_t test_svld1ub_vnum_s64(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]], <vscale x 8 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 8 x i8> [[TMP4]] to <vscale x 8 x i16>
@@ -249,7 +249,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -260,7 +260,7 @@ svuint16_t test_svld1ub_vnum_u16(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i8> [[TMP4]] to <vscale x 4 x i32>
@@ -276,7 +276,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -287,7 +287,7 @@ svuint32_t test_svld1ub_vnum_u32(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i8> @llvm.masked.load.nxv2i8.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i8> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i8> [[TMP4]] to <vscale x 2 x i64>
@@ -298,7 +298,7 @@ svuint64_t test_svld1ub_vnum_u64(svbool_t pg, const uint8_t *base, int64_t vnum)
   return svld1ub_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1ub_gather_u32base_s32(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uh.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uh.c
index e6553e193109f..6d91c1ecd7c7a 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uh.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uh.c
@@ -103,7 +103,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -114,7 +114,7 @@ svuint64_t test_svld1uh_u64(svbool_t pg, const uint16_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -130,7 +130,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -141,7 +141,7 @@ svint32_t test_svld1uh_vnum_s32(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -157,7 +157,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -168,7 +168,7 @@ svint64_t test_svld1uh_vnum_s64(svbool_t pg, const uint16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]], <vscale x 4 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 4 x i16> [[TMP4]] to <vscale x 4 x i32>
@@ -184,7 +184,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -195,7 +195,7 @@ svuint32_t test_svld1uh_vnum_u32(svbool_t pg, const uint16_t *base, int64_t vnum
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i16> @llvm.masked.load.nxv2i16.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i16> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i16> [[TMP4]] to <vscale x 2 x i64>
@@ -206,7 +206,7 @@ svuint64_t test_svld1uh_vnum_u64(svbool_t pg, const uint16_t *base, int64_t vnum
   return svld1uh_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1uh_gather_u32base_s32(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uw.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uw.c
index b7ffb86daac23..7be23987aedf5 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uw.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_ld1uw.c
@@ -65,7 +65,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -76,7 +76,7 @@ svuint64_t test_svld1uw_u64(svbool_t pg, const uint32_t *base) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -92,7 +92,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -103,7 +103,7 @@ svint64_t test_svld1uw_vnum_s64(svbool_t pg, const uint32_t *base, int64_t vnum)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]], <vscale x 2 x i32> zeroinitializer)
 // CPP-CHECK-NEXT:    [[TMP5:%.*]] = zext <vscale x 2 x i32> [[TMP4]] to <vscale x 2 x i64>
@@ -114,7 +114,7 @@ svuint64_t test_svld1uw_vnum_u64(svbool_t pg, const uint32_t *base, int64_t vnum
   return svld1uw_vnum_u64(pg, base, vnum);
 }
 
-#ifndef __ARM_FEATURE_SME
+#ifndef __ARM_FEATURE_SME
 
 // CHECK-LABEL: @test_svld1uw_gather_u64base_s64(
 // CHECK-NEXT:  entry:
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c
index c1254e03102d7..1d194626418a2 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1-bfloat.c
@@ -45,7 +45,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
 // CHECK-NEXT:    ret void
@@ -55,7 +55,7 @@ void test_svst1_bf16(svbool_t pg, bfloat16_t *base, svbfloat16_t data) MODE_ATTR
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv8bf16.p0(<vscale x 8 x bfloat> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
 // CPP-CHECK-NEXT:    ret void
diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1.c
index 519f0c90614a5..29afdaf3eb0c7 100644
--- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1.c
+++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1.c
@@ -209,7 +209,7 @@ void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    ret void
@@ -218,7 +218,7 @@ void test_svst1_f64(svbool_t pg, float64_t *base, svfloat64_t data) MODE_ATTR
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    ret void
@@ -233,7 +233,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
 // CHECK-NEXT:    ret void
@@ -243,7 +243,7 @@ void test_svst1_vnum_s8(svbool_t pg, int8_t *base, int64_t vnum, svint8_t data)
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
 // CPP-CHECK-NEXT:    ret void
@@ -258,7 +258,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
 // CHECK-NEXT:    ret void
@@ -268,7 +268,7 @@ void test_svst1_vnum_s16(svbool_t pg, int16_t *base, int64_t vnum, svint16_t dat
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
 // CPP-CHECK-NEXT:    ret void
@@ -283,7 +283,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
 // CHECK-NEXT:    ret void
@@ -293,7 +293,7 @@ void test_svst1_vnum_s32(svbool_t pg, int32_t *base, int64_t vnum, svint32_t dat
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
 // CPP-CHECK-NEXT:    ret void
@@ -307,7 +307,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    ret void
@@ -316,7 +316,7 @@ void test_svst1_vnum_s64(svbool_t pg, int64_t *base, int64_t vnum, svint64_t dat
 // CPP-CHECK-NEXT:  entry:
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[DATA:%.*]], ptr [[TMP2]], i32 1, <vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    ret void
@@ -331,7 +331,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
 // CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT:    tail call void @llvm.masked.store.nxv8i16.p0(<vscale x 8 x i16> [[DATA:%.*]], ptr [[TMP3]], i32 1, <vscale x 8 x i1> [[TMP0]])
 // CHECK-NEXT:    ret void
@@ -341,7 +341,7 @@ void test_svst1_vnum_u8(svbool_t pg, uint8_t *base, int64_t vnum, svuint8_t data
 // CPP-CHECK-NEXT:    [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
 // CPP-CHECK-NEXT:    [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT:    [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT:    [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT:    tail
call void @llvm.masked.store.nxv8i16.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -356,7 +356,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CHECK-NEXT: ret void @@ -366,7 +366,7 @@ void test_svst1_vnum_u16(svbool_t pg, uint16_t *base, int64_t vnum, svuint16_t d // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4i32.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -381,7 +381,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CHECK-NEXT: ret void @@ -391,7 +391,7 @@ void test_svst1_vnum_u32(svbool_t pg, uint32_t *base, int64_t vnum, svuint32_t d // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2i64.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -406,7 +406,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t vnum, svuint64_t d // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CHECK-NEXT: ret void @@ -416,7 +416,7 @@ void test_svst1_vnum_u64(svbool_t pg, uint64_t *base, int64_t 
vnum, svuint64_t d // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv8f16.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -431,7 +431,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CHECK-NEXT: ret void @@ -441,7 +441,7 @@ void test_svst1_vnum_f16(svbool_t pg, float16_t *base, int64_t vnum, svfloat16_t // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv4f32.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -456,7 +456,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CHECK-NEXT: ret void @@ -466,7 +466,7 @@ void test_svst1_vnum_f32(svbool_t pg, float32_t *base, int64_t vnum, svfloat32_t // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: tail call void @llvm.masked.store.nxv2f64.p0( [[DATA:%.*]], ptr [[TMP3]], i32 1, [[TMP0]]) // CPP-CHECK-NEXT: ret void @@ -476,7 +476,7 @@ void test_svst1_vnum_f64(svbool_t pg, float64_t *base, int64_t vnum, svfloat64_t return SVE_ACLE_FUNC(svst1_vnum,_f64,,)(pg, base, vnum, data); } -#ifndef __ARM_FEATURE_SME +#ifndef __ARM_FEATURE_SME // CHECK-LABEL: 
@test_svst1_scatter_u32base_s32( // CHECK-NEXT: entry: diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1b.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1b.c index 152f01aab7405..c908bc2a483ce 100644 --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1b.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1b.c @@ -97,7 +97,7 @@ void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data) MODE_ATTR // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -113,7 +113,7 @@ void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t dat // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -129,7 +129,7 @@ void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t dat // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -145,7 +145,7 @@ void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t dat // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -161,7 +161,7 @@ void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t d // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = 
getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -177,7 +177,7 @@ void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t d // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 1 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -188,7 +188,7 @@ void test_svst1b_vnum_u64(svbool_t pg, uint8_t *base, int64_t vnum, svuint64_t d return SVE_ACLE_FUNC(svst1b_vnum,_u64,,)(pg, base, vnum, data); } -#ifndef __ARM_FEATURE_SME +#ifndef __ARM_FEATURE_SME // CHECK-LABEL: @test_svst1b_scatter_u32base_s32( // CHECK-NEXT: entry: diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1h.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1h.c index 9aa450f2e5457..959b658425f01 100644 --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1h.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1h.c @@ -73,7 +73,7 @@ void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -89,7 +89,7 @@ void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t da // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -105,7 +105,7 @@ void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t da // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -121,7 +121,7 @@ void 
test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -132,7 +132,7 @@ void test_svst1h_vnum_u64(svbool_t pg, uint16_t *base, int64_t vnum, svuint64_t return SVE_ACLE_FUNC(svst1h_vnum,_u64,,)(pg, base, vnum, data); } -#ifndef __ARM_FEATURE_SME +#ifndef __ARM_FEATURE_SME // CHECK-LABEL: @test_svst1h_scatter_u32base_s32( // CHECK-NEXT: entry: diff --git a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1w.c b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1w.c index f22190b3583ed..3d9e45bda7b3f 100644 --- a/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1w.c +++ b/clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_st1w.c @@ -49,7 +49,7 @@ void test_svst1w_u64(svbool_t pg, uint32_t *base, svuint64_t data) MODE_ATTR // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -65,7 +65,7 @@ void test_svst1w_vnum_s64(svbool_t pg, int32_t *base, int64_t vnum, svint64_t da // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = trunc [[DATA:%.*]] to // CHECK-NEXT: tail call void @llvm.masked.store.nxv2i32.p0( [[TMP4]], ptr [[TMP3]], i32 1, [[TMP0]]) @@ -76,7 +76,7 @@ void test_svst1w_vnum_u64(svbool_t pg, uint32_t *base, int64_t vnum, svuint64_t return SVE_ACLE_FUNC(svst1w_vnum,_u64,,)(pg, base, vnum, data); } -#ifndef __ARM_FEATURE_SME +#ifndef __ARM_FEATURE_SME // CHECK-LABEL: @test_svst1w_scatter_u64base_s64( // CHECK-NEXT: entry: diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_loads.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_loads.c index 877e24411bb9b..467161ccc238d 100644 --- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_loads.c +++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_loads.c @@ -314,7 +314,7 @@ svfloat64x2_t test_svld2q_f64(svbool_t pg, const float64_t *base) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = 
mul i64 [[VNUM:%.*]], [[TMP1]] // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP3:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , } [[TMP3]], 0 @@ -327,7 +327,7 @@ svfloat64x2_t test_svld2q_f64(svbool_t pg, const float64_t *base) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , } [[TMP3]], 0 @@ -345,7 +345,7 @@ svuint8x2_t test_svld2q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP3:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , } [[TMP3]], 0 @@ -358,7 +358,7 @@ svuint8x2_t test_svld2q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , } [[TMP3]], 0 @@ -376,7 +376,7 @@ svint8x2_t test_svld2q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -390,7 +390,7 @@ svint8x2_t test_svld2q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) 
// CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -409,7 +409,7 @@ svuint16x2_t test_svld2q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -423,7 +423,7 @@ svuint16x2_t test_svld2q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -442,7 +442,7 @@ svint16x2_t test_svld2q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -456,7 +456,7 @@ svint16x2_t test_svld2q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -475,7 +475,7 @@ svuint32x2_t test_svld2q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) 
// CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -489,7 +489,7 @@ svuint32x2_t test_svld2q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -508,7 +508,7 @@ svint32x2_t test_svld2q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -522,7 +522,7 @@ svint32x2_t test_svld2q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -541,7 +541,7 @@ svuint64x2_t test_svld2q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -555,7 +555,7 @@ svuint64x2_t test_svld2q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2i64( 
[[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -574,7 +574,7 @@ svint64x2_t test_svld2q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -588,7 +588,7 @@ svint64x2_t test_svld2q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -607,7 +607,7 @@ svfloat16x2_t test_svld2q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -621,7 +621,7 @@ svfloat16x2_t test_svld2q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -640,7 +640,7 @@ svbfloat16x2_t test_svld2q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_ // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4f32( 
[[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -654,7 +654,7 @@ svbfloat16x2_t test_svld2q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_ // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv4f32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -673,7 +673,7 @@ svfloat32x2_t test_svld2q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -687,7 +687,7 @@ svfloat32x2_t test_svld2q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , } @llvm.aarch64.sve.ld2q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , } [[TMP4]], 0 @@ -1049,7 +1049,7 @@ svfloat64x3_t test_svld3q_f64(svbool_t pg, const float64_t *base) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP3:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP3]], 0 @@ -1064,7 +1064,7 @@ svfloat64x3_t test_svld3q_f64(svbool_t pg, const float64_t *base) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP3]], 0 @@ -1084,7 +1084,7 @@ svuint8x3_t 
test_svld3q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) // CHECK-NEXT: entry: // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP3:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP3]], 0 @@ -1099,7 +1099,7 @@ svuint8x3_t test_svld3q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum) // CPP-CHECK-NEXT: entry: // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]] // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]]) // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , } [[TMP3]], 0 @@ -1120,7 +1120,7 @@ svint8x3_t test_svld3q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1136,7 +1136,7 @@ svint8x3_t test_svld3q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1157,7 +1157,7 @@ svuint16x3_t test_svld3q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1173,7 +1173,7 @@ svuint16x3_t test_svld3q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call 
@llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1194,7 +1194,7 @@ svint16x3_t test_svld3q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1210,7 +1210,7 @@ svint16x3_t test_svld3q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1231,7 +1231,7 @@ svuint32x3_t test_svld3q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1247,7 +1247,7 @@ svuint32x3_t test_svld3q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1268,7 +1268,7 @@ svint32x3_t test_svld3q_vnum_s32(svbool_t pg, const int32_t *base, 
int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1284,7 +1284,7 @@ svint32x3_t test_svld3q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1305,7 +1305,7 @@ svuint64x3_t test_svld3q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1321,7 +1321,7 @@ svuint64x3_t test_svld3q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1342,7 +1342,7 @@ svint64x3_t test_svld3q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum) // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1358,7 +1358,7 @@ svint64x3_t test_svld3q_vnum_s64(svbool_t 
pg, const int64_t *base, int64_t vnum) // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1379,7 +1379,7 @@ svfloat16x3_t test_svld3q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1395,7 +1395,7 @@ svfloat16x3_t test_svld3q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1416,7 +1416,7 @@ svbfloat16x3_t test_svld3q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_ // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4f32( [[TMP0]], ptr [[TMP3]]) // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ -1432,7 +1432,7 @@ svbfloat16x3_t test_svld3q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_ // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]]) // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64() // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4 -// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]] +// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]] // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]] // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv4f32( [[TMP0]], ptr [[TMP3]]) // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0 @@ 
-1453,7 +1453,7 @@ svfloat32x3_t test_svld3q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0
@@ -1469,7 +1469,7 @@ svfloat32x3_t test_svld3q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , } @llvm.aarch64.sve.ld3q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , } [[TMP4]], 0
@@ -1850,7 +1850,7 @@ svfloat64x4_t test_svld4q_f64(svbool_t pg, const float64_t *base)
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP3:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]])
 // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
@@ -1867,7 +1867,7 @@ svfloat64x4_t test_svld4q_f64(svbool_t pg, const float64_t *base)
 // CPP-CHECK-NEXT: entry:
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
@@ -1889,7 +1889,7 @@ svuint8x4_t test_svld4q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CHECK-NEXT: entry:
 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP3:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]])
 // CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
@@ -1906,7 +1906,7 @@ svuint8x4_t test_svld4q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT: entry:
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP1]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP1]]
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv16i8( [[PG:%.*]], ptr [[TMP2]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = extractvalue { , , , } [[TMP3]], 0
@@ -1928,7 +1928,7 @@ svint8x4_t test_svld4q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -1946,7 +1946,7 @@ svint8x4_t test_svld4q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum)
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -1969,7 +1969,7 @@ svuint16x4_t test_svld4q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -1987,7 +1987,7 @@ svuint16x4_t test_svld4q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnu
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8i16( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2010,7 +2010,7 @@ svint16x4_t test_svld4q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2028,7 +2028,7 @@ svint16x4_t test_svld4q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum)
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2051,7 +2051,7 @@ svuint32x4_t test_svld4q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2069,7 +2069,7 @@ svuint32x4_t test_svld4q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnu
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4i32( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2092,7 +2092,7 @@ svint32x4_t test_svld4q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2110,7 +2110,7 @@ svint32x4_t test_svld4q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum)
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2133,7 +2133,7 @@ svuint64x4_t test_svld4q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2151,7 +2151,7 @@ svuint64x4_t test_svld4q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnu
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2i64( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2174,7 +2174,7 @@ svint64x4_t test_svld4q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2192,7 +2192,7 @@ svint64x4_t test_svld4q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum)
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8f16( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2215,7 +2215,7 @@ svfloat16x4_t test_svld4q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2233,7 +2233,7 @@ svfloat16x4_t test_svld4q_vnum_f16(svbool_t pg, const float16_t *base, int64_t v
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv8bf16( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2256,7 +2256,7 @@ svbfloat16x4_t test_svld4q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4f32( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2274,7 +2274,7 @@ svbfloat16x4_t test_svld4q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv4f32( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2297,7 +2297,7 @@ svfloat32x4_t test_svld4q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v
 // CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]])
 // CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
@@ -2315,7 +2315,7 @@ svfloat32x4_t test_svld4q_vnum_f32(svbool_t pg, const float32_t *base, int64_t v
 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP2]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call { , , , } @llvm.aarch64.sve.ld4q.sret.nxv2f64( [[TMP0]], ptr [[TMP3]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = extractvalue { , , , } [[TMP4]], 0
diff --git a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_store.c b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_store.c
index bc028eeba624c..1def0289c12ae 100644
--- a/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_store.c
+++ b/clang/test/CodeGen/aarch64-sve2p1-intrinsics/acle_sve2p1_store.c
@@ -267,7 +267,7 @@ void test_svst2q_f64(svbool_t pg, const float64_t *base, svfloat64x2_t zt)
 // CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv32i8( [[ZT]], i64 16)
 // CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP3]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP3]]
 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8( [[TMP0]], [[TMP1]], [[PG:%.*]], ptr [[TMP4]])
 // CHECK-NEXT: ret void
@@ -278,7 +278,7 @@ void test_svst2q_f64(svbool_t pg, const float64_t *base, svfloat64x2_t zt)
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv32i8( [[ZT]], i64 16)
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP3]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP3]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8( [[TMP0]], [[TMP1]], [[PG:%.*]], ptr [[TMP4]])
 // CPP-CHECK-NEXT: ret void
@@ -294,7 +294,7 @@ void test_svst2q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv32i8( [[ZT]], i64 16)
 // CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP3]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP3]]
 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8( [[TMP0]], [[TMP1]], [[PG:%.*]], ptr [[TMP4]])
 // CHECK-NEXT: ret void
@@ -305,7 +305,7 @@ void test_svst2q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv32i8( [[ZT]], i64 16)
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP3]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP3]]
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8( [[TMP0]], [[TMP1]], [[PG:%.*]], ptr [[TMP4]])
 // CPP-CHECK-NEXT: ret void
@@ -322,7 +322,7 @@ void test_svst2q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x2
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -334,7 +334,7 @@ void test_svst2q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x2
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -351,7 +351,7 @@ void test_svst2q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -363,7 +363,7 @@ void test_svst2q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -380,7 +380,7 @@ void test_svst2q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -392,7 +392,7 @@ void test_svst2q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -409,7 +409,7 @@ void test_svst2q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -421,7 +421,7 @@ void test_svst2q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -438,7 +438,7 @@ void test_svst2q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -450,7 +450,7 @@ void test_svst2q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -467,7 +467,7 @@ void test_svst2q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -479,7 +479,7 @@ void test_svst2q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -496,7 +496,7 @@ void test_svst2q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -508,7 +508,7 @@ void test_svst2q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -525,7 +525,7 @@ void test_svst2q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -537,7 +537,7 @@ void test_svst2q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -554,7 +554,7 @@ void test_svst2q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -566,7 +566,7 @@ void test_svst2q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -583,7 +583,7 @@ void test_svst2q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -595,7 +595,7 @@ void test_svst2q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -885,7 +885,7 @@ void test_svst3q_f64(svbool_t pg, const float64_t *base, svfloat64x3_t zt)
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv48i8( [[ZT]], i64 32)
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[PG:%.*]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -897,7 +897,7 @@ void test_svst3q_f64(svbool_t pg, const float64_t *base, svfloat64x3_t zt)
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv48i8( [[ZT]], i64 32)
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[PG:%.*]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -914,7 +914,7 @@ void test_svst3q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv48i8( [[ZT]], i64 32)
 // CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[PG:%.*]], ptr [[TMP5]])
 // CHECK-NEXT: ret void
@@ -926,7 +926,7 @@ void test_svst3q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv48i8( [[ZT]], i64 32)
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP4]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP4]]
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[PG:%.*]], ptr [[TMP5]])
 // CPP-CHECK-NEXT: ret void
@@ -944,7 +944,7 @@ void test_svst3q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x3
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -957,7 +957,7 @@ void test_svst3q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x3
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -975,7 +975,7 @@ void test_svst3q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -988,7 +988,7 @@ void test_svst3q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1006,7 +1006,7 @@ void test_svst3q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1019,7 +1019,7 @@ void test_svst3q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1037,7 +1037,7 @@ void test_svst3q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1050,7 +1050,7 @@ void test_svst3q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1068,7 +1068,7 @@ void test_svst3q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1081,7 +1081,7 @@ void test_svst3q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1099,7 +1099,7 @@ void test_svst3q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1112,7 +1112,7 @@ void test_svst3q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1130,7 +1130,7 @@ void test_svst3q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1143,7 +1143,7 @@ void test_svst3q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1161,7 +1161,7 @@ void test_svst3q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1174,7 +1174,7 @@ void test_svst3q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1192,7 +1192,7 @@ void test_svst3q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1205,7 +1205,7 @@ void test_svst3q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1223,7 +1223,7 @@ void test_svst3q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1236,7 +1236,7 @@ void test_svst3q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1551,7 +1551,7 @@ void test_svst4q_f64(svbool_t pg, const float64_t *base, svfloat64x4_t zt)
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv64i8( [[ZT]], i64 48)
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[PG:%.*]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1564,7 +1564,7 @@ void test_svst4q_f64(svbool_t pg, const float64_t *base, svfloat64x4_t zt)
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv64i8( [[ZT]], i64 48)
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[PG:%.*]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1582,7 +1582,7 @@ void test_svst4q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv64i8( [[ZT]], i64 48)
 // CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[PG:%.*]], ptr [[TMP6]])
 // CHECK-NEXT: ret void
@@ -1595,7 +1595,7 @@ void test_svst4q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8
 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call @llvm.vector.extract.nxv16i8.nxv64i8( [[ZT]], i64 48)
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP5]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP5]]
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[PG:%.*]], ptr [[TMP6]])
 // CPP-CHECK-NEXT: ret void
@@ -1614,7 +1614,7 @@ void test_svst4q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x4
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1628,7 +1628,7 @@ void test_svst4q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x4
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1647,7 +1647,7 @@ void test_svst4q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1661,7 +1661,7 @@ void test_svst4q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1680,7 +1680,7 @@ void test_svst4q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1694,7 +1694,7 @@ void test_svst4q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint1
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1713,7 +1713,7 @@ void test_svst4q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1727,7 +1727,7 @@ void test_svst4q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1746,7 +1746,7 @@ void test_svst4q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1760,7 +1760,7 @@ void test_svst4q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint3
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1779,7 +1779,7 @@ void test_svst4q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1793,7 +1793,7 @@ void test_svst4q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuin
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1812,7 +1812,7 @@ void test_svst4q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1826,7 +1826,7 @@ void test_svst4q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint6
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1845,7 +1845,7 @@ void test_svst4q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1859,7 +1859,7 @@ void test_svst4q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv8i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1878,7 +1878,7 @@ void test_svst4q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1892,7 +1892,7 @@ void test_svst4q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, sv
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv4i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
@@ -1911,7 +1911,7 @@ void test_svst4q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CHECK-NEXT: ret void
@@ -1925,7 +1925,7 @@ void test_svst4q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfl
 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call @llvm.aarch64.sve.convert.from.svbool.nxv2i1( [[PG:%.*]])
 // CPP-CHECK-NEXT: [[TMP5:%.*]] = tail call i64 @llvm.vscale.i64()
 // CPP-CHECK-NEXT: [[TMP6:%.*]] = shl nuw nsw i64 [[TMP5]], 4
-// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[TMP6]], [[VNUM:%.*]]
+// CPP-CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP6]]
 // CPP-CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64( [[TMP0]], [[TMP1]], [[TMP2]], [[TMP3]], [[TMP4]], ptr [[TMP7]])
 // CPP-CHECK-NEXT: ret void
diff --git a/clang/test/CodeGen/attr-counted-by.c b/clang/test/CodeGen/attr-counted-by.c
index 9acc896c0f0e9..3ed8b6f0c7186 100644
--- a/clang/test/CodeGen/attr-counted-by.c
+++ b/clang/test/CodeGen/attr-counted-by.c
@@ -111,7 +111,7 @@ void test1(struct annotated *p, int index, int val) {
 // SANITIZE-WITH-ATTR-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 8
 // SANITIZE-WITH-ATTR-NEXT: [[DOT_COUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOT_COUNTED_BY_GEP]], align 4
 // SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOT_COUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
-// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], [[INDEX]], !nosanitize [[META2]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[INDEX]], [[TMP0]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT3:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR: handler.out_of_bounds:
 // SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB3:[0-9]+]], i64 [[INDEX]]) #[[ATTR10]], !nosanitize [[META2]]
@@ -200,7 +200,7 @@ size_t test2_bdos(struct annotated *p) {
 // SANITIZE-WITH-ATTR-NEXT: [[DOT_COUNTED_BY_GEP:%.*]] = getelementptr inbounds i8, ptr [[P]], i64 8
 // SANITIZE-WITH-ATTR-NEXT: [[DOT_COUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOT_COUNTED_BY_GEP]], align 4
 // SANITIZE-WITH-ATTR-NEXT: [[TMP0:%.*]] = zext i32 [[DOT_COUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
-// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[TMP0]], [[INDEX]], !nosanitize [[META2]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = icmp ult i64 [[INDEX]], [[TMP0]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP1]], label [[CONT3:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR: handler.out_of_bounds:
 // SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB4:[0-9]+]], i64 [[INDEX]]) #[[ATTR10]], !nosanitize [[META2]]
@@ -1185,7 +1185,7 @@ struct test13_bar {
 // SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 8
 // SANITIZE-WITH-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
 // SANITIZE-WITH-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META2]]
-// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP1]], [[INDEX]], !nosanitize [[META2]]
+// SANITIZE-WITH-ATTR-NEXT: [[TMP2:%.*]] = icmp ult i64 [[INDEX]], [[TMP1]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR-NEXT: br i1 [[TMP2]], label [[CONT5:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF3]], !nosanitize [[META2]]
 // SANITIZE-WITH-ATTR: handler.out_of_bounds:
 // SANITIZE-WITH-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB24:[0-9]+]], i64 [[INDEX]]) #[[ATTR10]], !nosanitize [[META2]]
@@ -1212,7 +1212,7 @@ struct test13_bar {
 // SANITIZE-WITHOUT-ATTR-NEXT: [[DOTCOUNTED_BY_GEP:%.*]] = getelementptr inbounds i8, ptr [[TMP0]], i64 8
 // SANITIZE-WITHOUT-ATTR-NEXT: [[DOTCOUNTED_BY_LOAD:%.*]] = load i32, ptr [[DOTCOUNTED_BY_GEP]], align 4
 // SANITIZE-WITHOUT-ATTR-NEXT: [[TMP1:%.*]] = zext i32 [[DOTCOUNTED_BY_LOAD]] to i64, !nosanitize [[META9]]
-// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = icmp ugt i64 [[TMP1]], [[INDEX]], !nosanitize [[META9]]
+// SANITIZE-WITHOUT-ATTR-NEXT: [[TMP2:%.*]] = icmp ult i64 [[INDEX]], [[TMP1]], !nosanitize [[META9]]
 // SANITIZE-WITHOUT-ATTR-NEXT: br i1 [[TMP2]], label [[CONT5:%.*]], label [[HANDLER_OUT_OF_BOUNDS:%.*]], !prof [[PROF8]], !nosanitize [[META9]]
 // SANITIZE-WITHOUT-ATTR: handler.out_of_bounds:
 // SANITIZE-WITHOUT-ATTR-NEXT: tail call void @__ubsan_handle_out_of_bounds_abort(ptr nonnull @[[GLOB8:[0-9]+]], i64 [[INDEX]]) #[[ATTR8]], !nosanitize [[META9]]
diff --git a/clang/test/CodeGen/fp-reassoc-pragma.cpp b/clang/test/CodeGen/fp-reassoc-pragma.cpp
index 0cf2f812e66e9..8b9329c40174b 100644
--- a/clang/test/CodeGen/fp-reassoc-pragma.cpp
+++ b/clang/test/CodeGen/fp-reassoc-pragma.cpp
@@ -3,8 +3,8 @@
 float fp_reassoc_simple(float a, float b, float c) {
 // CHECK: _Z17fp_reassoc_simplefff
 // CHECK: %[[A:.+]] = fadd reassoc float %b, %c
-// CHECK: %[[M:.+]] = fmul reassoc float %[[A]], %b
-// CHECK-NEXT: fadd reassoc float %[[M]], %c
+// CHECK: %[[M:.+]] = fmul reassoc float %b, %[[A]]
+// CHECK-NEXT: fadd reassoc float %c, %[[M]]
 #pragma clang fp reassociate(on)
 a = b + c;
 return a * b + c;
@@ -34,7 +34,7 @@ float fp_reassoc_template(float a, float b, float c) {
 // CHECK: _Z19fp_reassoc_templatefff
 // CHECK: %[[A1:.+]] = fadd reassoc float %a, %b
 // CHECK-NEXT: %[[A2:.+]] = fsub reassoc float %[[A1]], %c
- // CHECK-NEXT: fadd reassoc float %[[A2]], %c
+ // CHECK-NEXT: fadd reassoc float %c, %[[A2]]
 return template_reassoc(a, b, c);
 }
diff --git a/clang/test/CodeGen/fp-reciprocal-pragma.cpp b/clang/test/CodeGen/fp-reciprocal-pragma.cpp
index db93550301bf2..8398e48410e33 100644
--- a/clang/test/CodeGen/fp-reciprocal-pragma.cpp
+++ b/clang/test/CodeGen/fp-reciprocal-pragma.cpp
@@ -5,11 +5,11 @@
 float base(float a, float b, float c) {
 // CHECK-LABEL: _Z4basefff
 // FLAG: %[[A:.+]] = fdiv arcp float %b, %c
 // FLAG: %[[M:.+]] = fdiv arcp float %[[A]], %b
-// FLAG-NEXT: fadd arcp float %[[M]], %c
+// FLAG-NEXT: fadd arcp float %c, %[[M]]
 // DEFAULT: %[[A:.+]] = fdiv float %b, %c
 // DEFAULT: %[[M:.+]] = fdiv float %[[A]], %b
-// DEFAULT-NEXT: fadd float %[[M]], %c
+// DEFAULT-NEXT: fadd float %c, %[[M]]
 a = b / c;
 return a / b + c;
 }
@@ -19,7 +19,7 @@ float fp_recip_simple(float a, float b, float c) {
 // CHECK-LABEL: _Z15fp_recip_simplefff
 // CHECK: %[[A:.+]] = fdiv arcp float %b, %c
 // CHECK: %[[M:.+]] = fdiv arcp float %[[A]], %b
-// CHECK-NEXT: fadd arcp float %[[M]], %c
+// CHECK-NEXT: fadd arcp float %c, %[[M]]
 #pragma clang fp reciprocal(on)
 a = b / c;
 return a / b + c;
@@ -30,7 +30,7 @@ float fp_recip_disable(float a, float b, float c) {
 // CHECK-LABEL: _Z16fp_recip_disablefff
 // CHECK: %[[A:.+]] = fdiv float %b, %c
 // CHECK: %[[M:.+]] = fdiv float %[[A]], %b
-// CHECK-NEXT: fadd float %[[M]], %c
+// CHECK-NEXT: fadd float %c, %[[M]]
 #pragma clang fp reciprocal(off)
 a = b / c;
 return a / b + c;
@@ -40,7 +40,7 @@ float fp_recip_with_reassoc_simple(float a, float
b, float c) { // CHECK-LABEL: _Z28fp_recip_with_reassoc_simplefff // CHECK: %[[A:.+]] = fmul reassoc arcp float %b, %c // CHECK: %[[M:.+]] = fdiv reassoc arcp float %b, %[[A]] -// CHECK-NEXT: fadd reassoc arcp float %[[M]], %c +// CHECK-NEXT: fadd reassoc arcp float %c, %[[M]] #pragma clang fp reciprocal(on) reassociate(on) a = b / c; return a / b + c; @@ -72,7 +72,7 @@ float fp_recip_template(float a, float b, float c) { // CHECK-LABEL: _Z17fp_recip_templatefff // CHECK: %[[A1:.+]] = fdiv arcp float %a, %b // CHECK-NEXT: %[[A2:.+]] = fsub arcp float %[[A1]], %c - // CHECK-NEXT: fadd arcp float %[[A2]], %c + // CHECK-NEXT: fadd arcp float %c, %[[A2]] return template_recip(a, b, c); } diff --git a/clang/test/CodeGen/ms-mixed-ptr-sizes.c b/clang/test/CodeGen/ms-mixed-ptr-sizes.c index 51bea60eb39dc..0bc1925b13dbc 100644 --- a/clang/test/CodeGen/ms-mixed-ptr-sizes.c +++ b/clang/test/CodeGen/ms-mixed-ptr-sizes.c @@ -51,35 +51,35 @@ void test_other(struct Foo *f, __attribute__((address_space(10))) int *i) { int test_compare1(int *__ptr32 __uptr i, int *__ptr64 j) { // ALL-LABEL: define dso_local range(i32 0, 2) i32 @test_compare1 // X64: %{{.+}} = addrspacecast ptr %j to ptr addrspace(271) - // X64: %cmp = icmp eq ptr addrspace(271) %{{.+}}, %i + // X64: %cmp = icmp eq ptr addrspace(271) %i, %{{.+}} // X86: %{{.+}} = addrspacecast ptr addrspace(272) %j to ptr addrspace(271) - // X86: %cmp = icmp eq ptr addrspace(271) %{{.+}}, %i + // X86: %cmp = icmp eq ptr addrspace(271) %i, %{{.+}} return (i == j); } int test_compare2(int *__ptr32 __sptr i, int *__ptr64 j) { // ALL-LABEL: define dso_local range(i32 0, 2) i32 @test_compare2 // X64: %{{.+}} = addrspacecast ptr %j to ptr addrspace(270) - // X64: %cmp = icmp eq ptr addrspace(270) %{{.+}}, %i + // X64: %cmp = icmp eq ptr addrspace(270) %i, %{{.+}} // X86: %{{.+}} = addrspacecast ptr addrspace(272) %j to ptr - // X86: %cmp = icmp eq ptr %{{.+}}, %i + // X86: %cmp = icmp eq ptr %i, %{{.+}} return (i == j); } int test_compare3(int *__ptr32 __uptr i, int *__ptr64 j) { // ALL-LABEL: define dso_local range(i32 0, 2) i32 @test_compare3 // X64: %{{.+}} = addrspacecast ptr addrspace(271) %i to ptr - // X64: %cmp = icmp eq ptr %{{.+}}, %j + // X64: %cmp = icmp eq ptr %j, %{{.+}} // X86: %{{.+}} = addrspacecast ptr addrspace(271) %i to ptr addrspace(272) - // X86: %cmp = icmp eq ptr addrspace(272) %{{.+}}, %j + // X86: %cmp = icmp eq ptr addrspace(272) %j, %{{.+}} return (j == i); } int test_compare4(int *__ptr32 __sptr i, int *__ptr64 j) { // ALL-LABEL: define dso_local range(i32 0, 2) i32 @test_compare4 // X64: %{{.+}} = addrspacecast ptr addrspace(270) %i to ptr - // X64: %cmp = icmp eq ptr %{{.+}}, %j + // X64: %cmp = icmp eq ptr %j, %{{.+}} // X86: %{{.+}} = addrspacecast ptr %i to ptr addrspace(272) - // X86: %cmp = icmp eq ptr addrspace(272) %{{.+}}, %j + // X86: %cmp = icmp eq ptr addrspace(272) %j, %{{.+}} return (j == i); } diff --git a/clang/test/Headers/wasm.c b/clang/test/Headers/wasm.c index b22d87a5f8b70..0fae8557a066d 100644 --- a/clang/test/Headers/wasm.c +++ b/clang/test/Headers/wasm.c @@ -1499,7 +1499,7 @@ v128_t test_v128_xor(v128_t a, v128_t b) { // CHECK-LABEL: @test_v128_andnot( // CHECK-NEXT: entry: // CHECK-NEXT: [[NOT_I:%.*]] = xor <4 x i32> [[B:%.*]], -// CHECK-NEXT: [[AND_I:%.*]] = and <4 x i32> [[NOT_I]], [[A:%.*]] +// CHECK-NEXT: [[AND_I:%.*]] = and <4 x i32> [[A:%.*]], [[NOT_I]] // CHECK-NEXT: ret <4 x i32> [[AND_I]] // v128_t test_v128_andnot(v128_t a, v128_t b) { diff --git 
a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h index ebcbd5d9e8880..ed2e7f58ca853 100644 --- a/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h +++ b/llvm/include/llvm/Transforms/InstCombine/InstCombiner.h @@ -132,21 +132,18 @@ class LLVM_LIBRARY_VISIBILITY InstCombiner { /// This routine maps IR values to various complexity ranks: /// 0 -> undef /// 1 -> Constants - /// 2 -> Other non-instructions - /// 3 -> Arguments - /// 4 -> Cast and (f)neg/not instructions - /// 5 -> Other instructions + /// 2 -> Cast and (f)neg/not instructions + /// 3 -> Other instructions and arguments static unsigned getComplexity(Value *V) { - if (isa<Instruction>(V)) { - if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) || - match(V, m_Not(PatternMatch::m_Value())) || - match(V, m_FNeg(PatternMatch::m_Value()))) - return 4; - return 5; - } - if (isa<Argument>(V)) - return 3; - return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2; + if (isa<Constant>(V)) + return isa<UndefValue>(V) ? 0 : 1; + + if (isa<CastInst>(V) || match(V, m_Neg(PatternMatch::m_Value())) || + match(V, m_Not(PatternMatch::m_Value())) || + match(V, m_FNeg(PatternMatch::m_Value()))) + return 2; + + return 3; } /// Predicate canonicalization reduces the number of patterns that need to be diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp index 123f810bacfb6..dd4a64050f878 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp @@ -2719,8 +2719,10 @@ Instruction *InstCombinerImpl::hoistFNegAboveFMulFDiv(Value *FNegOp, Instruction &FMFSource) { Value *X, *Y; if (match(FNegOp, m_FMul(m_Value(X), m_Value(Y)))) { + // Push into RHS which is more likely to simplify (const or another fneg). + // FIXME: It would be better to invert the transform.
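// A minimal sketch of the effect, using the X and Y bound by the match above: fneg (fmul X, Y) is now rewritten to fmul X, (fneg Y), where the old code emitted fmul (fneg X), Y. Under the updated complexity ranks, arguments and ordinary instructions (rank 3) outrank the freshly created fneg (rank 2), so keeping X on the LHS and the fneg on the RHS already matches canonical operand order.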
return cast<Instruction>(Builder.CreateFMulFMF( - Builder.CreateFNegFMF(X, &FMFSource), Y, &FMFSource)); + X, Builder.CreateFNegFMF(Y, &FMFSource), &FMFSource)); } if (match(FNegOp, m_FDiv(m_Value(X), m_Value(Y)))) { diff --git a/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll b/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll index ba3a484441e9e..55c3e7779478e 100644 --- a/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll +++ b/llvm/test/Analysis/ValueTracking/known-power-of-two-urem.ll @@ -19,7 +19,7 @@ define i64 @known_power_of_two_urem_phi(i64 %size, i1 %cmp, i1 %cmp1) { ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[PHI1:%.*]] = phi i64 [ 4095, [[ENTRY:%.*]] ], [ [[PHI]], [[COND_TRUE_END]] ] ; CHECK-NEXT: [[PHI1:%.*]] = phi i64 [ 4095, [[ENTRY:%.*]] ], [ [[PHI]], [[COND_TRUE_END]] ] -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[PHI1]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[PHI1]] ; CHECK-NEXT: ret i64 [[UREM]] ; entry: @@ -57,7 +57,7 @@ define i64 @known_power_of_two_urem_nested_expr(i64 %size, i1 %cmp, i1 %cmp1, i6 ; CHECK: cond.end: ; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[SELECT]], [[COND_FALSE]] ], [ [[TMP1]], [[COND_TRUE]] ], [ [[PHI]], [[COND_END]] ] ; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[PHI]], -1 -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP2]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP2]] ; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[UREM]], 10 ; CHECK-NEXT: br i1 [[CMP2]], label [[COND_END]], label [[END:%.*]] ; CHECK: end: @@ -119,7 +119,7 @@ define i64 @known_power_of_two_urem_loop_mul(i64 %size, i64 %a) { ; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[PHI]], -1 -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]] ; CHECK-NEXT: [[ADD]] = add nuw i64 [[SUM]], [[UREM]] ; CHECK-NEXT: [[I]] = shl nuw i64 [[PHI]], 2 ; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[PHI]], 25000000 @@ -190,7 +190,7 @@ define i64 @known_power_of_two_urem_loop_shl(i64 %size, i64 %a) { ; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[PHI]], -1 -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]] ; CHECK-NEXT: [[ADD]] = add nuw i64 [[SUM]], [[UREM]] ; CHECK-NEXT: [[I]] = shl nuw i64 [[PHI]], 1 ; CHECK-NEXT: [[ICMP:%.*]] = icmp ult i64 [[PHI]], 50000000 @@ -225,7 +225,7 @@ define i64 @known_power_of_two_urem_loop_lshr(i64 %size, i64 %a) { ; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ [[START]], [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[PHI]], -1 -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]] ; CHECK-NEXT: [[ADD]] = add nuw i64 [[SUM]], [[UREM]] ; CHECK-NEXT: [[I]] = lshr i64 [[PHI]], 1 ; CHECK-NEXT: [[ICMP_NOT:%.*]] = icmp ult i64 [[PHI]], 2 @@ -260,7 +260,7 @@ define i64 @known_power_of_two_urem_loop_ashr(i64 %size, i64 %a) { ; CHECK-NEXT: [[PHI:%.*]] = phi i64 [ 4096, [[ENTRY:%.*]] ], [ [[I:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[SUM:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = add nsw i64 [[PHI]], -1 -;
CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP0]], [[SIZE:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[SIZE:%.*]], [[TMP0]] ; CHECK-NEXT: [[ADD]] = add nsw i64 [[SUM]], [[UREM]] ; CHECK-NEXT: [[I]] = lshr i64 [[PHI]], [[A:%.*]] ; CHECK-NEXT: [[ICMP_NOT:%.*]] = icmp eq i64 [[I]], 0 @@ -396,7 +396,7 @@ define i8 @known_power_of_two_rust_next_power_of_two(i8 %x, i8 %y) { ; CHECK-NEXT: [[TMP3:%.*]] = lshr i8 -1, [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt i8 [[X]], 1 ; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i8 [[TMP3]], i8 0 -; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP5]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[TMP5]] ; CHECK-NEXT: ret i8 [[R]] ; %2 = add i8 %x, -1 @@ -414,7 +414,7 @@ define i8 @known_power_of_two_rust_next_power_of_two(i8 %x, i8 %y) { define i8 @known_power_of_two_lshr_add_one_allow_zero(i8 %x, i8 %y) { ; CHECK-LABEL: @known_power_of_two_lshr_add_one_allow_zero( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]] -; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i8 [[R]] ; %4 = lshr i8 -1, %x @@ -429,7 +429,7 @@ define i1 @known_power_of_two_lshr_add_one_nuw_deny_zero(i8 %x, i8 %y) { ; CHECK-LABEL: @known_power_of_two_lshr_add_one_nuw_deny_zero( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 -2, [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP3]], -1 ; CHECK-NEXT: ret i1 [[R]] ; @@ -446,7 +446,7 @@ define i1 @negative_known_power_of_two_lshr_add_one_deny_zero(i8 %x, i8 %y) { ; CHECK-LABEL: @negative_known_power_of_two_lshr_add_one_deny_zero( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 -2, [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP3]], -1 ; CHECK-NEXT: ret i1 [[R]] ; @@ -463,7 +463,7 @@ define i1 @negative_known_power_of_two_lshr_add_one_nsw_deny_zero(i8 %x, i8 %y) ; CHECK-LABEL: @negative_known_power_of_two_lshr_add_one_nsw_deny_zero( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 -1, [[X:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = sub i8 -2, [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP3]], -1 ; CHECK-NEXT: ret i1 [[R]] ; diff --git a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll index 7bcf96065a69d..7cfb6af0d7b95 100644 --- a/llvm/test/Analysis/ValueTracking/known-power-of-two.ll +++ b/llvm/test/Analysis/ValueTracking/known-power-of-two.ll @@ -16,8 +16,8 @@ declare i16 @llvm.umax.i16(i16, i16) define i32 @pr25900(i32 %d) { ; CHECK-LABEL: define i32 @pr25900 ; CHECK-SAME: (i32 [[D:%.*]]) { -; CHECK-NEXT: [[AND:%.*]] = ashr i32 [[D]], 31 -; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 4, [[AND]] +; CHECK-NEXT: [[ASHR:%.*]] = ashr i32 [[D]], 31 +; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 4, [[ASHR]] ; CHECK-NEXT: ret i32 [[DIV]] ; %and = and i32 %d, -2147483648 @@ -37,7 +37,7 @@ define i8 @trunc_is_pow2_or_zero(i16 %x, i8 %y) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = trunc i16 [[XP2]] to i8 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i8 [[R]] ; %xp2 = shl i16 4, %x @@ -67,7 
+67,7 @@ define i1 @trunc_is_pow2_fail(i16 %x, i8 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i8 [[Y:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = trunc i16 [[XP2]] to i8 -; CHECK-NEXT: [[AND:%.*]] = and i8 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -85,7 +85,7 @@ define i16 @bswap_is_pow2_or_zero(i16 %x, i16 %y) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]]) ; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i16 [[R]] ; %xp2 = shl i16 4, %x @@ -115,7 +115,7 @@ define i1 @bswap_is_pow2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl nuw i16 1, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -132,7 +132,7 @@ define i1 @bswap_is_pow2_fail(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 2, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bswap.i16(i16 [[XP2]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -150,7 +150,7 @@ define i16 @bitreverse_is_pow2_or_zero(i16 %x, i16 %y) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]]) ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i16 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i16 [[R]] ; %xp2 = shl i16 4, %x @@ -180,7 +180,7 @@ define i1 @bitreverse_is_pow2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl nuw i16 1, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -197,7 +197,7 @@ define i1 @bitreverse_is_pow2_fail(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 2, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[XP2]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -215,7 +215,7 @@ define i16 @fshl_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) ; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i16 [[R]] ; %xp2 = shl i16 4, %x @@ -262,7 +262,7 @@ define i1 @fshl_is_pow2(i16 %x, i16 %y, i16 %z) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl nuw i16 1, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: 
[[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -279,7 +279,7 @@ define i1 @fshl_is_pow2_fail(i16 %x, i16 %y, i16 %z) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 2, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshl.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -297,7 +297,7 @@ define i16 @fshr_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 4, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) ; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i16 [[R]] ; %xp2 = shl i16 4, %x @@ -344,7 +344,7 @@ define i1 @fshr_is_pow2(i16 %x, i16 %y, i16 %z) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl nuw i16 1, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -361,7 +361,7 @@ define i1 @fshr_is_pow2_fail(i16 %x, i16 %y, i16 %z) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[Z:%.*]]) { ; CHECK-NEXT: [[XP2:%.*]] = shl i16 2, [[X]] ; CHECK-NEXT: [[XX:%.*]] = call i16 @llvm.fshr.i16(i16 [[XP2]], i16 [[XP2]], i16 [[Z]]) -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -380,7 +380,7 @@ define i16 @mul_is_pow2_or_zero(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[ZP2:%.*]] = shl i16 2, [[Z]] ; CHECK-NEXT: [[XX:%.*]] = mul i16 [[XP2]], [[ZP2]] ; CHECK-NEXT: [[TMP1:%.*]] = add i16 [[XX]], -1 -; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i16 [[R]] ; %xp2 = shl i16 4, %x @@ -416,7 +416,7 @@ define i1 @mul_is_pow2(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]] ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2 ; CHECK-NEXT: [[XX:%.*]] = shl nuw nsw i16 [[ZP2]], [[TMP1]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -439,7 +439,7 @@ define i1 @mul_is_pow2_fail(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[ZP2:%.*]] = shl nuw nsw i16 2, [[ZSMALL]] ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i16 [[XSMALL]], 2 ; CHECK-NEXT: [[XX:%.*]] = shl i16 [[ZP2]], [[TMP1]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -462,7 +462,7 @@ define i1 @mul_is_pow2_fail2(i16 %x, i16 %y, i16 %z) { ; CHECK-NEXT: [[XP2:%.*]] = shl nuw nsw i16 3, [[XSMALL]] ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i16 [[ZSMALL]], 1 ; CHECK-NEXT: [[XX:%.*]] = shl nuw nsw i16 [[XP2]], [[TMP1]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] 
; @@ -482,7 +482,7 @@ define i1 @shl_is_pow2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = shl nuw nsw i16 4, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -499,7 +499,7 @@ define i1 @shl_is_pow2_fail(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = shl i16 512, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -516,7 +516,7 @@ define i1 @shl_is_pow2_fail2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = shl nuw nsw i16 5, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -533,7 +533,7 @@ define i1 @lshr_is_pow2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = lshr exact i16 512, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -550,7 +550,7 @@ define i1 @lshr_is_pow2_fail(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = lshr i16 4, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -567,7 +567,7 @@ define i1 @lshr_is_pow2_fail2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XSMALL:%.*]] = and i16 [[X]], 7 ; CHECK-NEXT: [[XX:%.*]] = lshr i16 513, [[XSMALL]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -584,7 +584,7 @@ define i1 @and_is_pow2(i16 %x, i16 %y) { ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XNZ:%.*]] = or i16 [[X]], 4 ; CHECK-NEXT: [[X_NEG:%.*]] = sub nsw i16 0, [[XNZ]] -; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[X_NEG]], [[Y]] +; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[Y]], [[X_NEG]] ; CHECK-NEXT: [[AND:%.*]] = and i16 [[TMP1]], [[XNZ]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i16 [[AND]], 0 ; CHECK-NEXT: ret i1 [[R]] @@ -602,8 +602,8 @@ define i1 @and_is_pow2_fail(i16 %x, i16 %y) { ; CHECK-LABEL: define i1 @and_is_pow2_fail ; CHECK-SAME: (i16 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[X_NEG:%.*]] = sub i16 0, [[X]] -; CHECK-NEXT: [[XX:%.*]] = and i16 [[X_NEG]], [[X]] -; CHECK-NEXT: [[AND:%.*]] = and i16 [[XX]], [[Y]] +; CHECK-NEXT: [[XX:%.*]] = and i16 [[X]], [[X_NEG]] +; CHECK-NEXT: [[AND:%.*]] = and i16 [[Y]], [[XX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i16 [[AND]], [[XX]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -619,7 +619,7 @@ define i16 @i1_is_pow2_or_zero(i1 %x, i16 %y) { ; CHECK-LABEL: define i16 @i1_is_pow2_or_zero ; CHECK-SAME: (i1 [[X:%.*]], i16 [[Y:%.*]]) { ; CHECK-NEXT: [[XX:%.*]] = zext i1 [[X]] to i16 -; CHECK-NEXT: 
[[R:%.*]] = or i16 [[XX]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = or i16 [[Y]], [[XX]] ; CHECK-NEXT: ret i16 [[R]] ; %xx = zext i1 %x to i16 diff --git a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll b/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll index 4ca7ed9eda7bb..fba907ab731b0 100644 --- a/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll +++ b/llvm/test/Analysis/ValueTracking/knownbits-and-or-xor-lowbit.ll @@ -93,7 +93,7 @@ define <2 x i1> @sub_XY_and_bit0_is_zero_fail(<2 x i8> %x, <2 x i8> %C) nounwind ; CHECK-LABEL: @sub_XY_and_bit0_is_zero_fail( ; CHECK-NEXT: [[C1:%.*]] = or <2 x i8> [[C:%.*]], ; CHECK-NEXT: [[Y:%.*]] = sub <2 x i8> [[X:%.*]], [[C1]] -; CHECK-NEXT: [[W:%.*]] = and <2 x i8> [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = and <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[W]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -108,7 +108,7 @@ define i1 @sub_XY_xor_bit0_is_one_fail(i8 %x, i8 %C) nounwind { ; CHECK-LABEL: @sub_XY_xor_bit0_is_one_fail( ; CHECK-NEXT: [[C1:%.*]] = xor i8 [[C:%.*]], 1 ; CHECK-NEXT: [[Y:%.*]] = sub i8 [[X:%.*]], [[C1]] -; CHECK-NEXT: [[W:%.*]] = xor i8 [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = xor i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[W]], 10 ; CHECK-NEXT: ret i1 [[R]] ; @@ -122,7 +122,7 @@ define i1 @sub_XY_xor_bit0_is_one_fail(i8 %x, i8 %C) nounwind { define i1 @sub_XY_or_bit0_is_one_fail(i8 %x, i8 %C) nounwind { ; CHECK-LABEL: @sub_XY_or_bit0_is_one_fail( ; CHECK-NEXT: [[Y:%.*]] = sub i8 [[X:%.*]], [[C:%.*]] -; CHECK-NEXT: [[W:%.*]] = or i8 [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = or i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[W]], 10 ; CHECK-NEXT: ret i1 [[R]] ; @@ -135,7 +135,7 @@ define i1 @sub_XY_or_bit0_is_one_fail(i8 %x, i8 %C) nounwind { define i1 @sub_YX_and_bit0_is_zero_fail(i8 %x, i8 %C) nounwind { ; CHECK-LABEL: @sub_YX_and_bit0_is_zero_fail( ; CHECK-NEXT: [[Y:%.*]] = sub i8 [[C:%.*]], [[X:%.*]] -; CHECK-NEXT: [[W:%.*]] = and i8 [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = and i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[W]], -1 ; CHECK-NEXT: ret i1 [[R]] ; @@ -148,7 +148,7 @@ define i1 @sub_YX_and_bit0_is_zero_fail(i8 %x, i8 %C) nounwind { define <2 x i1> @sub_YX_xor_bit0_is_one_fail(<2 x i8> %x, <2 x i8> %C) nounwind { ; CHECK-LABEL: @sub_YX_xor_bit0_is_one_fail( ; CHECK-NEXT: [[TMP1:%.*]] = sub <2 x i8> [[X:%.*]], [[C:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -163,7 +163,7 @@ define i1 @sub_YX_or_bit0_is_one_fail(i8 %x, i8 %C) nounwind { ; CHECK-LABEL: @sub_YX_or_bit0_is_one_fail( ; CHECK-NEXT: [[C1:%.*]] = xor i8 [[C:%.*]], 1 ; CHECK-NEXT: [[Y:%.*]] = sub i8 [[C1]], [[X:%.*]] -; CHECK-NEXT: [[W:%.*]] = or i8 [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = or i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[W]], 32 ; CHECK-NEXT: ret i1 [[R]] ; @@ -178,7 +178,7 @@ define i1 @add_YX_xor_bit0_is_one_fail(i8 %x, i8 %C) nounwind { ; CHECK-LABEL: @add_YX_xor_bit0_is_one_fail( ; CHECK-NEXT: [[C1:%.*]] = and i8 [[C:%.*]], 1 ; CHECK-NEXT: [[Y:%.*]] = add i8 [[C1]], [[X:%.*]] -; CHECK-NEXT: [[W:%.*]] = xor i8 [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = xor i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[W]], 32 ; CHECK-NEXT: ret i1 [[R]] ; @@ -193,7 +193,7 @@ define <2 x i1> @add_XY_or_bit0_is_one_fail(<2 x i8> %x, <2 x i8> %C) nounwind { ; CHECK-LABEL: @add_XY_or_bit0_is_one_fail( ; 
CHECK-NEXT: [[C1:%.*]] = add <2 x i8> [[C:%.*]], ; CHECK-NEXT: [[Y:%.*]] = add <2 x i8> [[C1]], [[X:%.*]] -; CHECK-NEXT: [[W:%.*]] = or <2 x i8> [[Y]], [[X]] +; CHECK-NEXT: [[W:%.*]] = or <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[W]], ; CHECK-NEXT: ret <2 x i1> [[R]] ; diff --git a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll index 793d6ffa3e34e..407100dec1201 100644 --- a/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll +++ b/llvm/test/Analysis/ValueTracking/knownbits-bmi-pattern.ll @@ -161,7 +161,7 @@ define i32 @blsmsk_and_eval2(i32 %x) { ; CHECK-LABEL: @blsmsk_and_eval2( ; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 10 ; CHECK-NEXT: [[X2:%.*]] = add i32 [[X1]], 63 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = and i32 [[X3]], 32 ; CHECK-NEXT: ret i32 [[Z]] ; @@ -337,7 +337,7 @@ define <2 x i1> @blsi_ge_is_false_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsi_ge_is_false_vec( ; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], ; CHECK-NEXT: ret <2 x i1> [[Z]] ; @@ -352,7 +352,7 @@ define <2 x i1> @blsi_ge_is_false_diff_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsi_ge_is_false_diff_vec( ; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = icmp ugt <2 x i32> [[X3]], ; CHECK-NEXT: ret <2 x i1> [[Z]] ; @@ -445,7 +445,7 @@ define <2 x i32> @blsi_and_eval2_vec(<2 x i32> %x) { ; CHECK-LABEL: @blsi_and_eval2_vec( ; CHECK-NEXT: [[X1:%.*]] = or <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[X2:%.*]] = sub nsw <2 x i32> zeroinitializer, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = and <2 x i32> [[X3]], ; CHECK-NEXT: ret <2 x i32> [[Z]] ; @@ -460,7 +460,7 @@ define i32 @blsi_and_eval3(i32 %x) { ; CHECK-LABEL: @blsi_and_eval3( ; CHECK-NEXT: [[X1:%.*]] = or i32 [[X:%.*]], 34 ; CHECK-NEXT: [[X2:%.*]] = sub nsw i32 0, [[X1]] -; CHECK-NEXT: [[X3:%.*]] = and i32 [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = and i32 [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = and i32 [[X3]], 208 ; CHECK-NEXT: ret i32 [[Z]] ; @@ -480,7 +480,7 @@ define <2 x i1> @blsi_eq_is_false_assume_vec(<2 x i32> %x) { ; CHECK-NEXT: [[CMP1:%.*]] = extractelement <2 x i1> [[CMP]], i64 1 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP1]]) ; CHECK-NEXT: [[X2:%.*]] = sub <2 x i32> zeroinitializer, [[X]] -; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = and <2 x i32> [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = icmp eq <2 x i32> [[X3]], ; CHECK-NEXT: ret <2 x i1> [[Z]] ; @@ -668,7 +668,7 @@ define i32 @blsmsk_xor_no_eval_assume(i32 %x) { ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) ; CHECK-NEXT: [[X2:%.*]] = add i32 [[X]], -1 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = xor i32 [[X3]], 32 ; CHECK-NEXT: ret i32 [[Z]] ; @@ -687,7 +687,7 @@ define i32 @blsmsk_xor_no_eval_assume2(i32 %x) { ; CHECK-NEXT: 
[[CMP:%.*]] = icmp ne i32 [[LB]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]]) ; CHECK-NEXT: [[X2:%.*]] = add nsw i32 [[X]], -1 -; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X2]], [[X]] +; CHECK-NEXT: [[X3:%.*]] = xor i32 [[X]], [[X2]] ; CHECK-NEXT: [[Z:%.*]] = xor i32 [[X3]], 32 ; CHECK-NEXT: ret i32 [[Z]] ; diff --git a/llvm/test/Analysis/ValueTracking/phi-known-bits.ll b/llvm/test/Analysis/ValueTracking/phi-known-bits.ll index 3728e4177dd99..8691e63a4f3ee 100644 --- a/llvm/test/Analysis/ValueTracking/phi-known-bits.ll +++ b/llvm/test/Analysis/ValueTracking/phi-known-bits.ll @@ -401,7 +401,7 @@ define i8 @phi_ugt_high_bits_and_known_todo_high_depths(i8 %xx, i8 %y, i8 %z) { ; CHECK-LABEL: @phi_ugt_high_bits_and_known_todo_high_depths( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[YY:%.*]] = and i8 [[Y:%.*]], -2 -; CHECK-NEXT: [[XXX:%.*]] = and i8 [[YY]], [[XX:%.*]] +; CHECK-NEXT: [[XXX:%.*]] = and i8 [[XX:%.*]], [[YY]] ; CHECK-NEXT: [[ZZ:%.*]] = or i8 [[Z:%.*]], 1 ; CHECK-NEXT: [[X:%.*]] = add i8 [[XXX]], [[ZZ]] ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[X]], -65 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow-codegen.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow-codegen.ll index ebbab5c2b9508..0025d23b10803 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow-codegen.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow-codegen.ll @@ -215,7 +215,7 @@ define half @test_powr_fast_f16(half %x, half %y) { ; CHECK: ; %bb.0: ; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) ; CHECK-NEXT: v_log_f16_e32 v0, v0 -; CHECK-NEXT: v_mul_f16_e32 v0, v0, v1 +; CHECK-NEXT: v_mul_f16_e32 v0, v1, v0 ; CHECK-NEXT: v_exp_f16_e32 v0, v0 ; CHECK-NEXT: s_setpc_b64 s[30:31] %powr = tail call fast half @_Z4powrDhDh(half %x, half %y) @@ -236,11 +236,11 @@ define float @test_powr_fast_f32(float %x, float %y) { ; CHECK-NEXT: v_cndmask_b32_e32 v2, 0, v2, vcc ; CHECK-NEXT: s_mov_b32 s4, 0xc2fc0000 ; CHECK-NEXT: v_sub_f32_e32 v0, v0, v2 -; CHECK-NEXT: v_mul_f32_e32 v2, v0, v1 +; CHECK-NEXT: v_mul_f32_e32 v2, v1, v0 ; CHECK-NEXT: v_mov_b32_e32 v3, 0x42800000 ; CHECK-NEXT: v_cmp_gt_f32_e32 vcc, s4, v2 ; CHECK-NEXT: v_cndmask_b32_e32 v2, 0, v3, vcc -; CHECK-NEXT: v_fma_f32 v0, v0, v1, v2 +; CHECK-NEXT: v_fma_f32 v0, v1, v0, v2 ; CHECK-NEXT: v_exp_f32_e32 v0, v0 ; CHECK-NEXT: v_mov_b32_e32 v1, 0x1f800000 ; CHECK-NEXT: v_cndmask_b32_e32 v1, 1.0, v1, vcc @@ -296,7 +296,7 @@ define double @test_powr_fast_f64(double %x, double %y) { ; CHECK-NEXT: s_mov_b64 s[38:39], s[8:9] ; CHECK-NEXT: s_waitcnt lgkmcnt(0) ; CHECK-NEXT: s_swappc_b64 s[30:31], s[16:17] -; CHECK-NEXT: v_mul_f64 v[0:1], v[0:1], v[40:41] +; CHECK-NEXT: v_mul_f64 v[0:1], v[40:41], v[0:1] ; CHECK-NEXT: s_getpc_b64 s[4:5] ; CHECK-NEXT: s_add_u32 s4, s4, _Z4exp2d@gotpcrel32@lo+4 ; CHECK-NEXT: s_addc_u32 s5, s5, _Z4exp2d@gotpcrel32@hi+12 diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow.ll index 6b4b0f881f3be..acdab29e85b91 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pow.ll @@ -1074,7 +1074,7 @@ define float @test_pow_afn_f32_nnan_ninf_x_known_positive(float nofpclass(ninf n ; CHECK-LABEL: define float @test_pow_afn_f32_nnan_ninf_x_known_positive ; CHECK-SAME: (float nofpclass(ninf nsub nnorm) [[X:%.*]], float [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn float @llvm.log2.f32(float [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn float [[__LOG2]], 
[[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn float [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn float @llvm.exp2.f32(float [[__YLOGX]]) ; CHECK-NEXT: ret float [[__EXP2]] ; @@ -1096,7 +1096,7 @@ define <2 x float> @test_pow_afn_v2f32_nnan_ninf_x_known_positive(<2 x float> no ; CHECK-LABEL: define <2 x float> @test_pow_afn_v2f32_nnan_ninf_x_known_positive ; CHECK-SAME: (<2 x float> nofpclass(ninf nsub nnorm) [[X:%.*]], <2 x float> [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn <2 x float> @llvm.log2.v2f32(<2 x float> [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x float> [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x float> [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn <2 x float> @llvm.exp2.v2f32(<2 x float> [[__YLOGX]]) ; CHECK-NEXT: ret <2 x float> [[__EXP2]] ; @@ -1158,7 +1158,7 @@ define double @test_pow_afn_f64_nnan_ninf_x_known_positive(double nofpclass(ninf ; CHECK-LABEL: define double @test_pow_afn_f64_nnan_ninf_x_known_positive ; CHECK-SAME: (double nofpclass(ninf nsub nnorm) [[X:%.*]], double [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn double @_Z4log2d(double [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn double [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn double [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn double @_Z4exp2d(double [[__YLOGX]]) ; CHECK-NEXT: ret double [[__EXP2]] ; @@ -1180,7 +1180,7 @@ define <2 x double> @test_pow_afn_v2f64_nnan_ninf_x_known_positive(<2 x double> ; CHECK-LABEL: define <2 x double> @test_pow_afn_v2f64_nnan_ninf_x_known_positive ; CHECK-SAME: (<2 x double> nofpclass(ninf nsub nnorm) [[X:%.*]], <2 x double> [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn <2 x double> @_Z4log2Dv2_d(<2 x double> [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x double> [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x double> [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn <2 x double> @_Z4exp2Dv2_d(<2 x double> [[__YLOGX]]) ; CHECK-NEXT: ret <2 x double> [[__EXP2]] ; @@ -1242,7 +1242,7 @@ define half @test_pow_afn_f16_nnan_ninf_x_known_positive(half nofpclass(ninf nno ; CHECK-LABEL: define half @test_pow_afn_f16_nnan_ninf_x_known_positive ; CHECK-SAME: (half nofpclass(ninf nsub nnorm) [[X:%.*]], half [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn half @llvm.log2.f16(half [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn half [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn half [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn half @llvm.exp2.f16(half [[__YLOGX]]) ; CHECK-NEXT: ret half [[__EXP2]] ; @@ -1264,7 +1264,7 @@ define <2 x half> @test_pow_afn_v2f16_nnan_ninf_x_known_positive(<2 x half> nofp ; CHECK-LABEL: define <2 x half> @test_pow_afn_v2f16_nnan_ninf_x_known_positive ; CHECK-SAME: (<2 x half> nofpclass(ninf nsub nnorm) [[X:%.*]], <2 x half> [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn <2 x half> @llvm.log2.v2f16(<2 x half> [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x half> [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x half> [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn <2 x half> @llvm.exp2.v2f16(<2 x half> [[__YLOGX]]) ; CHECK-NEXT: ret <2 x half> [[__EXP2]] ; @@ -1684,7 +1684,7 @@ define float @test_pow_afn_f32_nnan_ninf__y_3(float %x) { 
; CHECK-LABEL: define float @test_pow_afn_f32_nnan_ninf__y_3 ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX2]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %pow = tail call afn nnan ninf float @_Z3powff(float %x, float 3.0) @@ -1737,7 +1737,7 @@ define float @test_pow_afn_f32_nnan_ninf__y_5(float %x) { ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %pow = tail call afn nnan ninf float @_Z3powff(float %x, float 5.0) @@ -1759,7 +1759,7 @@ define float @test_pow_afn_f32_nnan_ninf__y_neg5(float %x) { ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn float 1.000000e+00, [[__POWPROD]] ; CHECK-NEXT: ret float [[__1POWPROD]] ; @@ -1793,7 +1793,7 @@ define <2 x float> @test_pow_afn_v2f32_nnan_ninf__y_3(<2 x float> %x) { ; CHECK-LABEL: define <2 x float> @test_pow_afn_v2f32_nnan_ninf__y_3 ; CHECK-SAME: (<2 x float> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[__POWX2]] ; CHECK-NEXT: ret <2 x float> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x float> @_Z3powDv2_fS_(<2 x float> %x, <2 x float> ) @@ -1836,7 +1836,7 @@ define <2 x float> @test_pow_afn_v2f32_nnan_ninf__y_5(<2 x float> %x) { ; CHECK-SAME: (<2 x float> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[__POWX21]] ; CHECK-NEXT: ret <2 x float> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x float> @_Z3powDv2_fS_(<2 x float> %x, <2 x float> ) @@ -1848,7 +1848,7 @@ define float @test_pow_afn_f32_nnan_ninf__y_5_known_positive(float nofpclass(nin ; CHECK-SAME: (float nofpclass(ninf nsub nnorm) [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %pow = tail call afn nnan ninf float @_Z3powff(float %x, float 5.0) @@ -1861,7 +1861,7 @@ define float @test_pow_afn_f32_nnan_ninf__y_5_known_positive_with_ninf_flag(floa ; CHECK-SAME: (float nofpclass(nsub nnorm) [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: 
[[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %pow = tail call afn nnan ninf float @_Z3powff(float %x, float 5.0) @@ -1882,7 +1882,7 @@ define double @test_pow_afn_f64_nnan_ninf__y_3(double %x) { ; CHECK-LABEL: define double @test_pow_afn_f64_nnan_ninf__y_3 ; CHECK-SAME: (double [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn double [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[X]], [[__POWX2]] ; CHECK-NEXT: ret double [[__POWPROD]] ; %pow = tail call afn nnan ninf double @_Z3powdd(double %x, double 3.0) @@ -1935,7 +1935,7 @@ define double @test_pow_afn_f64_nnan_ninf__y_5(double %x) { ; CHECK-SAME: (double [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn double [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn double [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[X]], [[__POWX21]] ; CHECK-NEXT: ret double [[__POWPROD]] ; %pow = tail call afn nnan ninf double @_Z3powdd(double %x, double 5.0) @@ -1957,7 +1957,7 @@ define double @test_pow_afn_f64_nnan_ninf__y_neg5(double %x) { ; CHECK-SAME: (double [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn double [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn double [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn double [[X]], [[__POWX21]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn double 1.000000e+00, [[__POWPROD]] ; CHECK-NEXT: ret double [[__1POWPROD]] ; @@ -1982,7 +1982,7 @@ define <2 x double> @test_pow_afn_v2f64_nnan_ninf__y_3(<2 x double> %x) { ; CHECK-LABEL: define <2 x double> @test_pow_afn_v2f64_nnan_ninf__y_3 ; CHECK-SAME: (<2 x double> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x double> [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x double> [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x double> [[X]], [[__POWX2]] ; CHECK-NEXT: ret <2 x double> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x double> @_Z3powDv2_dS_(<2 x double> %x, <2 x double> ) @@ -2015,7 +2015,7 @@ define <2 x double> @test_pow_afn_v2f64_nnan_ninf__y_5(<2 x double> %x) { ; CHECK-SAME: (<2 x double> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x double> [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn <2 x double> [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x double> [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x double> [[X]], [[__POWX21]] ; CHECK-NEXT: ret <2 x double> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x double> @_Z3powDv2_dS_(<2 x double> %x, <2 x double> ) @@ -2036,7 +2036,7 @@ define half @test_pow_afn_f16_nnan_ninf__y_3(half %x) { ; CHECK-LABEL: define half @test_pow_afn_f16_nnan_ninf__y_3 ; CHECK-SAME: (half [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn half [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn half [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn 
half [[X]], [[__POWX2]] ; CHECK-NEXT: ret half [[__POWPROD]] ; %pow = tail call afn nnan ninf half @_Z3powDhDh(half %x, half 3.0) @@ -2089,7 +2089,7 @@ define half @test_pow_afn_f16_nnan_ninf__y_5(half %x) { ; CHECK-SAME: (half [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn half [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn half [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn half [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn half [[X]], [[__POWX21]] ; CHECK-NEXT: ret half [[__POWPROD]] ; %pow = tail call afn nnan ninf half @_Z3powDhDh(half %x, half 5.0) @@ -2111,7 +2111,7 @@ define half @test_pow_afn_f16_nnan_ninf__y_neg5(half %x) { ; CHECK-SAME: (half [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn half [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn half [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn half [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn half [[X]], [[__POWX21]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn half 0xH3C00, [[__POWPROD]] ; CHECK-NEXT: ret half [[__1POWPROD]] ; @@ -2136,7 +2136,7 @@ define <2 x half> @test_pow_afn_v2f16_nnan_ninf__y_3(<2 x half> %x) { ; CHECK-LABEL: define <2 x half> @test_pow_afn_v2f16_nnan_ninf__y_3 ; CHECK-SAME: (<2 x half> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x half> [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x half> [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x half> [[X]], [[__POWX2]] ; CHECK-NEXT: ret <2 x half> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x half> @_Z3powDv2_DhS_(<2 x half> %x, <2 x half> ) @@ -2169,7 +2169,7 @@ define <2 x half> @test_pow_afn_v2f16_nnan_ninf__y_5(<2 x half> %x) { ; CHECK-SAME: (<2 x half> [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x half> [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn <2 x half> [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x half> [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x half> [[X]], [[__POWX21]] ; CHECK-NEXT: ret <2 x half> [[__POWPROD]] ; %pow = tail call afn nnan ninf <2 x half> @_Z3powDv2_DhS_(<2 x half> %x, <2 x half> ) diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll index 77db224af2890..bd4b86f038766 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-pown.ll @@ -850,7 +850,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_3(float %x) { ; CHECK-LABEL: define float @test_pown_afn_nnan_ninf_f32__y_3 ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX2]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %call = tail call nnan ninf afn float @_Z4pownfi(float %x, i32 3) @@ -861,7 +861,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_neg3(float %x) { ; CHECK-LABEL: define float @test_pown_afn_nnan_ninf_f32__y_neg3 ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf 
afn float [[X]], [[__POWX2]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn float 1.000000e+00, [[__POWPROD]] ; CHECK-NEXT: ret float [[__1POWPROD]] ; @@ -897,7 +897,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_5(float %x) { ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: ret float [[__POWPROD]] ; %call = tail call nnan ninf afn float @_Z4pownfi(float %x, i32 5) @@ -909,7 +909,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_neg5(float %x) { ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX21]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn float 1.000000e+00, [[__POWPROD]] ; CHECK-NEXT: ret float [[__1POWPROD]] ; @@ -921,7 +921,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_7(float %x) { ; CHECK-LABEL: define float @test_pown_afn_nnan_ninf_f32__y_7 ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX2]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] ; CHECK-NEXT: [[__POWPROD2:%.*]] = fmul nnan ninf afn float [[__POWPROD]], [[__POWX21]] ; CHECK-NEXT: ret float [[__POWPROD2]] @@ -934,7 +934,7 @@ define float @test_pown_afn_nnan_ninf_f32__y_neg7(float %x) { ; CHECK-LABEL: define float @test_pown_afn_nnan_ninf_f32__y_neg7 ; CHECK-SAME: (float [[X:%.*]]) { ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn float [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn float [[X]], [[__POWX2]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn float [[__POWX2]], [[__POWX2]] ; CHECK-NEXT: [[__POWPROD2:%.*]] = fmul nnan ninf afn float [[__POWPROD]], [[__POWX21]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn float 1.000000e+00, [[__POWPROD2]] @@ -974,7 +974,7 @@ define <2 x float> @test_pown_afn_nnan_ninf_v2f32__y_3(<2 x float> %x) { ; CHECK-SAME: (<2 x float> [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[__POWX2]] ; CHECK-NEXT: ret <2 x float> [[__POWPROD]] ; entry: @@ -1000,7 +1000,7 @@ define <2 x float> @test_pown_afn_nnan_ninf_v2f32__y_neg3(<2 x float> %x) { ; CHECK-SAME: (<2 x float> [[X:%.*]]) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[X]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX2]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[__POWX2]] ; CHECK-NEXT: [[__1POWPROD:%.*]] = fdiv nnan ninf afn <2 x float> , [[__POWPROD]] ; CHECK-NEXT: ret <2 x float> [[__1POWPROD]] ; @@ -1029,7 +1029,7 @@ define <2 x 
float> @test_pown_afn_nnan_ninf_v2f32__y_5(<2 x float> %x) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[__POWX2:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[X]] ; CHECK-NEXT: [[__POWX21:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX2]], [[__POWX2]] -; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[__POWX21]], [[X]] +; CHECK-NEXT: [[__POWPROD:%.*]] = fmul nnan ninf afn <2 x float> [[X]], [[__POWX21]] ; CHECK-NEXT: ret <2 x float> [[__POWPROD]] ; entry: diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-powr.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-powr.ll index dc4cf1d067ef1..1a92ca8960a77 100644 --- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-powr.ll +++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-powr.ll @@ -26,7 +26,7 @@ define float @test_powr_fast_f32(float %x, float %y) { ; CHECK-LABEL: define float @test_powr_fast_f32 ; CHECK-SAME: (float [[X:%.*]], float [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call fast float @llvm.log2.f32(float [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast float [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast float [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call fast float @llvm.exp2.f32(float [[__YLOGX]]) ; CHECK-NEXT: ret float [[__EXP2]] ; @@ -38,7 +38,7 @@ define <2 x float> @test_powr_fast_v2f32(<2 x float> %x, <2 x float> %y) { ; CHECK-LABEL: define <2 x float> @test_powr_fast_v2f32 ; CHECK-SAME: (<2 x float> [[X:%.*]], <2 x float> [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call fast <2 x float> @llvm.log2.v2f32(<2 x float> [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast <2 x float> [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul fast <2 x float> [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call fast <2 x float> @llvm.exp2.v2f32(<2 x float> [[__YLOGX]]) ; CHECK-NEXT: ret <2 x float> [[__EXP2]] ; @@ -1011,7 +1011,7 @@ define float @test_powr_afn_f32_nnan_ninf_x_known_positive(float nofpclass(ninf ; CHECK-LABEL: define float @test_powr_afn_f32_nnan_ninf_x_known_positive ; CHECK-SAME: (float nofpclass(ninf nsub nnorm) [[X:%.*]], float [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn float @llvm.log2.f32(float [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn float [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn float [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn float @llvm.exp2.f32(float [[__YLOGX]]) ; CHECK-NEXT: ret float [[__EXP2]] ; @@ -1033,7 +1033,7 @@ define <2 x float> @test_powr_afn_v2f32_nnan_ninf_x_known_positive(<2 x float> n ; CHECK-LABEL: define <2 x float> @test_powr_afn_v2f32_nnan_ninf_x_known_positive ; CHECK-SAME: (<2 x float> nofpclass(ninf nsub nnorm) [[X:%.*]], <2 x float> [[Y:%.*]]) { ; CHECK-NEXT: [[__LOG2:%.*]] = call nnan ninf afn <2 x float> @llvm.log2.v2f32(<2 x float> [[X]]) -; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x float> [[__LOG2]], [[Y]] +; CHECK-NEXT: [[__YLOGX:%.*]] = fmul nnan ninf afn <2 x float> [[Y]], [[__LOG2]] ; CHECK-NEXT: [[__EXP2:%.*]] = call nnan ninf afn <2 x float> @llvm.exp2.v2f32(<2 x float> [[__YLOGX]]) ; CHECK-NEXT: ret <2 x float> [[__EXP2]] ; diff --git a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll index 653970cc34022..1956f454a52bb 100644 --- a/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll +++ b/llvm/test/Transforms/IndVarSimplify/rewrite-loop-exit-value.ll @@ -165,8 +165,8 @@ define i16 @pr57336(i16 %end, i16 %m) mustprogress { ; 
CHECK: for.body: ; CHECK-NEXT: [[INC8:%.*]] = phi i16 [ [[INC:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ] ; CHECK-NEXT: [[INC]] = add nuw nsw i16 [[INC8]], 1 -; CHECK-NEXT: [[MUL:%.*]] = mul nsw i16 [[INC8]], [[M:%.*]] -; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i16 [[MUL]], [[END:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = mul nsw i16 [[M:%.*]], [[INC8]] +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i16 [[END:%.*]], [[MUL]] ; CHECK-NEXT: br i1 [[CMP_NOT]], label [[CRIT_EDGE:%.*]], label [[FOR_BODY]] ; CHECK: crit_edge: ; CHECK-NEXT: [[TMP0:%.*]] = add i16 [[END]], 1 @@ -254,7 +254,7 @@ define i32 @vscale_slt_with_vp_umin2(ptr nocapture %A, i32 %n) mustprogress vsca ; CHECK-NEXT: entry: ; CHECK-NEXT: [[VSCALE:%.*]] = call i32 @llvm.vscale.i32() ; CHECK-NEXT: [[VF:%.*]] = shl nuw nsw i32 [[VSCALE]], 2 -; CHECK-NEXT: [[CMP4:%.*]] = icmp slt i32 [[VF]], [[N:%.*]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[N:%.*]], [[VF]] ; CHECK-NEXT: br i1 [[CMP4]], label [[FOR_BODY_PREHEADER:%.*]], label [[EARLY_EXIT:%.*]] ; CHECK: for.body.preheader: ; CHECK-NEXT: br label [[FOR_BODY:%.*]] diff --git a/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll b/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll index 0f50ea9dfe316..7689fc9f68241 100644 --- a/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll +++ b/llvm/test/Transforms/InstCombine/2004-11-27-SetCCForCastLargerAndConstant.ll @@ -272,7 +272,7 @@ define i1 @gt_unsigned_to_small_negative(i8 %SB) { define i1 @different_size_zext_zext_ugt(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_zext_zext_ugt( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext i7 %x to i25 @@ -284,7 +284,7 @@ define i1 @different_size_zext_zext_ugt(i7 %x, i4 %y) { define <2 x i1> @different_size_zext_zext_ugt_commute(<2 x i4> %x, <2 x i7> %y) { ; CHECK-LABEL: @different_size_zext_zext_ugt_commute( ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i4> [[X:%.*]] to <2 x i7> -; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i7> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i7> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; %zx = zext <2 x i4> %x to <2 x i25> @@ -296,7 +296,7 @@ define <2 x i1> @different_size_zext_zext_ugt_commute(<2 x i4> %x, <2 x i7> %y) define i1 @different_size_zext_zext_ult(i4 %x, i7 %y) { ; CHECK-LABEL: @different_size_zext_zext_ult( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[X:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext i4 %x to i25 @@ -308,7 +308,7 @@ define i1 @different_size_zext_zext_ult(i4 %x, i7 %y) { define i1 @different_size_zext_zext_eq(i4 %x, i7 %y) { ; CHECK-LABEL: @different_size_zext_zext_eq( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[X:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext i4 %x to i25 @@ -320,7 +320,7 @@ define i1 @different_size_zext_zext_eq(i4 %x, i7 %y) { define i1 @different_size_zext_zext_ne_commute(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_zext_zext_ne_commute( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp ne i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ne i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext 
i7 %x to i25 @@ -332,7 +332,7 @@ define i1 @different_size_zext_zext_ne_commute(i7 %x, i4 %y) { define i1 @different_size_zext_zext_slt(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_zext_zext_slt( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext i7 %x to i25 @@ -344,7 +344,7 @@ define i1 @different_size_zext_zext_slt(i7 %x, i4 %y) { define i1 @different_size_zext_zext_sgt(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_zext_zext_sgt( ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp ult i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %zx = zext i7 %x to i25 @@ -356,7 +356,7 @@ define i1 @different_size_zext_zext_sgt(i7 %x, i4 %y) { define i1 @different_size_sext_sext_sgt(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_sext_sext_sgt( ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp slt i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sgt i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 @@ -368,7 +368,7 @@ define i1 @different_size_sext_sext_sgt(i7 %x, i4 %y) { define i1 @different_size_sext_sext_sle(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_sext_sext_sle( ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp sge i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sle i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 @@ -380,7 +380,7 @@ define i1 @different_size_sext_sext_sle(i7 %x, i4 %y) { define i1 @different_size_sext_sext_eq(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_sext_sext_eq( ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 @@ -392,7 +392,7 @@ define i1 @different_size_sext_sext_eq(i7 %x, i4 %y) { define i1 @different_size_sext_sext_ule(i7 %x, i4 %y) { ; CHECK-LABEL: @different_size_sext_sext_ule( ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 @@ -423,7 +423,7 @@ define i1 @different_size_sext_sext_ule_extra_use1(i7 %x, i4 %y) { ; CHECK-NEXT: [[SY:%.*]] = sext i4 [[Y:%.*]] to i25 ; CHECK-NEXT: call void @use(i25 [[SY]]) ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 @@ -438,7 +438,7 @@ define i1 @different_size_sext_sext_ule_extra_use2(i7 %x, i4 %y) { ; CHECK-NEXT: [[SX:%.*]] = sext i7 [[X:%.*]] to i25 ; CHECK-NEXT: call void @use(i25 [[SX]]) ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y:%.*]] to i7 -; CHECK-NEXT: [[R:%.*]] = icmp uge i7 [[TMP1]], [[X]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i7 [[X]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %sx = sext i7 %x to i25 diff --git a/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll b/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll index 70fd7274f35d4..45564cd9d95f3 100644 --- a/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll +++ b/llvm/test/Transforms/InstCombine/2010-11-23-Distributed.ll @@ -16,7 +16,7 @@ define 
i32 @foo(i32 %x, i32 %y) { define i1 @bar(i64 %x, i64 %y) { ; CHECK-LABEL: @bar( ; CHECK-NEXT: [[Y1:%.*]] = xor i64 [[X:%.*]], -1 -; CHECK-NEXT: [[B:%.*]] = and i64 [[Y1]], [[Y:%.*]] +; CHECK-NEXT: [[B:%.*]] = and i64 [[Y:%.*]], [[Y1]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[B]], 0 ; CHECK-NEXT: ret i1 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/abs-1.ll b/llvm/test/Transforms/InstCombine/abs-1.ll index 0cf7cd97d8ff4..63287e59f6634 100644 --- a/llvm/test/Transforms/InstCombine/abs-1.ll +++ b/llvm/test/Transforms/InstCombine/abs-1.ll @@ -306,7 +306,7 @@ define i32 @nabs_canonical_9(i32 %a, i32 %b) { ; CHECK-LABEL: @nabs_canonical_9( ; CHECK-NEXT: [[T1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = call i32 @llvm.abs.i32(i32 [[T1]], i1 false) -; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[A]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[A]], [[TMP1]] ; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[B]], [[TMP2]] ; CHECK-NEXT: ret i32 [[ADD]] ; @@ -417,7 +417,7 @@ declare void @extra_use_i1(i1) define i8 @shifty_abs_too_many_uses(i8 %x) { ; CHECK-LABEL: @shifty_abs_too_many_uses( ; CHECK-NEXT: [[SIGNBIT:%.*]] = ashr i8 [[X:%.*]], 7 -; CHECK-NEXT: [[ADD:%.*]] = add i8 [[SIGNBIT]], [[X]] +; CHECK-NEXT: [[ADD:%.*]] = add i8 [[X]], [[SIGNBIT]] ; CHECK-NEXT: [[ABS:%.*]] = xor i8 [[ADD]], [[SIGNBIT]] ; CHECK-NEXT: call void @extra_use(i8 [[SIGNBIT]]) ; CHECK-NEXT: ret i8 [[ABS]] diff --git a/llvm/test/Transforms/InstCombine/add-mask-neg.ll b/llvm/test/Transforms/InstCombine/add-mask-neg.ll index 0e579f3097607..b72f051a0b799 100644 --- a/llvm/test/Transforms/InstCombine/add-mask-neg.ll +++ b/llvm/test/Transforms/InstCombine/add-mask-neg.ll @@ -49,7 +49,7 @@ define i32 @dec_commute_mask_neg_i32(i32 %X) { define i32 @dec_mask_neg_multiuse_i32(i32 %X) { ; CHECK-LABEL: @dec_mask_neg_multiuse_i32( ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = and i32 [[NEG]], [[X]] +; CHECK-NEXT: [[MASK:%.*]] = and i32 [[X]], [[NEG]] ; CHECK-NEXT: [[DEC:%.*]] = add i32 [[MASK]], -1 ; CHECK-NEXT: call void @use(i32 [[NEG]]) ; CHECK-NEXT: ret i32 [[DEC]] @@ -64,7 +64,7 @@ define i32 @dec_mask_neg_multiuse_i32(i32 %X) { define i32 @dec_mask_multiuse_neg_i32(i32 %X) { ; CHECK-LABEL: @dec_mask_multiuse_neg_i32( ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = and i32 [[NEG]], [[X]] +; CHECK-NEXT: [[MASK:%.*]] = and i32 [[X]], [[NEG]] ; CHECK-NEXT: [[DEC:%.*]] = add i32 [[MASK]], -1 ; CHECK-NEXT: call void @use(i32 [[MASK]]) ; CHECK-NEXT: ret i32 [[DEC]] @@ -105,7 +105,7 @@ define <2 x i32> @dec_mask_neg_v2i32_poison(<2 x i32> %X) { define <2 x i32> @dec_mask_multiuse_neg_multiuse_v2i32(<2 x i32> %X) { ; CHECK-LABEL: @dec_mask_multiuse_neg_multiuse_v2i32( ; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i32> zeroinitializer, [[X:%.*]] -; CHECK-NEXT: [[MASK:%.*]] = and <2 x i32> [[NEG]], [[X]] +; CHECK-NEXT: [[MASK:%.*]] = and <2 x i32> [[X]], [[NEG]] ; CHECK-NEXT: [[DEC:%.*]] = add <2 x i32> [[MASK]], <i32 -1, i32 -1> ; CHECK-NEXT: call void @usev(<2 x i32> [[NEG]]) ; CHECK-NEXT: call void @usev(<2 x i32> [[MASK]]) diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll index b1f21e58de1e2..36da56d8441bf 100644 --- a/llvm/test/Transforms/InstCombine/add.ll +++ b/llvm/test/Transforms/InstCombine/add.ll @@ -260,7 +260,7 @@ define i32 @test9(i32 %A) { define i1 @test10(i8 %a, i8 %b) { ; CHECK-LABEL: @test10( ; CHECK-NEXT: [[ADD:%.*]] = sub i8 0, [[B:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp 
ne i8 [[A:%.*]], [[ADD]] ; CHECK-NEXT: ret i1 [[C]] ; %add = add i8 %a, %b @@ -271,7 +271,7 @@ define i1 @test10(i8 %a, i8 %b) { define <2 x i1> @test10vec(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: @test10vec( ; CHECK-NEXT: [[C:%.*]] = sub <2 x i8> zeroinitializer, [[B:%.*]] -; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = icmp ne <2 x i8> [[A:%.*]], [[C]] ; CHECK-NEXT: ret <2 x i1> [[D]] ; %c = add <2 x i8> %a, %b @@ -302,7 +302,7 @@ define <2 x i1> @test11vec(<2 x i8> %a) { define i8 @reassoc_shl1(i8 %x, i8 %y) { ; CHECK-LABEL: @reassoc_shl1( ; CHECK-NEXT: [[REASS_ADD:%.*]] = shl i8 [[X:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = add i8 [[REASS_ADD]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[REASS_ADD]] ; CHECK-NEXT: ret i8 [[R]] ; %a = add i8 %y, %x @@ -313,7 +313,7 @@ define i8 @reassoc_shl1(i8 %x, i8 %y) { define <2 x i8> @reassoc_shl1_commute1(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @reassoc_shl1_commute1( ; CHECK-NEXT: [[REASS_ADD:%.*]] = shl <2 x i8> [[X:%.*]], <i8 1, i8 1> -; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[REASS_ADD]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[REASS_ADD]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %a = add <2 x i8> %x, %y @@ -1274,7 +1274,7 @@ define <2 x i32> @test44_vec_non_splat(<2 x i32> %A) { define i32 @lshr_add(i1 %x, i1 %y) { ; CHECK-LABEL: @lshr_add( ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true -; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = zext i1 [[TMP2]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -1288,7 +1288,7 @@ define i32 @lshr_add(i1 %x, i1 %y) { define i5 @and_add(i1 %x, i1 %y) { ; CHECK-LABEL: @and_add( ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[X:%.*]], true -; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP2]], i5 -2, i5 0 ; CHECK-NEXT: ret i5 [[R]] ; @@ -1302,7 +1302,7 @@ define i5 @and_add(i1 %x, i1 %y) { define <2 x i8> @ashr_add_commute(<2 x i1> %x, <2 x i1> %y) { ; CHECK-LABEL: @ashr_add_commute( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[X:%.*]], <i1 true, i1 true> -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i1> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i1> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = sext <2 x i1> [[TMP2]] to <2 x i8> ; CHECK-NEXT: ret <2 x i8> [[TMP3]] ; @@ -1656,7 +1656,7 @@ define i8 @add_and_xor_wrong_const(i8 %x, i8 %y) { define i8 @add_and_xor_wrong_op(i8 %x, i8 %y, i8 %z) { ; CHECK-LABEL: @add_and_xor_wrong_op( ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Z:%.*]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[XOR]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[AND]], [[X:%.*]] ; CHECK-NEXT: ret i8 [[ADD]] ; @@ -1711,7 +1711,7 @@ define i8 @add_and_xor_extra_use(i8 noundef %x, i8 %y) { ; CHECK-LABEL: @add_and_xor_extra_use( ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1 ; CHECK-NEXT: call void @use(i8 [[XOR]]) -; CHECK-NEXT: [[AND:%.*]] = and i8 [[XOR]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i8 [[Y:%.*]], [[XOR]] ; CHECK-NEXT: call void @use(i8 [[AND]]) ; CHECK-NEXT: [[ADD:%.*]] = or i8 [[Y]], [[X]] ; CHECK-NEXT: ret i8 [[ADD]] @@ -1956,7 +1956,7 @@ define i32 @add_add_add_commute1(i32 %A, i32 %B, i32 %C, i32 %D) { define i32 @add_add_add_commute2(i32 %A, i32 %B, i32 %C, i32 %D) { ; CHECK-LABEL: @add_add_add_commute2( ; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]] -; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], 
[[C:%.*]] +; CHECK-NEXT: [[F:%.*]] = add i32 [[C:%.*]], [[E]] ; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]] ; CHECK-NEXT: ret i32 [[G]] ; @@ -1969,8 +1969,8 @@ define i32 @add_add_add_commute2(i32 %A, i32 %B, i32 %C, i32 %D) { define i32 @add_add_add_commute3(i32 %A, i32 %B, i32 %C, i32 %D) { ; CHECK-LABEL: @add_add_add_commute3( ; CHECK-NEXT: [[E:%.*]] = add i32 [[B:%.*]], [[A:%.*]] -; CHECK-NEXT: [[F:%.*]] = add i32 [[E]], [[C:%.*]] -; CHECK-NEXT: [[G:%.*]] = add i32 [[F]], [[D:%.*]] +; CHECK-NEXT: [[F:%.*]] = add i32 [[C:%.*]], [[E]] +; CHECK-NEXT: [[G:%.*]] = add i32 [[D:%.*]], [[F]] ; CHECK-NEXT: ret i32 [[G]] ; %E = add i32 %B, %A @@ -1984,7 +1984,7 @@ define i32 @add_add_add_commute3(i32 %A, i32 %B, i32 %C, i32 %D) { define i8 @mul_add_common_factor_commute1(i8 %x, i8 %y) { ; CHECK-LABEL: @mul_add_common_factor_commute1( ; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y:%.*]], 1 -; CHECK-NEXT: [[A:%.*]] = mul i8 [[X1]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[X1]] ; CHECK-NEXT: ret i8 [[A]] ; %m = mul nsw i8 %x, %y @@ -2078,7 +2078,7 @@ define i8 @not_mul_wrong_op(i8 %x, i8 %y) { ; CHECK-LABEL: @not_mul_wrong_op( ; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42 ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1 -; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[Y:%.*]] +; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[Y:%.*]], [[NOT]] ; CHECK-NEXT: ret i8 [[PLUSX]] ; %mul = mul i8 %x, 42 @@ -2094,7 +2094,7 @@ define i8 @not_mul_use1(i8 %x) { ; CHECK-NEXT: [[MUL:%.*]] = mul nsw i8 [[X:%.*]], 42 ; CHECK-NEXT: call void @use(i8 [[MUL]]) ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1 -; CHECK-NEXT: [[PLUSX:%.*]] = add nsw i8 [[NOT]], [[X]] +; CHECK-NEXT: [[PLUSX:%.*]] = add nsw i8 [[X]], [[NOT]] ; CHECK-NEXT: ret i8 [[PLUSX]] ; %mul = mul nsw i8 %x, 42 @@ -2111,7 +2111,7 @@ define i8 @not_mul_use2(i8 %x) { ; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 42 ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[MUL]], -1 ; CHECK-NEXT: call void @use(i8 [[NOT]]) -; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[NOT]], [[X]] +; CHECK-NEXT: [[PLUSX:%.*]] = add i8 [[X]], [[NOT]] ; CHECK-NEXT: ret i8 [[PLUSX]] ; %mul = mul i8 %x, 42 @@ -3395,7 +3395,7 @@ define i32 @add_reduce_sqr_sum_flipped(i32 %a, i32 %b) { define i32 @add_reduce_sqr_sum_flipped2(i32 %a, i32 %bx) { ; CHECK-LABEL: @add_reduce_sqr_sum_flipped2( ; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]] ; CHECK-NEXT: [[ADD:%.*]] = mul i32 [[TMP1]], [[TMP1]] ; CHECK-NEXT: ret i32 [[ADD]] ; @@ -3455,7 +3455,7 @@ define i32 @add_reduce_sqr_sum_order2_flipped(i32 %a, i32 %b) { define i32 @add_reduce_sqr_sum_order2_flipped2(i32 %a, i32 %bx) { ; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped2( ; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]] ; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]] ; CHECK-NEXT: ret i32 [[AB2]] ; @@ -3472,7 +3472,7 @@ define i32 @add_reduce_sqr_sum_order2_flipped2(i32 %a, i32 %bx) { define i32 @add_reduce_sqr_sum_order2_flipped3(i32 %a, i32 %bx) { ; CHECK-LABEL: @add_reduce_sqr_sum_order2_flipped3( ; CHECK-NEXT: [[B:%.*]] = xor i32 [[BX:%.*]], 42 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B]] ; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]] ; CHECK-NEXT: ret i32 [[AB2]] ; @@ -3669,7 +3669,7 @@ define i32 @add_reduce_sqr_sum_order5_flipped2(i32 %a, i32 %b) { 
define i32 @add_reduce_sqr_sum_order5_flipped3(i32 %ax, i32 %b) { ; CHECK-LABEL: @add_reduce_sqr_sum_order5_flipped3( ; CHECK-NEXT: [[A:%.*]] = xor i32 [[AX:%.*]], 42 -; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A]], [[B:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[B:%.*]], [[A]] ; CHECK-NEXT: [[AB2:%.*]] = mul i32 [[TMP1]], [[TMP1]] ; CHECK-NEXT: ret i32 [[AB2]] ; @@ -4044,7 +4044,7 @@ define i32 @add_reduce_sqr_sum_varB_invalid3(i32 %a, i32 %b) { ; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1 ; CHECK-NEXT: [[B_SQ1:%.*]] = add i32 [[A]], [[B]] -; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[B_SQ1]], [[B]] +; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[B]], [[B_SQ1]] ; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]] ; CHECK-NEXT: ret i32 [[AB2]] ; @@ -4062,7 +4062,7 @@ define i32 @add_reduce_sqr_sum_varB_invalid4(i32 %a, i32 %b) { ; CHECK-NEXT: [[A_B:%.*]] = mul nsw i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: [[TWOAB:%.*]] = shl i32 [[A_B]], 1 ; CHECK-NEXT: [[NOT_B_SQ1:%.*]] = add i32 [[A]], [[B]] -; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[NOT_B_SQ1]], [[A]] +; CHECK-NEXT: [[A2_B2:%.*]] = mul i32 [[A]], [[NOT_B_SQ1]] ; CHECK-NEXT: [[AB2:%.*]] = add i32 [[TWOAB]], [[A2_B2]] ; CHECK-NEXT: ret i32 [[AB2]] ; diff --git a/llvm/test/Transforms/InstCombine/add2.ll b/llvm/test/Transforms/InstCombine/add2.ll index 9ebcdac77179e..ae80ab2e92ad1 100644 --- a/llvm/test/Transforms/InstCombine/add2.ll +++ b/llvm/test/Transforms/InstCombine/add2.ll @@ -452,7 +452,7 @@ define i8 @add_of_mul(i8 %x, i8 %y, i8 %z) { ; CHECK-LABEL: @add_of_mul( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[MB1:%.*]] = add i8 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[SUM:%.*]] = mul i8 [[MB1]], [[X:%.*]] +; CHECK-NEXT: [[SUM:%.*]] = mul i8 [[X:%.*]], [[MB1]] ; CHECK-NEXT: ret i8 [[SUM]] ; entry: diff --git a/llvm/test/Transforms/InstCombine/add_or_sub.ll b/llvm/test/Transforms/InstCombine/add_or_sub.ll index 5f1234618b9a6..ef44f036b71fa 100644 --- a/llvm/test/Transforms/InstCombine/add_or_sub.ll +++ b/llvm/test/Transforms/InstCombine/add_or_sub.ll @@ -103,7 +103,7 @@ define i12 @add_or_sub_comb_i12_multiuse_only_sub(i12 %p) { define i8 @add_or_sub_comb_i8_negative_y_sub(i8 %x, i8 %y) { ; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_sub( ; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i8 [[X:%.*]], [[SUB]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[X]] ; CHECK-NEXT: ret i8 [[ADD]] ; @@ -116,7 +116,7 @@ define i8 @add_or_sub_comb_i8_negative_y_sub(i8 %x, i8 %y) { define i8 @add_or_sub_comb_i8_negative_y_or(i8 %x, i8 %y) { ; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_or( ; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]] -; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[Y:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[SUB]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[X]] ; CHECK-NEXT: ret i8 [[ADD]] ; @@ -129,7 +129,7 @@ define i8 @add_or_sub_comb_i8_negative_y_or(i8 %x, i8 %y) { define i8 @add_or_sub_comb_i8_negative_y_add(i8 %x, i8 %y) { ; CHECK-LABEL: @add_or_sub_comb_i8_negative_y_add( ; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]] -; CHECK-NEXT: [[OR:%.*]] = or i8 [[SUB]], [[X]] +; CHECK-NEXT: [[OR:%.*]] = or i8 [[X]], [[SUB]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[OR]], [[Y:%.*]] ; CHECK-NEXT: ret i8 [[ADD]] ; @@ -142,7 +142,7 @@ define i8 @add_or_sub_comb_i8_negative_y_add(i8 %x, i8 %y) { define i8 @add_or_sub_comb_i8_negative_xor_instead_or(i8 %x) { ; CHECK-LABEL: @add_or_sub_comb_i8_negative_xor_instead_or( 
; CHECK-NEXT: [[SUB:%.*]] = sub i8 0, [[X:%.*]] -; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[X]] +; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[SUB]] ; CHECK-NEXT: [[ADD:%.*]] = add i8 [[XOR]], [[X]] ; CHECK-NEXT: ret i8 [[ADD]] ; diff --git a/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll b/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll index 9365f8281ccbd..de5de37fe2df6 100644 --- a/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll +++ b/llvm/test/Transforms/InstCombine/and-or-icmp-const-icmp.ll @@ -275,7 +275,7 @@ define i1 @ne_commuted_equal_minus_1(i8 %x, i8 %py) { ; CHECK-LABEL: define i1 @ne_commuted_equal_minus_1( ; CHECK-SAME: i8 [[X:%.*]], i8 [[PY:%.*]]) { ; CHECK-NEXT: [[Y:%.*]] = sdiv i8 42, [[PY]] -; CHECK-NEXT: [[AND:%.*]] = icmp ugt i8 [[Y]], [[X]] +; CHECK-NEXT: [[AND:%.*]] = icmp ult i8 [[X]], [[Y]] ; CHECK-NEXT: ret i1 [[AND]] ; %y = sdiv i8 42, %py ; thwart complexity-based canonicalization diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll index 7d4fddc1563fe..74ef365db8d22 100644 --- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll +++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll @@ -1320,7 +1320,7 @@ define i1 @bitwise_and_bitwise_and_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = and i1 [[C1]], [[TMP3]] ; CHECK-NEXT: ret i1 [[AND2]] @@ -1341,7 +1341,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = and i1 [[C1]], [[TMP3]] ; CHECK-NEXT: ret i1 [[AND2]] @@ -1362,7 +1362,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = and i1 [[TMP3]], [[C1]] ; CHECK-NEXT: ret i1 [[AND2]] @@ -1383,7 +1383,7 @@ define i1 @bitwise_and_bitwise_and_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = and i1 [[TMP3]], [[C1]] ; CHECK-NEXT: ret i1 [[AND2]] @@ -1404,7 +1404,7 @@ define i1 @bitwise_and_logical_and_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: 
[[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[C1]], i1 [[TMP3]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -1425,7 +1425,7 @@ define i1 @bitwise_and_logical_and_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[C1]], i1 [[TMP3]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -1447,7 +1447,7 @@ define i1 @bitwise_and_logical_and_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i8 [[Z_SHIFT]] ; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], [[X:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[X:%.*]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i8 [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP4]], i1 [[C1]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -1468,7 +1468,7 @@ define i1 @bitwise_and_logical_and_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C1]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -1489,7 +1489,7 @@ define i1 @logical_and_bitwise_and_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C1]], [[C2]] @@ -1512,7 +1512,7 @@ define i1 @logical_and_bitwise_and_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C1]], [[C2]] @@ -1535,7 +1535,7 @@ define i1 @logical_and_bitwise_and_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C2]], [[C1]] @@ -1558,7 +1558,7 @@ define i1 @logical_and_bitwise_and_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; 
CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = and i1 [[C2]], [[C1]] @@ -1581,7 +1581,7 @@ define i1 @logical_and_logical_and_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = select i1 [[C1]], i1 [[C2]], i1 false @@ -1604,7 +1604,7 @@ define i1 @logical_and_logical_and_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C3]], i1 [[C1]], i1 false @@ -1627,7 +1627,7 @@ define i1 @logical_and_logical_and_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp ne i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp ne i8 [[X_M2]], 0 ; CHECK-NEXT: [[AND1:%.*]] = select i1 [[C2]], i1 [[C1]], i1 false @@ -1650,7 +1650,7 @@ define i1 @logical_and_logical_and_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C1]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -1671,7 +1671,7 @@ define i1 @bitwise_or_bitwise_or_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i1 [[C1]], [[TMP3]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1692,7 +1692,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i1 [[C1]], [[TMP3]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1713,7 +1713,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] 
; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i1 [[TMP3]], [[C1]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1734,7 +1734,7 @@ define i1 @bitwise_or_bitwise_or_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i1 [[TMP3]], [[C1]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1755,7 +1755,7 @@ define i1 @bitwise_or_logical_or_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = select i1 [[C1]], i1 true, i1 [[TMP3]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1776,7 +1776,7 @@ define i1 @bitwise_or_logical_or_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = select i1 [[C1]], i1 true, i1 [[TMP3]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1798,7 +1798,7 @@ define i1 @bitwise_or_logical_or_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i8 [[Z_SHIFT]] ; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], 1 -; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[TMP2]], [[X:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i8 [[X:%.*]], [[TMP2]] ; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i8 [[TMP3]], [[TMP2]] ; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP4]], i1 true, i1 [[C1]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1819,7 +1819,7 @@ define i1 @bitwise_or_logical_or_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP3]], i1 true, i1 [[C1]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -1840,7 +1840,7 @@ define i1 @logical_or_bitwise_or_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C1]], [[C2]] @@ -1863,7 +1863,7 @@ define i1 @logical_or_bitwise_or_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 
; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C1]], [[C2]] @@ -1886,7 +1886,7 @@ define i1 @logical_or_bitwise_or_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C2]], [[C1]] @@ -1909,7 +1909,7 @@ define i1 @logical_or_bitwise_or_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = or i1 [[C2]], [[C1]] @@ -1932,7 +1932,7 @@ define i1 @logical_or_logical_or_icmps(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = select i1 [[C1]], i1 true, i1 [[C2]] @@ -1955,7 +1955,7 @@ define i1 @logical_or_logical_or_icmps_comm1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C3]], i1 true, i1 [[C1]] @@ -1978,7 +1978,7 @@ define i1 @logical_or_logical_or_icmps_comm2(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[X_M1:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] -; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[Z_SHIFT]], [[X]] +; CHECK-NEXT: [[X_M2:%.*]] = and i8 [[X]], [[Z_SHIFT]] ; CHECK-NEXT: [[C2:%.*]] = icmp eq i8 [[X_M1]], 0 ; CHECK-NEXT: [[C3:%.*]] = icmp eq i8 [[X_M2]], 0 ; CHECK-NEXT: [[OR1:%.*]] = select i1 [[C2]], i1 true, i1 [[C1]] @@ -2001,7 +2001,7 @@ define i1 @logical_or_logical_or_icmps_comm3(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[C1:%.*]] = icmp eq i8 [[Y:%.*]], 42 ; CHECK-NEXT: [[Z_SHIFT:%.*]] = shl nuw i8 1, [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[Z_SHIFT]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i8 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = select i1 [[TMP3]], i1 true, i1 [[C1]] ; CHECK-NEXT: ret i1 [[OR2]] @@ -2052,7 +2052,7 @@ define i1 @bitwise_and_logical_and_masked_icmp_allzeros(i1 %c, i32 %x) { define i1 
@bitwise_and_logical_and_masked_icmp_allzeros_poison1(i1 %c, i32 %x, i32 %y) { ; CHECK-LABEL: @bitwise_and_logical_and_masked_icmp_allzeros_poison1( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[Y:%.*]], 7 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], 0 ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C:%.*]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -2104,7 +2104,7 @@ define i1 @bitwise_and_logical_and_masked_icmp_allones(i1 %c, i32 %x) { define i1 @bitwise_and_logical_and_masked_icmp_allones_poison1(i1 %c, i32 %x, i32 %y) { ; CHECK-LABEL: @bitwise_and_logical_and_masked_icmp_allones_poison1( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[Y:%.*]], 7 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = select i1 [[TMP3]], i1 [[C:%.*]], i1 false ; CHECK-NEXT: ret i1 [[AND2]] @@ -3118,8 +3118,8 @@ entry: define i1 @icmp_eq_or_z_or_pow2orz(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[POW2ORZ]], [[X:%.*]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -3136,8 +3136,8 @@ define i1 @icmp_eq_or_z_or_pow2orz(i8 %x, i8 %y) { define i1 @icmp_eq_or_z_or_pow2orz_logical(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz_logical( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[POW2ORZ]], [[X:%.*]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -3155,9 +3155,9 @@ define i1 @icmp_eq_or_z_or_pow2orz_logical(i8 %x, i8 %y) { define i1 @icmp_eq_or_z_or_pow2orz_fail_multiuse(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz_fail_multiuse( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp eq i8 [[X:%.*]], 0 -; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: call void @use(i1 [[C0]]) ; CHECK-NEXT: [[R:%.*]] = or i1 [[C0]], [[CP2]] ; CHECK-NEXT: ret i1 [[R]] @@ -3176,9 +3176,9 @@ define i1 @icmp_eq_or_z_or_pow2orz_fail_multiuse(i8 %x, i8 %y) { define i1 @icmp_eq_or_z_or_pow2orz_fail_logic_or(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz_fail_logic_or( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp eq i8 [[X:%.*]], 0 -; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C0]], i1 true, i1 [[CP2]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -3195,8 +3195,8 @@ define i1 @icmp_eq_or_z_or_pow2orz_fail_logic_or(i8 %x, i8 %y) { define <2 x i1> @icmp_ne_and_z_and_pow2orz(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @icmp_ne_and_z_and_pow2orz( ; CHECK-NEXT: [[NY:%.*]] = 
sub <2 x i8> zeroinitializer, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[NY]], [[Y]] -; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[POW2ORZ]], [[X:%.*]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[Y]], [[NY]] +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i8> [[TMP1]], [[X]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -3226,9 +3226,9 @@ define i1 @icmp_ne_and_z_and_onefail(i8 %x) { define i1 @icmp_ne_and_z_and_pow2orz_fail_multiuse1(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_ne_and_z_and_pow2orz_fail_multiuse1( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp eq i8 [[X:%.*]], 0 -; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: call void @use(i1 [[C0]]) ; CHECK-NEXT: [[R:%.*]] = or i1 [[C0]], [[CP2]] ; CHECK-NEXT: ret i1 [[R]] @@ -3247,9 +3247,9 @@ define i1 @icmp_ne_and_z_and_pow2orz_fail_multiuse1(i8 %x, i8 %y) { define <2 x i1> @icmp_ne_and_z_and_pow2orz_fail_logic_and(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @icmp_ne_and_z_and_pow2orz_fail_logic_and( ; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp ne <2 x i8> [[X:%.*]], zeroinitializer -; CHECK-NEXT: [[CP2:%.*]] = icmp ne <2 x i8> [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp ne <2 x i8> [[X]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C0]], <2 x i1> [[CP2]], <2 x i1> zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -3267,7 +3267,7 @@ define i1 @icmp_eq_or_z_or_pow2orz_fail_not_pow2(i8 %x, i8 %y) { ; CHECK-NEXT: [[NY:%.*]] = sub i8 1, [[Y:%.*]] ; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] ; CHECK-NEXT: [[C0:%.*]] = icmp eq i8 [[X:%.*]], 0 -; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = or i1 [[C0]], [[CP2]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -3283,9 +3283,9 @@ define i1 @icmp_eq_or_z_or_pow2orz_fail_not_pow2(i8 %x, i8 %y) { define i1 @icmp_eq_or_z_or_pow2orz_fail_nonzero_const(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz_fail_nonzero_const( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp eq i8 [[X:%.*]], 1 -; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp eq i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = or i1 [[C0]], [[CP2]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -3301,8 +3301,8 @@ define i1 @icmp_eq_or_z_or_pow2orz_fail_nonzero_const(i8 %x, i8 %y) { define <2 x i1> @icmp_ne_and_z_and_pow2orz_fail_bad_pred(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @icmp_ne_and_z_and_pow2orz_fail_bad_pred( ; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[NY]], [[Y]] -; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i8> [[POW2ORZ]], [[X:%.*]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and <2 x i8> [[Y]], [[NY]] +; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i8> [[X:%.*]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -3318,9 +3318,9 @@ define <2 x i1> 
@icmp_ne_and_z_and_pow2orz_fail_bad_pred(<2 x i8> %x, <2 x i8> % define i1 @icmp_eq_or_z_or_pow2orz_fail_bad_pred2(i8 %x, i8 %y) { ; CHECK-LABEL: @icmp_eq_or_z_or_pow2orz_fail_bad_pred2( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[POW2ORZ:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[C0:%.*]] = icmp slt i8 [[X:%.*]], 1 -; CHECK-NEXT: [[CP2:%.*]] = icmp sge i8 [[POW2ORZ]], [[X]] +; CHECK-NEXT: [[CP2:%.*]] = icmp sle i8 [[X]], [[POW2ORZ]] ; CHECK-NEXT: [[R:%.*]] = or i1 [[C0]], [[CP2]] ; CHECK-NEXT: ret i1 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/and-or-not.ll b/llvm/test/Transforms/InstCombine/and-or-not.ll index 2e351c30ea1f7..5e6c480df5d10 100644 --- a/llvm/test/Transforms/InstCombine/and-or-not.ll +++ b/llvm/test/Transforms/InstCombine/and-or-not.ll @@ -506,8 +506,8 @@ define i64 @PR32830(i64 %a, i64 %b, i64 %c) { ; CHECK-LABEL: @PR32830( ; CHECK-NEXT: [[NOTA:%.*]] = xor i64 [[A:%.*]], -1 ; CHECK-NEXT: [[NOTB:%.*]] = xor i64 [[B:%.*]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i64 [[NOTB]], [[A]] -; CHECK-NEXT: [[OR2:%.*]] = or i64 [[NOTA]], [[C:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i64 [[A]], [[NOTB]] +; CHECK-NEXT: [[OR2:%.*]] = or i64 [[C:%.*]], [[NOTA]] ; CHECK-NEXT: [[AND:%.*]] = and i64 [[OR1]], [[OR2]] ; CHECK-NEXT: ret i64 [[AND]] ; @@ -813,7 +813,7 @@ define i4 @reduce_xor_common_op_commute1(i4 %x, i4 %y, i4 %z) { define i4 @annihilate_xor_common_op_commute2(i4 %x, i4 %y, i4 %p, i4 %q) { ; CHECK-LABEL: @annihilate_xor_common_op_commute2( ; CHECK-NEXT: [[Z:%.*]] = mul i4 [[P:%.*]], [[P]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[Z]], [[Y:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[Y:%.*]], [[Z]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i4 [[TMP1]], [[Q:%.*]] ; CHECK-NEXT: ret i4 [[TMP2]] ; @@ -828,8 +828,8 @@ define i4 @annihilate_xor_common_op_commute2(i4 %x, i4 %y, i4 %p, i4 %q) { define <2 x i4> @reduce_xor_common_op_commute3(<2 x i4> %x, <2 x i4> %y, <2 x i4> %p) { ; CHECK-LABEL: @reduce_xor_common_op_commute3( ; CHECK-NEXT: [[Z:%.*]] = mul <2 x i4> [[P:%.*]], [[P]] -; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[Z]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[Y:%.*]], [[Z]] +; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %z = mul <2 x i4> %p, %p ; thwart complexity-based canonicalization diff --git a/llvm/test/Transforms/InstCombine/and-or.ll b/llvm/test/Transforms/InstCombine/and-or.ll index b4ef27607121d..fee055a2e1245 100644 --- a/llvm/test/Transforms/InstCombine/and-or.ll +++ b/llvm/test/Transforms/InstCombine/and-or.ll @@ -385,7 +385,7 @@ define i8 @or_or_and_noOneUse(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-NEXT: call void @use(i8 [[AND1]]) ; CHECK-NEXT: [[AND2:%.*]] = and i8 [[A]], [[D:%.*]] ; CHECK-NEXT: call void @use(i8 [[AND2]]) -; CHECK-NEXT: [[OR1:%.*]] = or i8 [[AND2]], [[C:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i8 [[C:%.*]], [[AND2]] ; CHECK-NEXT: call void @use(i8 [[OR1]]) ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[OR1]], [[AND1]] ; CHECK-NEXT: ret i8 [[OR2]] @@ -405,7 +405,7 @@ define i8 @or_or_and_pat1(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_or_and_pat1( ; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -439,7 +439,7 @@ define i8 
@or_or_and_pat3(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_or_and_pat3( ; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -472,7 +472,7 @@ define i8 @or_or_and_pat4(i8 %a, i8 %b, i8 %c, i8 %d) { define i8 @or_or_and_pat5(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_or_and_pat5( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -502,7 +502,7 @@ define i8 @or_or_and_pat6(i8 %a, i8 %b, i8 %c, i8 %d) { define i8 @or_or_and_pat7(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_or_and_pat7( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -535,7 +535,7 @@ define i8 @or_and_or_noOneUse(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-NEXT: call void @use(i8 [[AND1]]) ; CHECK-NEXT: [[AND2:%.*]] = and i8 [[A]], [[D:%.*]] ; CHECK-NEXT: call void @use(i8 [[AND2]]) -; CHECK-NEXT: [[OR1:%.*]] = or i8 [[AND2]], [[C:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i8 [[C:%.*]], [[AND2]] ; CHECK-NEXT: call void @use(i8 [[OR1]]) ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[AND1]], [[OR1]] ; CHECK-NEXT: ret i8 [[OR2]] @@ -555,7 +555,7 @@ define i8 @or_and_or_pat1(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_and_or_pat1( ; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -589,7 +589,7 @@ define i8 @or_and_or_pat3(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_and_or_pat3( ; CHECK-NEXT: [[CT:%.*]] = udiv i8 42, [[C:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[CT]], [[TMP2]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -622,7 +622,7 @@ define i8 @or_and_or_pat4(i8 %a, i8 %b, i8 %c, i8 %d) { define i8 @or_and_or_pat5(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_and_or_pat5( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[B:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -652,7 +652,7 @@ define i8 @or_and_or_pat6(i8 %a, i8 %b, i8 %c, i8 %d) { define i8 @or_and_or_pat7(i8 %a, i8 %b, i8 %c, i8 %d) { ; CHECK-LABEL: @or_and_or_pat7( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[D:%.*]], [[A:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = or i8 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i8 [[OR2]] ; @@ -687,8 +687,8 @@ define i32 @or_or_and_noOneUse_fail1(i32 %a, i32 %b) { ; CHECK-NEXT: call void @use2(i32 [[AND]]) ; CHECK-NEXT: [[AND1:%.*]] = or i32 [[B:%.*]], 157 ; CHECK-NEXT: [[OR:%.*]] = and i32 [[SHR]], [[AND1]] -; 
CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[B]], 23 -; CHECK-NEXT: [[AND9:%.*]] = and i32 [[TMP1]], 157 +; CHECK-NEXT: [[SHR8:%.*]] = lshr i32 [[B]], 23 +; CHECK-NEXT: [[AND9:%.*]] = and i32 [[SHR8]], 157 ; CHECK-NEXT: [[R:%.*]] = or i32 [[OR]], [[AND9]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -714,7 +714,7 @@ define { i1, i1, i1, i1, i1 } @or_or_and_noOneUse_fail2(i1 %a_0, i1 %a_1, i1 %a_ ; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[A_1:%.*]], [[B_1:%.*]] ; CHECK-NEXT: [[TMP4:%.*]] = xor i1 [[TMP3]], true ; CHECK-NEXT: [[TMP5:%.*]] = and i1 [[TMP0]], [[A_1]] -; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[TMP2]], [[A_1]] +; CHECK-NEXT: [[TMP6:%.*]] = or i1 [[A_1]], [[TMP2]] ; CHECK-NEXT: [[TMP7:%.*]] = and i1 [[TMP6]], [[B_1]] ; CHECK-NEXT: [[D:%.*]] = or i1 [[TMP7]], [[TMP5]] ; CHECK-NEXT: [[DOTNOT1:%.*]] = or i1 [[TMP1]], [[TMP3]] diff --git a/llvm/test/Transforms/InstCombine/and-xor-merge.ll b/llvm/test/Transforms/InstCombine/and-xor-merge.ll index 80bdf67525faa..cf1285cbc11a4 100644 --- a/llvm/test/Transforms/InstCombine/and-xor-merge.ll +++ b/llvm/test/Transforms/InstCombine/and-xor-merge.ll @@ -5,7 +5,7 @@ define i32 @test1(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[T61:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[T7:%.*]] = and i32 [[T61]], [[Z:%.*]] +; CHECK-NEXT: [[T7:%.*]] = and i32 [[Z:%.*]], [[T61]] ; CHECK-NEXT: ret i32 [[T7]] ; %t3 = and i32 %z, %x diff --git a/llvm/test/Transforms/InstCombine/and-xor-or.ll b/llvm/test/Transforms/InstCombine/and-xor-or.ll index b26d6e16c2db2..3dbf9af7e1934 100644 --- a/llvm/test/Transforms/InstCombine/and-xor-or.ll +++ b/llvm/test/Transforms/InstCombine/and-xor-or.ll @@ -339,8 +339,8 @@ define i64 @and_xor_or_negative(i64 %x, i64 %y, i64 %z, i64 %w) { ; CHECK-LABEL: define {{[^@]+}}@and_xor_or_negative ; CHECK-SAME: (i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]], i64 [[W:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[Y]], [[X]] -; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[TMP1]], [[Z]] -; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[TMP2]], [[W]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i64 [[Z]], [[TMP1]] +; CHECK-NEXT: [[TMP3:%.*]] = or i64 [[W]], [[TMP2]] ; CHECK-NEXT: ret i64 [[TMP3]] ; %1 = and i64 %y, %x @@ -585,7 +585,7 @@ define i64 @sext_or_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]] +; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]] ; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]] ; CHECK-NEXT: ret i64 [[OR2]] ; @@ -601,7 +601,7 @@ define i64 @zext_or_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]] +; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]] ; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]] ; CHECK-NEXT: ret i64 [[OR2]] ; @@ -617,7 +617,7 @@ define i64 @sext_and_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[CONV]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[CONV]] ; CHECK-NEXT: [[AND2:%.*]] = and i64 [[AND]], [[CONV2]] ; CHECK-NEXT: ret i64 [[AND2]] ; @@ -633,7 +633,7 @@ define i64 @zext_and_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 
[[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64 -; CHECK-NEXT: [[AND:%.*]] = and i64 [[CONV]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[CONV]] ; CHECK-NEXT: [[AND2:%.*]] = and i64 [[AND]], [[CONV2]] ; CHECK-NEXT: ret i64 [[AND2]] ; @@ -649,7 +649,7 @@ define i64 @sext_xor_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64 -; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[CONV]], [[A]] +; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[A]], [[CONV]] ; CHECK-NEXT: [[XOR2:%.*]] = xor i64 [[XOR]], [[CONV2]] ; CHECK-NEXT: ret i64 [[XOR2]] ; @@ -665,7 +665,7 @@ define i64 @zext_xor_chain(i64 %a, i16 %b, i16 %c) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = zext i16 [[C]] to i64 -; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[CONV]], [[A]] +; CHECK-NEXT: [[XOR:%.*]] = xor i64 [[A]], [[CONV]] ; CHECK-NEXT: [[XOR2:%.*]] = xor i64 [[XOR]], [[CONV2]] ; CHECK-NEXT: ret i64 [[XOR2]] ; @@ -682,7 +682,7 @@ define i64 @sext_or_chain_two_uses1(i64 %a, i16 %b, i16 %c, i64 %d) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]], i64 [[D:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]] +; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]] ; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]] ; CHECK-NEXT: [[USE:%.*]] = udiv i64 [[OR]], [[D]] ; CHECK-NEXT: [[RETVAL:%.*]] = udiv i64 [[OR2]], [[USE]] @@ -702,7 +702,7 @@ define i64 @sext_or_chain_two_uses2(i64 %a, i16 %b, i16 %c, i64 %d) { ; CHECK-SAME: (i64 [[A:%.*]], i16 [[B:%.*]], i16 [[C:%.*]], i64 [[D:%.*]]) { ; CHECK-NEXT: [[CONV:%.*]] = sext i16 [[B]] to i64 ; CHECK-NEXT: [[CONV2:%.*]] = sext i16 [[C]] to i64 -; CHECK-NEXT: [[OR:%.*]] = or i64 [[CONV]], [[A]] +; CHECK-NEXT: [[OR:%.*]] = or i64 [[A]], [[CONV]] ; CHECK-NEXT: [[OR2:%.*]] = or i64 [[OR]], [[CONV2]] ; CHECK-NEXT: [[USE1:%.*]] = udiv i64 [[OR2]], [[D]] ; CHECK-NEXT: [[USE2:%.*]] = udiv i64 [[OR2]], [[USE1]] @@ -761,7 +761,7 @@ define i32 @not_and_and_not_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP2]], [[A]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[TMP2]] ; CHECK-NEXT: ret i32 [[AND2]] ; %not1 = xor i32 %b, -1 @@ -856,7 +856,7 @@ define i32 @not_or_or_not_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP2]], [[A]] +; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[TMP2]] ; CHECK-NEXT: ret i32 [[OR2]] ; %not1 = xor i32 %b, -1 @@ -952,7 +952,7 @@ define i32 @or_not_and_commute2(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@or_not_and_commute2 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i32 
[[OR3]] @@ -990,7 +990,7 @@ define i32 @or_not_and_commute4(i32 %a, i32 %b, i32 %c0) { ; CHECK-LABEL: define {{[^@]+}}@or_not_and_commute4 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) { ; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i32 [[OR3]] @@ -1011,7 +1011,7 @@ define i32 @or_not_and_commute5(i32 %a0, i32 %b, i32 %c0) { ; CHECK-SAME: (i32 [[A0:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) { ; CHECK-NEXT: [[A:%.*]] = sdiv i32 42, [[A0]] ; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret i32 [[OR3]] @@ -1137,10 +1137,10 @@ define i32 @or_not_and_extra_not_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[OR3]] @@ -1161,7 +1161,7 @@ define i32 @or_not_and_extra_and_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[OR3:%.*]] = and i32 [[TMP1]], [[TMP2]] @@ -1184,10 +1184,10 @@ define i32 @or_not_and_extra_and_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]] ; CHECK-NEXT: call void @use(i32 [[AND2]]) ; CHECK-NEXT: ret i32 [[OR3]] @@ -1250,10 +1250,10 @@ define i32 @or_not_and_wrong_c(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[D]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]] ; CHECK-NEXT: ret i32 [[OR3]] ; @@ -1272,10 +1272,10 @@ define i32 @or_not_and_wrong_b(i32 
%a, i32 %b, i32 %c, i32 %d) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[NOT2]], [[D]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[D]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND1]], [[AND2]] ; CHECK-NEXT: ret i32 [[OR3]] ; @@ -1333,7 +1333,7 @@ define i32 @and_not_or_commute2(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@and_not_or_commute2 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[AND3]] @@ -1371,7 +1371,7 @@ define i32 @and_not_or_commute4(i32 %a, i32 %b, i32 %c0) { ; CHECK-LABEL: define {{[^@]+}}@and_not_or_commute4 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) { ; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[AND3]] @@ -1392,7 +1392,7 @@ define i32 @and_not_or_commute5(i32 %a0, i32 %b, i32 %c0) { ; CHECK-SAME: (i32 [[A0:%.*]], i32 [[B:%.*]], i32 [[C0:%.*]]) { ; CHECK-NEXT: [[A:%.*]] = sdiv i32 42, [[A0]] ; CHECK-NEXT: [[C:%.*]] = sdiv i32 42, [[C0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[AND3]] @@ -1518,10 +1518,10 @@ define i32 @and_not_or_extra_not_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[AND3]] @@ -1542,7 +1542,7 @@ define i32 @and_not_or_extra_and_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 @@ -1565,10 +1565,10 @@ define i32 @and_not_or_extra_and_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[OR1:%.*]] = 
or i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[OR2]]) ; CHECK-NEXT: ret i32 [[AND3]] @@ -1631,10 +1631,10 @@ define i32 @and_not_or_wrong_c(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[D]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR2:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]] ; CHECK-NEXT: ret i32 [[AND3]] ; @@ -1653,10 +1653,10 @@ define i32 @and_not_or_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]], i32 [[D:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[C]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[C]], [[NOT1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR2:%.*]] = or i32 [[NOT2]], [[D]] +; CHECK-NEXT: [[OR2:%.*]] = or i32 [[D]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR1]], [[OR2]] ; CHECK-NEXT: ret i32 [[AND3]] ; @@ -1693,7 +1693,7 @@ define i32 @or_and_not_not_commute1(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@or_and_not_not_commute1 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[OR3]] @@ -1780,7 +1780,7 @@ define i32 @or_and_not_not_commute6(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@or_and_not_not_commute6 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[OR3]] @@ -1819,7 +1819,7 @@ define i32 @or_and_not_not_extra_not_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) ; CHECK-NEXT: ret i32 [[OR3]] @@ -1860,7 +1860,7 @@ define i32 @or_and_not_not_extra_and_use(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 
[[TMP1]], [[A]] ; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1 @@ -1884,7 +1884,7 @@ define i32 @or_and_not_not_extra_or_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[OR1]]) ; CHECK-NEXT: ret i32 [[OR3]] @@ -1929,7 +1929,7 @@ define i32 @or_and_not_not_2_extra_uses(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[AND]]) ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]] ; CHECK-NEXT: ret i32 [[OR3]] @@ -1952,7 +1952,7 @@ define i32 @or_and_not_not_wrong_a(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]] ; CHECK-NEXT: ret i32 [[OR3]] ; @@ -1972,7 +1972,7 @@ define i32 @or_and_not_not_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND]], [[NOT1]] ; CHECK-NEXT: ret i32 [[OR3]] ; @@ -2008,7 +2008,7 @@ define i32 @and_or_not_not_commute1(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@and_or_not_not_commute1 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[AND3]] @@ -2095,7 +2095,7 @@ define i32 @and_or_not_not_commute6(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@and_or_not_not_commute6 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[AND3]] @@ -2134,7 +2134,7 @@ define i32 @and_or_not_not_extra_not_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) ; CHECK-NEXT: ret i32 [[AND3]] @@ -2175,7 +2175,7 @@ define i32 @and_or_not_not_extra_and_use(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; 
CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[TMP2]], -1 @@ -2198,7 +2198,7 @@ define i32 @and_or_not_not_extra_or_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]] ; CHECK-NEXT: call void @use(i32 [[AND1]]) ; CHECK-NEXT: ret i32 [[AND3]] @@ -2240,7 +2240,7 @@ define i32 @and_or_not_not_2_extra_uses(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: call void @use(i32 [[AND1]]) ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[OR]]) ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]] ; CHECK-NEXT: ret i32 [[AND3]] @@ -2262,7 +2262,7 @@ define i32 @and_or_not_not_wrong_a(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[D]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND1]], [[OR]] ; CHECK-NEXT: ret i32 [[AND3]] ; @@ -2282,7 +2282,7 @@ define i32 @and_or_not_not_wrong_b(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[A]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR]], [[NOT1]] ; CHECK-NEXT: ret i32 [[AND3]] ; @@ -2471,7 +2471,7 @@ define i32 @and_not_or_or_not_or_xor_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 @@ -2539,7 +2539,7 @@ define i32 @and_not_or_or_not_or_xor_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[OR2]], -1 @@ -2567,7 +2567,7 @@ define i32 @or_not_and_and_not_and_xor(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 
[[AND2]], [[OR1]] @@ -2588,7 +2588,7 @@ define i32 @or_not_and_and_not_and_xor_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C]], [[B]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2632,7 +2632,7 @@ define i32 @or_not_and_and_not_and_xor_commute3(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2676,7 +2676,7 @@ define i32 @or_not_and_and_not_and_xor_commute5(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2697,7 +2697,7 @@ define i32 @or_not_and_and_not_and_xor_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2720,7 +2720,7 @@ define i32 @or_not_and_and_not_and_xor_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2743,7 +2743,7 @@ define i32 @or_not_and_and_not_and_xor_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2766,7 +2766,7 @@ define i32 @or_not_and_and_not_and_xor_use4(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: 
[[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2789,7 +2789,7 @@ define i32 @or_not_and_and_not_and_xor_use5(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR1]] @@ -2812,7 +2812,7 @@ define i32 @or_not_and_and_not_and_xor_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT1]], [[A]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[A]], [[NOT1]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[B]], [[C]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[XOR1]], [[A]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[AND2]], -1 @@ -2965,7 +2965,7 @@ define i32 @not_and_and_or_not_or_or_commute3(i32 %a, i32 %b0, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_not_or_or_commute3 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1 ; CHECK-NEXT: ret i32 [[OR3]] @@ -3051,7 +3051,7 @@ define i32 @not_and_and_or_not_or_or_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR2]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND2]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) @@ -3093,7 +3093,7 @@ define i32 @not_and_and_or_not_or_or_use5(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_not_or_or_use5 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[A]] ; CHECK-NEXT: [[OR3:%.*]] = xor i32 [[TMP2]], -1 @@ -3118,7 +3118,7 @@ define i32 @not_and_and_or_not_or_or_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR2]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]] ; CHECK-NEXT: [[OR3:%.*]] = or i32 [[AND2]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[AND2]]) @@ -3270,7 +3270,7 @@ define i32 @not_or_or_and_not_and_and_commute3(i32 %a, i32 %b0, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B0:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[AND3:%.*]] = or i32 [[TMP1]], [[NOT2]] ; CHECK-NEXT: 
ret i32 [[AND3]] ; @@ -3355,7 +3355,7 @@ define i32 @not_or_or_and_not_and_and_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND2]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) @@ -3396,7 +3396,7 @@ define i32 @not_or_or_and_not_and_and_use5(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@not_or_or_and_not_and_and_use5 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[C]], [[B]] ; CHECK-NEXT: [[AND3:%.*]] = or i32 [[TMP1]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[OR1]]) @@ -3419,7 +3419,7 @@ define i32 @not_or_or_and_not_and_and_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[AND1]], [[C]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[OR1]], [[C]] ; CHECK-NEXT: [[AND3:%.*]] = xor i32 [[AND2]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[OR2]]) @@ -3443,7 +3443,7 @@ define i32 @not_and_and_or_no_or(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -3461,7 +3461,7 @@ define i32 @not_and_and_or_no_or_commute1_and(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -3479,7 +3479,7 @@ define i32 @not_and_and_or_no_or_commute2_and(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -3497,7 +3497,7 @@ define i32 @not_and_and_or_no_or_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -3516,7 +3516,7 @@ define i32 @not_and_and_or_no_or_commute2(i32 %a, i32 %b0, i32 %c) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], 
[[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -3555,7 +3555,7 @@ define i32 @not_and_and_or_no_or_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[OR2]] @@ -3575,7 +3575,7 @@ define i32 @not_and_and_or_no_or_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[OR2]] @@ -3595,7 +3595,7 @@ define i32 @not_and_and_or_no_or_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[OR2]] @@ -3615,7 +3615,7 @@ define i32 @not_and_and_or_no_or_use4(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[OR2]] @@ -3636,7 +3636,7 @@ define i32 @not_and_and_or_no_or_use5(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[OR1]]) @@ -3658,7 +3658,7 @@ define i32 @not_and_and_or_no_or_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) @@ -3678,9 +3678,9 @@ define i32 @not_and_and_or_no_or_use7(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@not_and_and_or_no_or_use7 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[C]], [[TMP1]] ; CHECK-NEXT: 
[[OR2:%.*]] = and i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[AND1]]) ; CHECK-NEXT: ret i32 [[OR2]] @@ -3701,7 +3701,7 @@ define i32 @not_and_and_or_no_or_use8(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[OR1]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[AND2]], [[NOT1]] ; CHECK-NEXT: call void @use(i32 [[AND2]]) @@ -3724,7 +3724,7 @@ define i32 @not_or_or_and_no_and(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[AND2]] ; @@ -3742,7 +3742,7 @@ define i32 @not_or_or_and_no_and_commute1_or(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[AND2]] ; @@ -3760,7 +3760,7 @@ define i32 @not_or_or_and_no_and_commute2_or(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[AND2]] ; @@ -3778,7 +3778,7 @@ define i32 @not_or_or_and_no_and_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[AND2]] ; @@ -3797,7 +3797,7 @@ define i32 @not_or_or_and_no_and_commute2(i32 %a, i32 %b0, i32 %c) { ; CHECK-NEXT: [[B:%.*]] = sdiv i32 42, [[B0]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: ret i32 [[AND2]] ; @@ -3836,7 +3836,7 @@ define i32 @not_or_or_and_no_and_use1(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[AND2]] @@ -3856,7 +3856,7 @@ define i32 @not_or_or_and_no_and_use2(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 
[[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[AND2]] @@ -3876,7 +3876,7 @@ define i32 @not_or_or_and_no_and_use3(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[AND2]] @@ -3896,7 +3896,7 @@ define i32 @not_or_or_and_no_and_use4(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[NOT2]]) ; CHECK-NEXT: ret i32 [[AND2]] @@ -3916,7 +3916,7 @@ define i32 @not_or_or_and_no_and_use5(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[AND1]]) @@ -3938,7 +3938,7 @@ define i32 @not_or_or_and_no_and_use6(i32 %a, i32 %b, i32 %c) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT1:%.*]] = xor i32 [[AND1]], -1 ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[NOT1]]) @@ -3958,9 +3958,9 @@ define i32 @not_or_or_and_no_and_use7(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: define {{[^@]+}}@not_or_or_and_no_and_use7 ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[NOT2]], [[B]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[B]], [[NOT2]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[C]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[C]], [[TMP1]] ; CHECK-NEXT: [[AND2:%.*]] = or i32 [[TMP2]], [[NOT2]] ; CHECK-NEXT: call void @use(i32 [[OR1]]) ; CHECK-NEXT: ret i32 [[AND2]] @@ -3980,7 +3980,7 @@ define i32 @not_or_or_and_no_and_use8(i32 %a, i32 %b, i32 %c) { ; CHECK-SAME: (i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) { ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[NOT2:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[NOT2]], [[C]] +; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[C]], [[NOT2]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[TMP1]], [[B]] ; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[AND1]], [[OR2]] ; CHECK-NEXT: call void @use(i32 [[OR2]]) @@ -4000,7 +4000,7 @@ define i4 @and_orn_xor(i4 %a, i4 %b) { ; CHECK-LABEL: define {{[^@]+}}@and_orn_xor ; CHECK-SAME: (i4 [[A:%.*]], i4 [[B:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = xor i4 [[A]], -1 -; CHECK-NEXT: [[R:%.*]] = and i4 [[TMP1]], [[B]] +; CHECK-NEXT: [[R:%.*]] 
= and i4 [[B]], [[TMP1]] ; CHECK-NEXT: ret i4 [[R]] ; %xor = xor i4 %a, %b @@ -4014,7 +4014,7 @@ define <2 x i4> @and_orn_xor_commute1(<2 x i4> %a, <2 x i4> %b) { ; CHECK-LABEL: define {{[^@]+}}@and_orn_xor_commute1 ; CHECK-SAME: (<2 x i4> [[A:%.*]], <2 x i4> [[B:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i4> [[A]], -; CHECK-NEXT: [[R:%.*]] = and <2 x i4> [[TMP1]], [[B]] +; CHECK-NEXT: [[R:%.*]] = and <2 x i4> [[B]], [[TMP1]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %xor = xor <2 x i4> %a, %b @@ -4030,7 +4030,7 @@ define i32 @and_orn_xor_commute2(i32 %a, i32 %b) { ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[B]], [[A]] ; CHECK-NEXT: call void @use(i32 [[XOR]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[B]] +; CHECK-NEXT: [[R:%.*]] = and i32 [[B]], [[TMP1]] ; CHECK-NEXT: ret i32 [[R]] ; %xor = xor i32 %b, %a @@ -4047,7 +4047,7 @@ define i32 @and_orn_xor_commute3(i32 %a, i32 %b) { ; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A]], -1 ; CHECK-NEXT: call void @use(i32 [[NOTA]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], -1 -; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[B]] +; CHECK-NEXT: [[R:%.*]] = and i32 [[B]], [[TMP1]] ; CHECK-NEXT: ret i32 [[R]] ; %xor = xor i32 %b, %a @@ -4207,7 +4207,7 @@ define i16 @and_zext_zext(i8 %x, i4 %y) { ; CHECK-LABEL: define {{[^@]+}}@and_zext_zext ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = zext nneg i8 [[TMP2]] to i16 ; CHECK-NEXT: ret i16 [[R]] ; @@ -4221,7 +4221,7 @@ define i16 @or_zext_zext(i8 %x, i4 %y) { ; CHECK-LABEL: define {{[^@]+}}@or_zext_zext ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = zext i4 [[Y]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = zext i8 [[TMP2]] to i16 ; CHECK-NEXT: ret i16 [[R]] ; @@ -4235,7 +4235,7 @@ define <2 x i16> @xor_zext_zext(<2 x i8> %x, <2 x i4> %y) { ; CHECK-LABEL: define {{[^@]+}}@xor_zext_zext ; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i4> [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i4> [[Y]] to <2 x i8> -; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = zext <2 x i8> [[TMP2]] to <2 x i16> ; CHECK-NEXT: ret <2 x i16> [[R]] ; @@ -4249,7 +4249,7 @@ define i16 @and_sext_sext(i8 %x, i4 %y) { ; CHECK-LABEL: define {{[^@]+}}@and_sext_sext ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16 ; CHECK-NEXT: ret i16 [[R]] ; @@ -4263,7 +4263,7 @@ define i16 @or_sext_sext(i8 %x, i4 %y) { ; CHECK-LABEL: define {{[^@]+}}@or_sext_sext ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16 ; CHECK-NEXT: ret i16 [[R]] ; @@ -4277,7 +4277,7 @@ define i16 @xor_sext_sext(i8 %x, i4 %y) { ; CHECK-LABEL: define {{[^@]+}}@xor_sext_sext ; CHECK-SAME: (i8 [[X:%.*]], i4 [[Y:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = sext i4 [[Y]] to i8 -; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[X]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X]], [[TMP1]] 
; CHECK-NEXT: [[R:%.*]] = sext i8 [[TMP2]] to i16 ; CHECK-NEXT: ret i16 [[R]] ; @@ -4801,7 +4801,7 @@ define i1 @test_and_xor_freely_invertable_multiuse(i32 %x, i32 %y, i1 %z) { ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X]], [[Y]] ; CHECK-NEXT: call void @use_i1(i1 [[CMP]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[CMP]], true -; CHECK-NEXT: [[AND:%.*]] = and i1 [[TMP1]], [[Z]] +; CHECK-NEXT: [[AND:%.*]] = and i1 [[Z]], [[TMP1]] ; CHECK-NEXT: ret i1 [[AND]] ; %cmp = icmp sgt i32 %x, %y diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll index b5250fc1a7849..466718c802300 100644 --- a/llvm/test/Transforms/InstCombine/and.ll +++ b/llvm/test/Transforms/InstCombine/and.ll @@ -831,7 +831,7 @@ define i64 @test39(i32 %X) { define i32 @lowmask_add_zext(i8 %x, i32 %y) { ; CHECK-LABEL: @lowmask_add_zext( ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8 -; CHECK-NEXT: [[BO_NARROW:%.*]] = add i8 [[Y_TR]], [[X:%.*]] +; CHECK-NEXT: [[BO_NARROW:%.*]] = add i8 [[X:%.*]], [[Y_TR]] ; CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -845,7 +845,7 @@ define i32 @lowmask_add_zext_commute(i16 %x, i32 %p) { ; CHECK-LABEL: @lowmask_add_zext_commute( ; CHECK-NEXT: [[Y:%.*]] = mul i32 [[P:%.*]], [[P]] ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y]] to i16 -; CHECK-NEXT: [[BO_NARROW:%.*]] = add i16 [[Y_TR]], [[X:%.*]] +; CHECK-NEXT: [[BO_NARROW:%.*]] = add i16 [[X:%.*]], [[Y_TR]] ; CHECK-NEXT: [[R:%.*]] = zext i16 [[BO_NARROW]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -861,7 +861,7 @@ define i32 @lowmask_add_zext_commute(i16 %x, i32 %p) { define i32 @lowmask_add_zext_wrong_mask(i8 %x, i32 %y) { ; CHECK-LABEL: @lowmask_add_zext_wrong_mask( ; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 511 ; CHECK-NEXT: ret i32 [[R]] ; @@ -877,7 +877,7 @@ define i32 @lowmask_add_zext_use1(i8 %x, i32 %y) { ; CHECK-LABEL: @lowmask_add_zext_use1( ; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32 ; CHECK-NEXT: call void @use32(i32 [[ZX]]) -; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 255 ; CHECK-NEXT: ret i32 [[R]] ; @@ -893,7 +893,7 @@ define i32 @lowmask_add_zext_use1(i8 %x, i32 %y) { define i32 @lowmask_add_zext_use2(i8 %x, i32 %y) { ; CHECK-LABEL: @lowmask_add_zext_use2( ; CHECK-NEXT: [[ZX:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[BO:%.*]] = add i32 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[BO:%.*]] = add i32 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: call void @use32(i32 [[BO]]) ; CHECK-NEXT: [[R:%.*]] = and i32 [[BO]], 255 ; CHECK-NEXT: ret i32 [[R]] @@ -938,7 +938,7 @@ define i17 @lowmask_sub_zext_commute(i5 %x, i17 %y) { define i32 @lowmask_mul_zext(i8 %x, i32 %y) { ; CHECK-LABEL: @lowmask_mul_zext( ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y:%.*]] to i8 -; CHECK-NEXT: [[BO_NARROW:%.*]] = mul i8 [[Y_TR]], [[X:%.*]] +; CHECK-NEXT: [[BO_NARROW:%.*]] = mul i8 [[X:%.*]], [[Y_TR]] ; CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -952,7 +952,7 @@ define i32 @lowmask_xor_zext_commute(i8 %x, i32 %p) { ; CHECK-LABEL: @lowmask_xor_zext_commute( ; CHECK-NEXT: [[Y:%.*]] = mul i32 [[P:%.*]], [[P]] ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i32 [[Y]] to i8 -; CHECK-NEXT: [[BO_NARROW:%.*]] = xor i8 [[Y_TR]], [[X:%.*]] +; CHECK-NEXT: [[BO_NARROW:%.*]] = xor i8 [[X:%.*]], [[Y_TR]] ; 
CHECK-NEXT: [[R:%.*]] = zext i8 [[BO_NARROW]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -966,7 +966,7 @@ define i32 @lowmask_xor_zext_commute(i8 %x, i32 %p) { define i24 @lowmask_or_zext_commute(i16 %x, i24 %y) { ; CHECK-LABEL: @lowmask_or_zext_commute( ; CHECK-NEXT: [[Y_TR:%.*]] = trunc i24 [[Y:%.*]] to i16 -; CHECK-NEXT: [[BO_NARROW:%.*]] = or i16 [[Y_TR]], [[X:%.*]] +; CHECK-NEXT: [[BO_NARROW:%.*]] = or i16 [[X:%.*]], [[Y_TR]] ; CHECK-NEXT: [[R:%.*]] = zext i16 [[BO_NARROW]] to i24 ; CHECK-NEXT: ret i24 [[R]] ; @@ -1127,7 +1127,7 @@ define i32 @test45(i32 %x, i32 %y) nounwind { ; y & (~y | x) -> y | x define i32 @test46(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: @test46( -; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[A]] ; %n = xor i32 %y, -1 @@ -1139,7 +1139,7 @@ define i32 @test46(i32 %x, i32 %y) nounwind { ; y & (x | ~y) -> y | x define i32 @test47(i32 %x, i32 %y) nounwind { ; CHECK-LABEL: @test47( -; CHECK-NEXT: [[A:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[A]] ; %n = xor i32 %y, -1 @@ -1814,7 +1814,7 @@ define i16 @signbit_splat_mask_use2(i8 %x, i16 %y) { ; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7 ; CHECK-NEXT: [[S:%.*]] = sext i8 [[A]] to i16 ; CHECK-NEXT: call void @use16(i16 [[S]]) -; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]] ; CHECK-NEXT: ret i16 [[R]] ; %a = ashr i8 %x, 7 @@ -1830,7 +1830,7 @@ define i16 @not_signbit_splat_mask1(i8 %x, i16 %y) { ; CHECK-LABEL: @not_signbit_splat_mask1( ; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 7 ; CHECK-NEXT: [[Z:%.*]] = zext i8 [[A]] to i16 -; CHECK-NEXT: [[R:%.*]] = and i16 [[Z]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[Z]] ; CHECK-NEXT: ret i16 [[R]] ; %a = ashr i8 %x, 7 @@ -1845,7 +1845,7 @@ define i16 @not_signbit_splat_mask2(i8 %x, i16 %y) { ; CHECK-LABEL: @not_signbit_splat_mask2( ; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6 ; CHECK-NEXT: [[S:%.*]] = sext i8 [[A]] to i16 -; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]] ; CHECK-NEXT: ret i16 [[R]] ; %a = ashr i8 %x, 6 @@ -1920,7 +1920,7 @@ define i8 @not_ashr_not_bitwidth_mask(i8 %x, i8 %y) { ; CHECK-LABEL: @not_ashr_not_bitwidth_mask( ; CHECK-NEXT: [[SIGN:%.*]] = ashr i8 [[X:%.*]], 6 ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SIGN]], -1 -; CHECK-NEXT: [[R:%.*]] = and i8 [[NOT]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]] ; CHECK-NEXT: ret i8 [[R]] ; %sign = ashr i8 %x, 6 @@ -1935,7 +1935,7 @@ define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) { ; CHECK-LABEL: @not_lshr_bitwidth_mask( ; CHECK-NEXT: [[SIGN:%.*]] = lshr i8 [[X:%.*]], 7 ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[SIGN]], -1 -; CHECK-NEXT: [[R:%.*]] = and i8 [[NOT]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[NOT]] ; CHECK-NEXT: ret i8 [[R]] ; %sign = lshr i8 %x, 7 @@ -2029,7 +2029,7 @@ define i16 @not_invert_signbit_splat_mask1(i8 %x, i16 %y) { ; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1 ; CHECK-NEXT: [[N:%.*]] = sext i1 [[ISNOTNEG]] to i8 ; CHECK-NEXT: [[Z:%.*]] = zext i8 [[N]] to i16 -; CHECK-NEXT: [[R:%.*]] = and i16 [[Z]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[Z]] ; CHECK-NEXT: ret i16 [[R]] ; %a = ashr i8 %x, 7 @@ -2046,7 +2046,7 @@ define i16 @not_invert_signbit_splat_mask2(i8 %x, i16 %y) { ; CHECK-NEXT: [[A:%.*]] = ashr i8 [[X:%.*]], 6 ; CHECK-NEXT: [[N:%.*]] = xor i8 
[[A]], -1 ; CHECK-NEXT: [[S:%.*]] = sext i8 [[N]] to i16 -; CHECK-NEXT: [[R:%.*]] = and i16 [[S]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i16 [[Y:%.*]], [[S]] ; CHECK-NEXT: ret i16 [[R]] ; %a = ashr i8 %x, 6 @@ -2504,7 +2504,7 @@ define i8 @negate_lowbitmask_use2(i8 %x, i8 %y) { ; CHECK-NEXT: [[A:%.*]] = and i8 [[X:%.*]], 1 ; CHECK-NEXT: [[N:%.*]] = sub nsw i8 0, [[A]] ; CHECK-NEXT: call void @use8(i8 [[N]]) -; CHECK-NEXT: [[R:%.*]] = and i8 [[N]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[Y:%.*]], [[N]] ; CHECK-NEXT: ret i8 [[R]] ; %a = and i8 %x, 1 @@ -2553,7 +2553,7 @@ define i32 @and_zext_multiuse(i32 %a, i1 %b) { ; CHECK-LABEL: @and_zext_multiuse( ; CHECK-NEXT: [[MASK:%.*]] = zext i1 [[B:%.*]] to i32 ; CHECK-NEXT: call void @use32(i32 [[MASK]]) -; CHECK-NEXT: [[R:%.*]] = and i32 [[MASK]], [[A:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i32 [[A:%.*]], [[MASK]] ; CHECK-NEXT: ret i32 [[R]] ; %mask = zext i1 %b to i32 @@ -2636,7 +2636,7 @@ define i32 @and_zext_eq_zero(i32 %A, i32 %C) { define i32 @canonicalize_and_add_power2_or_zero(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_add_power2_or_zero( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X2]], -1 @@ -2656,7 +2656,7 @@ define i32 @canonicalize_and_add_power2_or_zero(i32 %x, i32 %y) { define i32 @canonicalize_and_sub_power2_or_zero(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]] @@ -2674,7 +2674,7 @@ define i32 @canonicalize_and_sub_power2_or_zero(i32 %x, i32 %y) { define i32 @canonicalize_and_add_power2_or_zero_commuted1(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted1( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]] @@ -2692,7 +2692,7 @@ define i32 @canonicalize_and_add_power2_or_zero_commuted1(i32 %x, i32 %y) { define i32 @canonicalize_and_add_power2_or_zero_commuted2(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted2( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X2]], -1 @@ -2712,7 +2712,7 @@ define i32 @canonicalize_and_add_power2_or_zero_commuted2(i32 %x, i32 %y) { define i32 @canonicalize_and_add_power2_or_zero_commuted3(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_commuted3( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[P2]], [[TMP1]] @@ -2730,7 +2730,7 @@ define i32 
@canonicalize_and_add_power2_or_zero_commuted3(i32 %x, i32 %y) { define i32 @canonicalize_and_sub_power2_or_zero_commuted_nofold(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_commuted_nofold( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[VAL:%.*]] = sub i32 [[P2]], [[X:%.*]] ; CHECK-NEXT: [[AND:%.*]] = and i32 [[VAL]], [[P2]] @@ -2759,7 +2759,7 @@ define i32 @canonicalize_and_add_non_power2_or_zero_nofold(i32 %x, i32 %y) { define i32 @canonicalize_and_add_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_add_power2_or_zero_multiuse_nofold( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[X2:%.*]] = mul i32 [[X:%.*]], [[X]] ; CHECK-NEXT: [[VAL:%.*]] = add i32 [[X2]], [[P2]] @@ -2781,7 +2781,7 @@ define i32 @canonicalize_and_add_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) define i32 @canonicalize_and_sub_power2_or_zero_multiuse_nofold(i32 %x, i32 %y) { ; CHECK-LABEL: @canonicalize_and_sub_power2_or_zero_multiuse_nofold( ; CHECK-NEXT: [[NY:%.*]] = sub i32 0, [[Y:%.*]] -; CHECK-NEXT: [[P2:%.*]] = and i32 [[NY]], [[Y]] +; CHECK-NEXT: [[P2:%.*]] = and i32 [[Y]], [[NY]] ; CHECK-NEXT: call void @use32(i32 [[P2]]) ; CHECK-NEXT: [[VAL:%.*]] = sub i32 [[X:%.*]], [[P2]] ; CHECK-NEXT: call void @use32(i32 [[VAL]]) diff --git a/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll b/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll index 9810e5057d8a9..eca38586d01d0 100644 --- a/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll +++ b/llvm/test/Transforms/InstCombine/apint-and-xor-merge.ll @@ -8,7 +8,7 @@ define i57 @test1(i57 %x, i57 %y, i57 %z) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[TMP61:%.*]] = xor i57 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[TMP7:%.*]] = and i57 [[TMP61]], [[Z:%.*]] +; CHECK-NEXT: [[TMP7:%.*]] = and i57 [[Z:%.*]], [[TMP61]] ; CHECK-NEXT: ret i57 [[TMP7]] ; %tmp3 = and i57 %z, %x diff --git a/llvm/test/Transforms/InstCombine/apint-or.ll b/llvm/test/Transforms/InstCombine/apint-or.ll index 38bffdf35a364..07a0e497e521e 100644 --- a/llvm/test/Transforms/InstCombine/apint-or.ll +++ b/llvm/test/Transforms/InstCombine/apint-or.ll @@ -20,7 +20,7 @@ define i39 @test2(i39 %V, i39 %M) { ; CHECK-LABEL: define i39 @test2( ; CHECK-SAME: i39 [[V:%.*]], i39 [[M:%.*]]) { ; CHECK-NEXT: [[N:%.*]] = and i39 [[M]], -274877906944 -; CHECK-NEXT: [[A:%.*]] = add i39 [[N]], [[V]] +; CHECK-NEXT: [[A:%.*]] = add i39 [[V]], [[N]] ; CHECK-NEXT: ret i39 [[A]] ; %C1 = xor i39 274877906943, -1 ;; C2 = 274877906943 @@ -51,7 +51,7 @@ define i399 @test5(i399 %V, i399 %M) { ; CHECK-LABEL: define i399 @test5( ; CHECK-SAME: i399 [[V:%.*]], i399 [[M:%.*]]) { ; CHECK-NEXT: [[N:%.*]] = and i399 [[M]], 18446742974197923840 -; CHECK-NEXT: [[A:%.*]] = add i399 [[N]], [[V]] +; CHECK-NEXT: [[A:%.*]] = add i399 [[V]], [[N]] ; CHECK-NEXT: ret i399 [[A]] ; %C1 = xor i399 274877906943, -1 ;; C2 = 274877906943 diff --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll index ecf9c4e9c4e69..21c6c18009d1d 100644 --- a/llvm/test/Transforms/InstCombine/apint-shift.ll +++ b/llvm/test/Transforms/InstCombine/apint-shift.ll @@ -538,7 +538,7 @@ define <2 x i43> @lshr_shl_eq_amt_multi_use_splat_vec(<2 x i43> 
%A) { define i37 @test25(i37 %AA, i37 %BB) { ; CHECK-LABEL: @test25( ; CHECK-NEXT: [[D:%.*]] = and i37 [[AA:%.*]], -131072 -; CHECK-NEXT: [[C2:%.*]] = add i37 [[D]], [[BB:%.*]] +; CHECK-NEXT: [[C2:%.*]] = add i37 [[BB:%.*]], [[D]] ; CHECK-NEXT: [[F:%.*]] = and i37 [[C2]], -131072 ; CHECK-NEXT: ret i37 [[F]] ; diff --git a/llvm/test/Transforms/InstCombine/apint-sub.ll b/llvm/test/Transforms/InstCombine/apint-sub.ll index 1c0374d443740..e9abe1a7e627d 100644 --- a/llvm/test/Transforms/InstCombine/apint-sub.ll +++ b/llvm/test/Transforms/InstCombine/apint-sub.ll @@ -50,7 +50,7 @@ define i19 @test5(i19 %A, i19 %Bok, i19 %Cok) { define i57 @test6(i57 %A, i57 %B) { ; CHECK-LABEL: @test6( ; CHECK-NEXT: [[B_NOT:%.*]] = xor i57 [[B:%.*]], -1 -; CHECK-NEXT: [[D:%.*]] = and i57 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = and i57 [[A:%.*]], [[B_NOT]] ; CHECK-NEXT: ret i57 [[D]] ; %C = and i57 %A, %B diff --git a/llvm/test/Transforms/InstCombine/ashr-lshr.ll b/llvm/test/Transforms/InstCombine/ashr-lshr.ll index a81cd47b1cd4b..9e31c9b0738c6 100644 --- a/llvm/test/Transforms/InstCombine/ashr-lshr.ll +++ b/llvm/test/Transforms/InstCombine/ashr-lshr.ll @@ -620,7 +620,7 @@ define <2 x i8> @ashr_known_pos_exact_vec(<2 x i8> %x, <2 x i8> %y) { define i32 @lshr_mul_times_3_div_2(i32 %0) { ; CHECK-LABEL: @lshr_mul_times_3_div_2( ; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 1 -; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]] +; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: ret i32 [[LSHR]] ; %mul = mul nsw nuw i32 %0, 3 @@ -631,7 +631,7 @@ define i32 @lshr_mul_times_3_div_2(i32 %0) { define i32 @lshr_mul_times_3_div_2_exact(i32 %x) { ; CHECK-LABEL: @lshr_mul_times_3_div_2_exact( ; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1 -; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[LSHR]] ; %mul = mul nsw i32 %x, 3 @@ -670,7 +670,7 @@ define i32 @mul_times_3_div_2_multiuse_lshr(i32 %x) { define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) { ; CHECK-LABEL: @lshr_mul_times_3_div_2_exact_2( ; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 1 -; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[LSHR]] ; %mul = mul nuw i32 %x, 3 @@ -681,7 +681,7 @@ define i32 @lshr_mul_times_3_div_2_exact_2(i32 %x) { define i32 @lshr_mul_times_5_div_4(i32 %0) { ; CHECK-LABEL: @lshr_mul_times_5_div_4( ; CHECK-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP0:%.*]], 2 -; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]] +; CHECK-NEXT: [[LSHR:%.*]] = add nuw nsw i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: ret i32 [[LSHR]] ; %mul = mul nsw nuw i32 %0, 5 @@ -692,7 +692,7 @@ define i32 @lshr_mul_times_5_div_4(i32 %0) { define i32 @lshr_mul_times_5_div_4_exact(i32 %x) { ; CHECK-LABEL: @lshr_mul_times_5_div_4_exact( ; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2 -; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[LSHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[LSHR]] ; %mul = mul nsw i32 %x, 5 @@ -731,7 +731,7 @@ define i32 @mul_times_5_div_4_multiuse_lshr(i32 %x) { define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) { ; CHECK-LABEL: @lshr_mul_times_5_div_4_exact_2( ; CHECK-NEXT: [[TMP1:%.*]] = lshr exact i32 [[X:%.*]], 2 -; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[LSHR:%.*]] = add nuw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[LSHR]] ; 
%mul = mul nuw i32 %x, 5 @@ -742,7 +742,7 @@ define i32 @lshr_mul_times_5_div_4_exact_2(i32 %x) { define i32 @ashr_mul_times_3_div_2(i32 %0) { ; CHECK-LABEL: @ashr_mul_times_3_div_2( ; CHECK-NEXT: [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 1 -; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]] +; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nuw nsw i32 %0, 3 @@ -753,7 +753,7 @@ define i32 @ashr_mul_times_3_div_2(i32 %0) { define i32 @ashr_mul_times_3_div_2_exact(i32 %x) { ; CHECK-LABEL: @ashr_mul_times_3_div_2_exact( ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1 -; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nsw i32 %x, 3 @@ -805,7 +805,7 @@ define i32 @mul_times_3_div_2_multiuse_ashr(i32 %x) { define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) { ; CHECK-LABEL: @ashr_mul_times_3_div_2_exact_2( ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 1 -; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nsw i32 %x, 3 @@ -816,7 +816,7 @@ define i32 @ashr_mul_times_3_div_2_exact_2(i32 %x) { define i32 @ashr_mul_times_5_div_4(i32 %0) { ; CHECK-LABEL: @ashr_mul_times_5_div_4( ; CHECK-NEXT: [[TMP2:%.*]] = ashr i32 [[TMP0:%.*]], 2 -; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP2]], [[TMP0]] +; CHECK-NEXT: [[ASHR:%.*]] = add nuw nsw i32 [[TMP0]], [[TMP2]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nuw nsw i32 %0, 5 @@ -827,7 +827,7 @@ define i32 @ashr_mul_times_5_div_4(i32 %0) { define i32 @ashr_mul_times_5_div_4_exact(i32 %x) { ; CHECK-LABEL: @ashr_mul_times_5_div_4_exact( ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2 -; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nsw i32 %x, 5 @@ -866,7 +866,7 @@ define i32 @mul_times_5_div_4_multiuse_ashr(i32 %x) { define i32 @ashr_mul_times_5_div_4_exact_2(i32 %x) { ; CHECK-LABEL: @ashr_mul_times_5_div_4_exact_2( ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact i32 [[X:%.*]], 2 -; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[ASHR:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[ASHR]] ; %mul = mul nsw i32 %x, 5 diff --git a/llvm/test/Transforms/InstCombine/assume-align.ll b/llvm/test/Transforms/InstCombine/assume-align.ll index 798707f317d29..ce3195d50be7c 100644 --- a/llvm/test/Transforms/InstCombine/assume-align.ll +++ b/llvm/test/Transforms/InstCombine/assume-align.ll @@ -88,7 +88,7 @@ define void @f3(i64 %a, ptr %b) { ; CHECK-LABEL: @f3( ; CHECK-NEXT: [[C:%.*]] = ptrtoint ptr [[B:%.*]] to i64 ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(ptr [[B]], i64 4294967296) ] -; CHECK-NEXT: [[D:%.*]] = add i64 [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = add i64 [[A:%.*]], [[C]] ; CHECK-NEXT: call void @g(i64 [[D]]) ; CHECK-NEXT: ret void ; diff --git a/llvm/test/Transforms/InstCombine/assume-separate_storage.ll b/llvm/test/Transforms/InstCombine/assume-separate_storage.ll index 8fa8c3e80786d..b94c303e5a70c 100644 --- a/llvm/test/Transforms/InstCombine/assume-separate_storage.ll +++ b/llvm/test/Transforms/InstCombine/assume-separate_storage.ll @@ -24,7 +24,7 @@ define i64 @folds_removed_operands(ptr %a, ptr %b, i64 %n1, i64 %n2) { ; CHECK-LABEL: @folds_removed_operands( ; CHECK-NEXT: entry: ; CHECK-NEXT: 
[[REASS_ADD:%.*]] = shl i64 [[N2:%.*]], 1 -; CHECK-NEXT: [[Y:%.*]] = add i64 [[REASS_ADD]], [[N1:%.*]] +; CHECK-NEXT: [[Y:%.*]] = add i64 [[N1:%.*]], [[REASS_ADD]] ; CHECK-NEXT: call void @llvm.assume(i1 true) [ "separate_storage"(ptr [[A:%.*]], ptr [[B:%.*]]) ] ; CHECK-NEXT: ret i64 [[Y]] ; diff --git a/llvm/test/Transforms/InstCombine/avg-lsb.ll b/llvm/test/Transforms/InstCombine/avg-lsb.ll index 23a47166bf2fe..1e9e4e3bcafb2 100644 --- a/llvm/test/Transforms/InstCombine/avg-lsb.ll +++ b/llvm/test/Transforms/InstCombine/avg-lsb.ll @@ -5,7 +5,7 @@ define i8 @avg_lsb(i8 %a, i8 %b) { ; CHECK-LABEL: define i8 @avg_lsb( ; CHECK-SAME: i8 [[A:%.*]], i8 [[B:%.*]]) { ; CHECK-NEXT: [[REM:%.*]] = and i8 [[A]], 1 -; CHECK-NEXT: [[DIV2:%.*]] = and i8 [[REM]], [[B]] +; CHECK-NEXT: [[DIV2:%.*]] = and i8 [[B]], [[REM]] ; CHECK-NEXT: ret i8 [[DIV2]] ; %rem = and i8 %a, 1 @@ -35,7 +35,7 @@ define <2 x i8> @avg_lsb_vector(<2 x i8> %a, <2 x i8> %b) { ; CHECK-LABEL: define <2 x i8> @avg_lsb_vector( ; CHECK-SAME: <2 x i8> [[A:%.*]], <2 x i8> [[B:%.*]]) { ; CHECK-NEXT: [[REM:%.*]] = and <2 x i8> [[A]], <i8 1, i8 1> -; CHECK-NEXT: [[DIV2:%.*]] = and <2 x i8> [[REM]], [[B]] +; CHECK-NEXT: [[DIV2:%.*]] = and <2 x i8> [[B]], [[REM]] ; CHECK-NEXT: ret <2 x i8> [[DIV2]] ; %rem = and <2 x i8> %a, <i8 1, i8 1> diff --git a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll index f776dc13bb4e5..4b5de41fc7095 100644 --- a/llvm/test/Transforms/InstCombine/binop-and-shifts.ll +++ b/llvm/test/Transforms/InstCombine/binop-and-shifts.ll @@ -77,7 +77,7 @@ define i8 @shl_and_and_fail2(i8 %x, i8 %y) { define <2 x i8> @lshr_and_or(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @lshr_and_or( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i8> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; @@ -106,7 +106,7 @@ define <2 x i8> @lshr_and_or_fail(<2 x i8> %x, <2 x i8> %y) { define i8 @shl_and_xor(i8 %x, i8 %y) { ; CHECK-LABEL: @shl_and_xor( ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], 10 -; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1 ; CHECK-NEXT: ret i8 [[BW1]] ; @@ -120,7 +120,7 @@ define i8 @shl_and_add(i8 %x, i8 %y) { ; CHECK-LABEL: @shl_and_add( ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y:%.*]], 59 -; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1 ; CHECK-NEXT: ret i8 [[BW1]] ; @@ -149,7 +149,7 @@ define i8 @shl_xor_add_fail(i8 %x, i8 %y) { define i8 @lshr_or_and(i8 %x, i8 %y) { ; CHECK-LABEL: @lshr_or_and( ; CHECK-NEXT: [[TMP1:%.*]] = or i8 [[X:%.*]], -64 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = lshr i8 [[TMP2]], 5 ; CHECK-NEXT: ret i8 [[BW1]] ; @@ -177,7 +177,7 @@ define i8 @lshr_or_or_fail(i8 %x, i8 %y) { define <2 x i8> @shl_xor_and(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @shl_xor_and( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; @@ -307,7 +307,7 @@ define i8
@lshr_add_add_no_const_fail(i8 %x, i8 %y, i8 %sh, i8 %mask) { define <2 x i8> @lshr_add_and(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @lshr_add_and( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; @@ -393,7 +393,7 @@ define i8 @lshr_xor_or_fail_bad_mask(i8 %x, i8 %y) { define <2 x i8> @lshr_or_xor_good_mask(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @lshr_or_xor_good_mask( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i8> [[Y:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = lshr <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; @@ -450,7 +450,7 @@ define i8 @shl_xor_xor_bad_mask_distribute(i8 %x, i8 %y) { define i8 @shl_add_and(i8 %x, i8 %y) { ; CHECK-LABEL: @shl_add_and( ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], 61 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = shl i8 [[TMP2]], 1 ; CHECK-NEXT: ret i8 [[BW1]] ; @@ -509,7 +509,7 @@ define i8 @lshr_add_xor_fail(i8 %x, i8 %y) { define <2 x i8> @lshr_and_add(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @lshr_and_add( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[BW1:%.*]] = shl <2 x i8> [[TMP2]], ; CHECK-NEXT: ret <2 x i8> [[BW1]] ; @@ -555,7 +555,7 @@ define i8 @shl_add_and_fail_mismatch_shift(i8 %x, i8 %y) { define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) { ; CHECK-LABEL: @and_ashr_not( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret i8 [[AND]] ; @@ -569,7 +569,7 @@ define i8 @and_ashr_not(i8 %x, i8 %y, i8 %shamt) { define i8 @and_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) { ; CHECK-LABEL: @and_ashr_not_commuted( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret i8 [[AND]] ; @@ -634,7 +634,7 @@ define i8 @and_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) { define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @and_ashr_not_vec( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[AND]] ; @@ -648,7 +648,7 @@ define <4 x i8> @and_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { define <4 x i8> @and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @and_ashr_not_vec_commuted( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[AND]] ; @@ -662,7 +662,7 @@ define <4 x i8>
@and_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s define <4 x i8> @and_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @and_ashr_not_vec_poison_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[AND]] ; @@ -689,7 +689,7 @@ define <4 x i8> @and_ashr_not_vec_poison_2(<4 x i8> %x, <4 x i8> %y, <4 x i8> %s define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) { ; CHECK-LABEL: @or_ashr_not( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret i8 [[OR]] ; @@ -703,7 +703,7 @@ define i8 @or_ashr_not(i8 %x, i8 %y, i8 %shamt) { define i8 @or_ashr_not_commuted(i8 %x, i8 %y, i8 %shamt) { ; CHECK-LABEL: @or_ashr_not_commuted( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = ashr i8 [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret i8 [[OR]] ; @@ -768,7 +768,7 @@ define i8 @or_ashr_not_fail_invalid_xor_constant(i8 %x, i8 %y, i8 %shamt) { define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @or_ashr_not_vec( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[OR]] ; @@ -782,7 +782,7 @@ define <4 x i8> @or_ashr_not_vec(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @or_ashr_not_vec_commuted( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[OR]] ; @@ -796,7 +796,7 @@ define <4 x i8> @or_ashr_not_vec_commuted(<4 x i8> %x, <4 x i8> %y, <4 x i8> %sh define <4 x i8> @or_ashr_not_vec_poison_1(<4 x i8> %x, <4 x i8> %y, <4 x i8> %shamt) { ; CHECK-LABEL: @or_ashr_not_vec_poison_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i8> [[Y:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or <4 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = ashr <4 x i8> [[TMP2]], [[SHAMT:%.*]] ; CHECK-NEXT: ret <4 x i8> [[OR]] ; diff --git a/llvm/test/Transforms/InstCombine/binop-cast.ll b/llvm/test/Transforms/InstCombine/binop-cast.ll index d521a7d5a2b3a..9d3b18c5e79ed 100644 --- a/llvm/test/Transforms/InstCombine/binop-cast.ll +++ b/llvm/test/Transforms/InstCombine/binop-cast.ll @@ -129,7 +129,7 @@ define i32 @and_not_zext_to_sel(i32 %x, i1 %y) { ; CHECK-NEXT: [[ZEXT:%.*]] = zext i1 [[Y:%.*]] to i32 ; CHECK-NEXT: call void @use(i32 [[ZEXT]]) ; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[ZEXT]], -1 -; CHECK-NEXT: [[R:%.*]] = and i32 [[NOT]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[NOT]] ; CHECK-NEXT: ret i32 [[R]] ; %zext = zext i1 %y to i32 @@ -175,7 +175,7 @@ define i32 @or_sext_to_sel_multi_use(i32 %x, i1 %y) { ; CHECK-LABEL:
@or_sext_to_sel_multi_use( ; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32 ; CHECK-NEXT: call void @use(i32 [[SEXT]]) -; CHECK-NEXT: [[R:%.*]] = or i32 [[SEXT]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = or i32 [[X:%.*]], [[SEXT]] ; CHECK-NEXT: ret i32 [[R]] ; %sext = sext i1 %y to i32 @@ -200,7 +200,7 @@ define i32 @or_sext_to_sel_multi_use_constant_mask(i1 %y) { define i32 @xor_sext_to_sel(i32 %x, i1 %y) { ; CHECK-LABEL: @xor_sext_to_sel( ; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32 -; CHECK-NEXT: [[R:%.*]] = xor i32 [[SEXT]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = xor i32 [[X:%.*]], [[SEXT]] ; CHECK-NEXT: ret i32 [[R]] ; %sext = sext i1 %y to i32 @@ -236,7 +236,7 @@ define i32 @xor_sext_to_sel_multi_use(i32 %x, i1 %y) { ; CHECK-LABEL: @xor_sext_to_sel_multi_use( ; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[Y:%.*]] to i32 ; CHECK-NEXT: call void @use(i32 [[SEXT]]) -; CHECK-NEXT: [[R:%.*]] = xor i32 [[SEXT]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = xor i32 [[X:%.*]], [[SEXT]] ; CHECK-NEXT: ret i32 [[R]] ; %sext = sext i1 %y to i32 diff --git a/llvm/test/Transforms/InstCombine/bit-checks.ll b/llvm/test/Transforms/InstCombine/bit-checks.ll index c7e1fbb894549..208b2b16e9903 100644 --- a/llvm/test/Transforms/InstCombine/bit-checks.ll +++ b/llvm/test/Transforms/InstCombine/bit-checks.ll @@ -137,7 +137,7 @@ define i32 @main3b_logical(i32 %argc) { define i32 @main3e_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main3e_like( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -238,7 +238,7 @@ define i32 @main3d_logical(i32 %argc) { define i32 @main3f_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main3f_like( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], 0 ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -355,7 +355,7 @@ define i32 @main4b_logical(i32 %argc) { define i32 @main4e_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main4e_like( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -456,7 +456,7 @@ define i32 @main4d_logical(i32 %argc) { define i32 @main4f_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main4f_like( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -529,7 +529,7 @@ define i32 @main5_like_logical(i32 %argc, i32 %argc2) { define i32 @main5e_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main5e_like( ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: 
[[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[ARGC]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -602,7 +602,7 @@ define i32 @main5c_like_logical(i32 %argc, i32 %argc2) { define i32 @main5f_like(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main5f_like( ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR_COND_NOT:%.*]] = icmp eq i32 [[TMP2]], [[ARGC]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[OR_COND_NOT]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -772,7 +772,7 @@ define i32 @main6d_logical(i32 %argc) { define i32 @main7a(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main7a( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARGC2:%.*]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -810,8 +810,8 @@ define i32 @main7b(i32 %argc, i32 %argc2, i32 %argc3x) { ; CHECK-LABEL: @main7b( ; CHECK-NEXT: [[ARGC3:%.*]] = mul i32 [[ARGC3X:%.*]], 42 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[ARGC2:%.*]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]] -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC3]], [[ARGC]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[ARGC3]] ; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = or i1 [[TOBOOL]], [[TOBOOL3]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32 @@ -830,9 +830,9 @@ define i32 @main7b(i32 %argc, i32 %argc2, i32 %argc3x) { define i32 @main7b_logical(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main7b_logical( ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[ARGC2:%.*]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[ARGC3:%.*]] -; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[ARGC3]] +; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -851,7 +851,7 @@ define i32 @main7c(i32 %argc, i32 %argc2, i32 %argc3x) { ; CHECK-LABEL: @main7c( ; CHECK-NEXT: [[ARGC3:%.*]] = mul i32 [[ARGC3X:%.*]], 42 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC:%.*]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC3]], [[ARGC]] ; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = or i1 [[TOBOOL]], [[TOBOOL3]] @@ -871,9 +871,9 @@ define i32 @main7c(i32 %argc, i32 %argc2, i32 %argc3x) { define i32 @main7c_logical(i32 %argc, i32 %argc2, i32 %argc3) { ; CHECK-LABEL: @main7c_logical( ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC:%.*]] -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[ARGC2]] +; CHECK-NEXT: 
[[TOBOOL:%.*]] = icmp ne i32 [[ARGC2]], [[AND1]] ; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC]] -; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[ARGC3]] +; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[ARGC3]], [[AND2]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -893,7 +893,7 @@ define i32 @main7d(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) { ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -913,9 +913,9 @@ define i32 @main7d_logical(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %a ; CHECK-LABEL: @main7d_logical( ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[BC]], [[ARGC:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[BC]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[AND1]], [[BC]] -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[DE]], [[ARGC]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[DE]] ; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[AND2]], [[DE]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], i1 true, i1 [[TOBOOL3]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32 @@ -938,7 +938,7 @@ define i32 @main7e(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) { ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -983,7 +983,7 @@ define i32 @main7f(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) { ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] @@ -1003,9 +1003,9 @@ define i32 @main7f_logical(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %a ; CHECK-LABEL: @main7f_logical( ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[BC]], [[ARGC:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[ARGC:%.*]], [[BC]] ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[BC]], [[AND1]] -; CHECK-NEXT: [[AND2:%.*]] = and i32 [[DE]], [[ARGC]] +; CHECK-NEXT: [[AND2:%.*]] = and i32 [[ARGC]], [[DE]] ; CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i32 [[DE]], [[AND2]] ; CHECK-NEXT: [[AND_COND_NOT:%.*]] = select i1 [[TOBOOL]], 
i1 true, i1 [[TOBOOL3]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND_NOT]] to i32 @@ -1028,7 +1028,7 @@ define i32 @main7g(i32 %argc, i32 %argc2, i32 %argc3, i32 %argc4, i32 %argc5) { ; CHECK-NEXT: [[BC:%.*]] = and i32 [[ARGC2:%.*]], [[ARGC4:%.*]] ; CHECK-NEXT: [[DE:%.*]] = and i32 [[ARGC3:%.*]], [[ARGC5:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[BC]], [[DE]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARGC:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARGC:%.*]], [[TMP1]] ; CHECK-NEXT: [[AND_COND:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: [[STOREMERGE:%.*]] = zext i1 [[AND_COND]] to i32 ; CHECK-NEXT: ret i32 [[STOREMERGE]] diff --git a/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll b/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll index 061182fdaf3c8..3744d8c9171c7 100644 --- a/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/bitcast-inseltpoison.ll @@ -120,7 +120,7 @@ define <2 x i8> @canonicalize_bitcast_logic_with_constant(<4 x i4> %x) { define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_and_bitcast( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B:%.*]] to <4 x i32> -; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret <4 x i32> [[BC3]] ; %bc1 = bitcast <4 x i32> %a to <2 x i64> @@ -133,7 +133,7 @@ define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) { define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_and_bitcast_to_fp( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16> -; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float> ; CHECK-NEXT: ret <4 x float> [[BC3]] ; @@ -149,7 +149,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) { ; CHECK-LABEL: @bitcast_or_bitcast( ; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 [[A:%.*]] to <2 x i64> -; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[B:%.*]], [[BC1]] ; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[OR]] to i128 ; CHECK-NEXT: ret i128 [[BC2]] ; @@ -164,7 +164,7 @@ define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) { define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) { ; CHECK-LABEL: @bitcast_xor_bitcast( ; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128 -; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], [[B:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[B:%.*]], [[BC1]] ; CHECK-NEXT: [[BC2:%.*]] = bitcast i128 [[XOR]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[BC2]] ; diff --git a/llvm/test/Transforms/InstCombine/bitcast.ll b/llvm/test/Transforms/InstCombine/bitcast.ll index 26047f2c899a3..4ab24ce7b925d 100644 --- a/llvm/test/Transforms/InstCombine/bitcast.ll +++ b/llvm/test/Transforms/InstCombine/bitcast.ll @@ -122,7 +122,7 @@ define <2 x i8> @canonicalize_bitcast_logic_with_constant(<4 x i4> %x) { define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_and_bitcast( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i16> [[B:%.*]] to <4 x i32> -; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[BC3:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret <4 x i32> [[BC3]] 
; %bc1 = bitcast <4 x i32> %a to <2 x i64> @@ -135,7 +135,7 @@ define <4 x i32> @bitcasts_and_bitcast(<4 x i32> %a, <8 x i16> %b) { define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_and_bitcast_to_fp( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16> -; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <8 x i16> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float> ; CHECK-NEXT: ret <4 x float> [[BC3]] ; @@ -149,7 +149,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { define <2 x double> @bitcasts_or_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_or_bitcast_to_fp( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x i16> -; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i16> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or <8 x i16> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <2 x double> ; CHECK-NEXT: ret <2 x double> [[BC3]] ; @@ -163,7 +163,7 @@ define <2 x double> @bitcasts_or_bitcast_to_fp(<4 x float> %a, <8 x i16> %b) { define <4 x float> @bitcasts_xor_bitcast_to_fp(<2 x double> %a, <8 x i16> %b) { ; CHECK-LABEL: @bitcasts_xor_bitcast_to_fp( ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[A:%.*]] to <8 x i16> -; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i16> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor <8 x i16> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[BC3:%.*]] = bitcast <8 x i16> [[TMP2]] to <4 x float> ; CHECK-NEXT: ret <4 x float> [[BC3]] ; @@ -198,7 +198,7 @@ define <4 x float> @bitcasts_and_bitcast_to_fp_multiuse(<4 x float> %a, <8 x i16 define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) { ; CHECK-LABEL: @bitcast_or_bitcast( ; CHECK-NEXT: [[BC1:%.*]] = bitcast i128 [[A:%.*]] to <2 x i64> -; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[BC1]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[B:%.*]], [[BC1]] ; CHECK-NEXT: [[BC2:%.*]] = bitcast <2 x i64> [[OR]] to i128 ; CHECK-NEXT: ret i128 [[BC2]] ; @@ -213,7 +213,7 @@ define i128 @bitcast_or_bitcast(i128 %a, <2 x i64> %b) { define <4 x i32> @bitcast_xor_bitcast(<4 x i32> %a, i128 %b) { ; CHECK-LABEL: @bitcast_xor_bitcast( ; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[A:%.*]] to i128 -; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[BC1]], [[B:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = xor i128 [[B:%.*]], [[BC1]] ; CHECK-NEXT: [[BC2:%.*]] = bitcast i128 [[XOR]] to <4 x i32> ; CHECK-NEXT: ret <4 x i32> [[BC2]] ; diff --git a/llvm/test/Transforms/InstCombine/bitreverse.ll b/llvm/test/Transforms/InstCombine/bitreverse.ll index cbe9695c48690..fe44a7a77bdff 100644 --- a/llvm/test/Transforms/InstCombine/bitreverse.ll +++ b/llvm/test/Transforms/InstCombine/bitreverse.ll @@ -403,7 +403,7 @@ define i64 @PR59897(i1 %X1_2) { define i16 @rev_xor_lhs_rev16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: @rev_xor_lhs_rev16( ; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bitreverse.i16(i16 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i16 [[TMP2]] ; %1 = tail call i16 @llvm.bitreverse.i16(i16 %a) @@ -475,7 +475,7 @@ define <2 x i32> @rev_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 { define i64 @rev_and_rhs_rev64_multiuse1(i64 %a, i64 %b) #0 { ; CHECK-LABEL: @rev_and_rhs_rev64_multiuse1( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 
[[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]] ; CHECK-NEXT: ret i64 [[TMP4]] @@ -490,7 +490,7 @@ define i64 @rev_and_rhs_rev64_multiuse1(i64 %a, i64 %b) #0 { define i64 @rev_and_rhs_rev64_multiuse2(i64 %a, i64 %b) #0 { ; CHECK-LABEL: @rev_and_rhs_rev64_multiuse2( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bitreverse.i64(i64 [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP1]], [[TMP3]] ; CHECK-NEXT: ret i64 [[TMP4]] diff --git a/llvm/test/Transforms/InstCombine/bswap-fold.ll b/llvm/test/Transforms/InstCombine/bswap-fold.ll index 91674c6017a9e..ddc0430896e7d 100644 --- a/llvm/test/Transforms/InstCombine/bswap-fold.ll +++ b/llvm/test/Transforms/InstCombine/bswap-fold.ll @@ -544,7 +544,7 @@ define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 { define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: @bs_and_lhs_bs16( ; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i16 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i16 [[TMP2]] ; %1 = tail call i16 @llvm.bswap.i16(i16 %a) @@ -556,7 +556,7 @@ define i16 @bs_and_lhs_bs16(i16 %a, i16 %b) #0 { define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: @bs_or_lhs_bs16( ; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = or i16 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = or i16 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i16 [[TMP2]] ; %1 = tail call i16 @llvm.bswap.i16(i16 %a) @@ -568,7 +568,7 @@ define i16 @bs_or_lhs_bs16(i16 %a, i16 %b) #0 { define i16 @bs_xor_lhs_bs16(i16 %a, i16 %b) #0 { ; CHECK-LABEL: @bs_xor_lhs_bs16( ; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = xor i16 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i16 [[TMP2]] ; %1 = tail call i16 @llvm.bswap.i16(i16 %a) @@ -724,7 +724,7 @@ define <2 x i32> @bs_xor_rhs_i32vec(<2 x i32> %a, <2 x i32> %b) #0 { define i64 @bs_and_rhs_bs64_multiuse1(i64 %a, i64 %b) #0 { ; CHECK-LABEL: @bs_and_rhs_bs64_multiuse1( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP2]], [[TMP3]] ; CHECK-NEXT: ret i64 [[TMP4]] @@ -739,7 +739,7 @@ define i64 @bs_and_rhs_bs64_multiuse1(i64 %a, i64 %b) #0 { define i64 @bs_and_rhs_bs64_multiuse2(i64 %a, i64 %b) #0 { ; CHECK-LABEL: @bs_and_rhs_bs64_multiuse2( ; CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]]) -; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[TMP2]]) ; CHECK-NEXT: [[TMP4:%.*]] = mul i64 [[TMP1]], [[TMP3]] ; CHECK-NEXT: ret i64 [[TMP4]] diff --git a/llvm/test/Transforms/InstCombine/call-guard.ll b/llvm/test/Transforms/InstCombine/call-guard.ll index 358518b9bd1cb..6b31c78118d0b 100644 --- a/llvm/test/Transforms/InstCombine/call-guard.ll +++ 
b/llvm/test/Transforms/InstCombine/call-guard.ll @@ -80,7 +80,7 @@ define void @negative_load(i32 %V1, ptr %P) { define void @deref_load(i32 %V1, ptr dereferenceable(4) align 4 %P) nofree nosync { ; CHECK-LABEL: @deref_load( ; CHECK-NEXT: [[V2:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[V2]], [[V1:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[V1:%.*]], [[V2]] ; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0 ; CHECK-NEXT: call void (i1, ...) @llvm.experimental.guard(i1 [[TMP2]], i32 123) [ "deopt"() ] ; CHECK-NEXT: ret void diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll index 759770688cf20..3d5696a024513 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-eq-to-icmp-ule.ll @@ -27,7 +27,7 @@ define i1 @p0(i8 %x) { define i1 @pv(i8 %x, i8 %y) { ; CHECK-LABEL: @pv( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll index 9b28129dd9e17..21daeb8983a85 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll @@ -27,7 +27,7 @@ define i1 @p0(i8 %x) { define i1 @pv(i8 %x, i8 %y) { ; CHECK-LABEL: @pv( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll index cfd48821b2c1d..1dac73df38789 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-uge-to-icmp-ule.ll @@ -27,7 +27,7 @@ define i1 @p0(i8 %x) { define i1 @pv(i8 %x, i8 %y) { ; CHECK-LABEL: @pv( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll index 70fb34f499289..7eda7bb58f270 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-constant-low-bit-mask-and-icmp-ult-to-icmp-ugt.ll @@ -27,7 +27,7 @@ define i1 @p0(i8 %x) { define i1 @pv(i8 %x, i8 %y) { ; CHECK-LABEL: @pv( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: 
[[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll index dc5658d302d99..5a58fc96c6643 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll @@ -15,7 +15,7 @@ define i1 @p0(i8 %x, i8 %y) { ; CHECK-LABEL: @p0( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -31,7 +31,7 @@ define i1 @p0(i8 %x, i8 %y) { define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @p1_vec( ; CHECK-NEXT: [[TMP0:%.*]] = lshr <2 x i8> <i8 -1, i8 -1>, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret <2 x i1> [[RET]] ; %tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y @@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) { ; CHECK-LABEL: @p2_vec_poison( ; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule <3 x i8> [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %tmp0 = lshr <3 x i8> , %y @@ -110,7 +110,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP0]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -125,7 +125,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP1]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -141,7 +141,7 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-NEXT: call void @use8(i8 [[TMP0]]) ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP1]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[TMP0]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll index 8fbbd2bb9907d..edd528b500e55 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll @@ -15,7 +15,7 @@ define i1 @p0(i8 %x, i8 %y) { ; CHECK-LABEL: @p0( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -31,7 +31,7 @@ define i1 @p0(i8 %x, i8 %y) { define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @p1_vec( ; CHECK-NEXT: [[TMP0:%.*]] = lshr <2
x i8> <i8 -1, i8 -1>, [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret <2 x i1> [[RET]] ; %tmp0 = lshr <2 x i8> <i8 -1, i8 -1>, %y @@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) { define <3 x i1> @p2_vec_poison(<3 x i8> %x, <3 x i8> %y) { ; CHECK-LABEL: @p2_vec_poison( ; CHECK-NEXT: [[TMP0:%.*]] = lshr <3 x i8> , [[Y:%.*]] -; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret <3 x i1> [[RET]] ; %tmp0 = lshr <3 x i8> , %y @@ -110,7 +110,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP0]]) -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -125,7 +125,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-NEXT: [[TMP0:%.*]] = lshr i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP1]]) -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y @@ -141,7 +141,7 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-NEXT: call void @use8(i8 [[TMP0]]) ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[TMP0]], [[X:%.*]] ; CHECK-NEXT: call void @use8(i8 [[TMP1]]) -; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[TMP0]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[TMP0]] ; CHECK-NEXT: ret i1 [[RET]] ; %tmp0 = lshr i8 -1, %y diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll index 443efbe1ecaf6..1adef8b0710b3 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-eq-to-icmp-ule.ll @@ -144,7 +144,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -161,7 +161,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 ; CHECK-NEXT: call void @use8(i8 [[T1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -177,9 +177,9 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] ; %t0 = shl i8 -1, %y @@ -196,7 +196,7 @@ define i1 @oneuse3(i8 %x, i8 %y) { ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 ; CHECK-NEXT: call void @use8(i8
[[T1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -214,9 +214,9 @@ define i1 @oneuse4(i8 %x, i8 %y) { ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] ; %t0 = shl i8 -1, %y @@ -234,9 +234,9 @@ define i1 @oneuse5(i8 %x, i8 %y) { ; CHECK-NEXT: call void @use8(i8 [[T0]]) ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 ; CHECK-NEXT: call void @use8(i8 [[T1]]) -; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: call void @use8(i8 [[T2]]) -; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]] +; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]] ; CHECK-NEXT: ret i1 [[RET]] ; %t0 = shl i8 -1, %y @@ -257,7 +257,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) { ; CHECK-LABEL: @n0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[T2]], [[NOTX:%.*]] ; CHECK-NEXT: ret i1 [[RET]] ; @@ -271,7 +271,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) { define i1 @n1(i8 %x, i8 %y) { ; CHECK-LABEL: @n1( ; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -286,7 +286,7 @@ define i1 @n2(i8 %x, i8 %y) { ; CHECK-LABEL: @n2( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[T0]], -2 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP2]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll index ffde4eae777cb..36238c75370ab 100644 --- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll +++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v2-and-icmp-ne-to-icmp-ugt.ll @@ -144,7 +144,7 @@ define i1 @oneuse0(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse0( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -161,7 +161,7 @@ define i1 @oneuse1(i8 %x, i8 %y) { ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1 ; CHECK-NEXT: call void @use8(i8 [[T1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -177,9 +177,9 @@ define i1 @oneuse2(i8 %x, i8 %y) { ; CHECK-LABEL: @oneuse2( ; CHECK-NEXT: 
[[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -196,7 +196,7 @@ define i1 @oneuse3(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP1]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -214,9 +214,9 @@ define i1 @oneuse4(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -234,9 +234,9 @@ define i1 @oneuse5(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -257,7 +257,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
 ; CHECK-LABEL: @n0(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = and i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[T2]], [[NOTX:%.*]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -271,7 +271,7 @@ define i1 @n0(i8 %x, i8 %y, i8 %notx) {
 define i1 @n1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n1(
 ; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[T0]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X:%.*]], [[T0]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP1]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -286,7 +286,7 @@ define i1 @n2(i8 %x, i8 %y) {
 ; CHECK-LABEL: @n2(
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[T0]], -2
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
index 946bb03e04f7e..fd56324f10dc3 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-eq-to-icmp-ule.ll
@@ -174,7 +174,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -193,7 +193,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -213,7 +213,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -252,7 +252,7 @@ define i1 @n1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[T0]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -269,7 +269,7 @@ define i1 @n2(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = sub nuw i8 -2, [[T0]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp eq i8 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
index 63d406d54179f..4d8ce5d9a6cca 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v3-and-icmp-ne-to-icmp-ugt.ll
@@ -174,7 +174,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -193,7 +193,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0]], -1
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -213,7 +213,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 1, %y
@@ -252,7 +252,7 @@ define i1 @n1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[T0]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
@@ -269,7 +269,7 @@ define i1 @n2(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = sub nuw i8 -2, [[T0]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[RET:%.*]] = icmp ne i8 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
index f48d284e085bc..5fab93092a050 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-eq-to-icmp-ule.ll
@@ -23,7 +23,7 @@ define i1 @p0(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> , [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> , [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret <2 x i1> [[RET]]
 ;
 %t0 = shl <2 x i8> , %y
@@ -59,7 +59,7 @@ define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> , [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> , [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp uge <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule <3 x i8> [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret <3 x i1> [[RET]]
 ;
 %t0 = shl <3 x i8> , %y
@@ -140,7 +140,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -159,7 +159,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -179,7 +179,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp uge i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ule i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
index f4b3c67164e49..40a67ce1d60cb 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-low-bit-mask-v4-and-icmp-ne-to-icmp-ugt.ll
@@ -23,7 +23,7 @@ define i1 @p0(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -43,7 +43,7 @@ define <2 x i1> @p1_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <2 x i8> , [[Y:%.*]]
 ; CHECK-NEXT: call void @use2i8(<2 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <2 x i8> , [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <2 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret <2 x i1> [[RET]]
 ;
 %t0 = shl <2 x i8> , %y
@@ -59,7 +59,7 @@ define <3 x i1> @p2_vec_poison0(<3 x i8> %x, <3 x i8> %y) {
 ; CHECK-NEXT: [[T0:%.*]] = shl nsw <3 x i8> , [[Y:%.*]]
 ; CHECK-NEXT: call void @use3i8(<3 x i8> [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr <3 x i8> , [[Y]]
-; CHECK-NEXT: [[RET:%.*]] = icmp ult <3 x i8> [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt <3 x i8> [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret <3 x i1> [[RET]]
 ;
 %t0 = shl <3 x i8> , %y
@@ -140,7 +140,7 @@ define i1 @oneuse0(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -159,7 +159,7 @@ define i1 @oneuse1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T1:%.*]] = lshr i8 -1, [[Y]]
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
@@ -179,7 +179,7 @@ define i1 @oneuse2(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = and i8 [[T1]], [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[RET:%.*]] = icmp ult i8 [[T1]], [[X]]
+; CHECK-NEXT: [[RET:%.*]] = icmp ugt i8 [[X]], [[T1]]
 ; CHECK-NEXT: ret i1 [[RET]]
 ;
 %t0 = shl i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/cast-mul-select.ll b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
index d185e22680523..6eb3a8c0a2049 100644
--- a/llvm/test/Transforms/InstCombine/cast-mul-select.ll
+++ b/llvm/test/Transforms/InstCombine/cast-mul-select.ll
@@ -196,7 +196,7 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; CHECK: for.end:
 ; CHECK-NEXT: [[H:%.*]] = phi i8 [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ [[SPEC_SELECT]], [[FOR_BODY3_US]] ], [ 0, [[FOR_BODY3]] ], [ 0, [[FOR_BODY3]] ]
 ; CHECK-NEXT: [[CONV:%.*]] = zext nneg i8 [[H]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], [[CONV]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT]], label [[EXIT2:%.*]]
 ; CHECK: exit2:
 ; CHECK-NEXT: unreachable
@@ -228,7 +228,7 @@ define void @PR36225(i32 %a, i32 %b, i1 %c1, i3 %v1, i3 %v2) {
 ; DBGINFO-NEXT: #dbg_value(i8 [[H]], [[META91:![0-9]+]], !DIExpression(), [[DBG100]])
 ; DBGINFO-NEXT: [[CONV:%.*]] = zext nneg i8 [[H]] to i32, !dbg [[DBG101:![0-9]+]]
 ; DBGINFO-NEXT: #dbg_value(i32 [[CONV]], [[META92:![0-9]+]], !DIExpression(), [[DBG101]])
-; DBGINFO-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[A:%.*]], !dbg [[DBG102:![0-9]+]]
+; DBGINFO-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], [[CONV]], !dbg [[DBG102:![0-9]+]]
 ; DBGINFO-NEXT: #dbg_value(i1 [[CMP]], [[META93:![0-9]+]], !DIExpression(), [[DBG102]])
 ; DBGINFO-NEXT: br i1 [[CMP]], label [[EXIT]], label [[EXIT2:%.*]], !dbg [[DBG103:![0-9]+]]
 ; DBGINFO: exit2:
diff --git a/llvm/test/Transforms/InstCombine/cast.ll b/llvm/test/Transforms/InstCombine/cast.ll
index 43c198b9e19d8..0e44dc1b8ca9c 100644
--- a/llvm/test/Transforms/InstCombine/cast.ll
+++ b/llvm/test/Transforms/InstCombine/cast.ll
@@ -587,7 +587,7 @@ define i64 @test44(i8 %T) {
 define i64 @test45(i8 %A, i64 %Q) {
 ; ALL-LABEL: @test45(
 ; ALL-NEXT: [[B:%.*]] = sext i8 [[A:%.*]] to i64
-; ALL-NEXT: [[C:%.*]] = or i64 [[B]], [[Q:%.*]]
+; ALL-NEXT: [[C:%.*]] = or i64 [[Q:%.*]], [[B]]
 ; ALL-NEXT: [[E:%.*]] = and i64 [[C]], 4294967295
 ; ALL-NEXT: ret i64 [[E]]
 ;
@@ -1144,10 +1144,10 @@ define %s @test78(ptr %p, i64 %i, i64 %j, i32 %k, i32 %l, i128 %m, i128 %n) {
 ; ALL-NEXT: [[A:%.*]] = mul nsw i32 [[K:%.*]], 36
 ; ALL-NEXT: [[B:%.*]] = mul nsw i32 [[A]], [[L:%.*]]
 ; ALL-NEXT: [[C:%.*]] = sext i32 [[B]] to i128
-; ALL-NEXT: [[D:%.*]] = mul nsw i128 [[C]], [[M:%.*]]
+; ALL-NEXT: [[D:%.*]] = mul nsw i128 [[M:%.*]], [[C]]
 ; ALL-NEXT: [[E:%.*]] = mul i128 [[D]], [[N:%.*]]
 ; ALL-NEXT: [[F:%.*]] = trunc i128 [[E]] to i64
-; ALL-NEXT: [[G:%.*]] = mul nsw i64 [[F]], [[I:%.*]]
+; ALL-NEXT: [[G:%.*]] = mul nsw i64 [[I:%.*]], [[F]]
 ; ALL-NEXT: [[H:%.*]] = mul nsw i64 [[G]], [[J:%.*]]
 ; ALL-NEXT: [[PP:%.*]] = getelementptr inbounds i8, ptr [[P:%.*]], i64 [[H]]
 ; ALL-NEXT: [[LOAD:%.*]] = load [[S:%.*]], ptr [[PP]], align 4
diff --git a/llvm/test/Transforms/InstCombine/cast_phi.ll b/llvm/test/Transforms/InstCombine/cast_phi.ll
index 68847e73ac5d2..7dfe60539138d 100644
--- a/llvm/test/Transforms/InstCombine/cast_phi.ll
+++ b/llvm/test/Transforms/InstCombine/cast_phi.ll
@@ -350,7 +350,7 @@ define i32 @zext_in_loop_and_exit_block(i8 %step, i32 %end) {
 ; CHECK: loop:
 ; CHECK-NEXT: [[IV:%.*]] = phi i8 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP_LATCH:%.*]] ]
 ; CHECK-NEXT: [[IV_EXT:%.*]] = zext i8 [[IV]] to i32
-; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[IV_EXT]], [[END:%.*]]
+; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[END:%.*]], [[IV_EXT]]
 ; CHECK-NEXT: br i1 [[CMP_NOT]], label [[EXIT:%.*]], label [[LOOP_LATCH]]
 ; CHECK: loop.latch:
 ; CHECK-NEXT: [[IV_NEXT]] = add i8 [[IV]], [[STEP:%.*]]
diff --git a/llvm/test/Transforms/InstCombine/cast_ptr.ll b/llvm/test/Transforms/InstCombine/cast_ptr.ll
index 9f2d128ecc377..db576b9679b14 100644
--- a/llvm/test/Transforms/InstCombine/cast_ptr.ll
+++ b/llvm/test/Transforms/InstCombine/cast_ptr.ll
@@ -259,7 +259,7 @@ define i32 @ptr_add_in_int(i32 %x, i32 %y) {
 define i32 @ptr_add_in_int_2(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ptr_add_in_int_2(
 ; CHECK-NEXT: [[P2_IDX:%.*]] = shl nsw i32 [[Y:%.*]], 2
-; CHECK-NEXT: [[R:%.*]] = add i32 [[P2_IDX]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i32 [[X:%.*]], [[P2_IDX]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %ptr = inttoptr i32 %x to ptr
@@ -271,7 +271,7 @@ define i32 @ptr_add_in_int_2(i32 %x, i32 %y) {
 define i32 @ptr_add_in_int_nneg(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ptr_add_in_int_nneg(
 ; CHECK-NEXT: [[Z:%.*]] = call i32 @llvm.abs.i32(i32 [[Y:%.*]], i1 true)
-; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add nuw i32 [[X:%.*]], [[Z]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %z = call i32 @llvm.abs.i32(i32 %y, i1 true)
@@ -308,7 +308,7 @@ define i16 @ptr_add_in_int_different_type_2(i32 %x, i32 %y) {
 define i32 @ptr_add_in_int_different_type_3(i16 %x, i32 %y) {
 ; CHECK-LABEL: @ptr_add_in_int_different_type_3(
 ; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[X:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = add i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i32 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %ptr = inttoptr i16 %x to ptr
@@ -320,7 +320,7 @@ define i32 @ptr_add_in_int_different_type_3(i16 %x, i32 %y) {
 define i32 @ptr_add_in_int_different_type_4(i64 %x, i32 %y) {
 ; CHECK-LABEL: @ptr_add_in_int_different_type_4(
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = add i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i32 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %ptr = inttoptr i64 %x to ptr
@@ -332,7 +332,7 @@ define i32 @ptr_add_in_int_different_type_4(i64 %x, i32 %y) {
 define i32 @ptr_add_in_int_not_inbounds(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ptr_add_in_int_not_inbounds(
 ; CHECK-NEXT: [[Z:%.*]] = call i32 @llvm.abs.i32(i32 [[Y:%.*]], i1 true)
-; CHECK-NEXT: [[R:%.*]] = add i32 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i32 [[X:%.*]], [[Z]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %z = call i32 @llvm.abs.i32(i32 %y, i1 true)
diff --git a/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll b/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
index c5ff0f90fdaee..96b03e6cd054c 100644
--- a/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
+++ b/llvm/test/Transforms/InstCombine/cmp-x-vs-neg-x.ll
@@ -132,7 +132,7 @@ define i1 @t9(i8 %x) {
 define i1 @n10(i8 %x) {
 ; CHECK-LABEL: @n10(
 ; CHECK-NEXT: [[NEG_X:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[NEG_X]], [[X]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X]], [[NEG_X]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %neg_x = sub i8 0, %x ; not nsw
@@ -154,7 +154,7 @@ define i1 @n11(i8 %x) {
 define i1 @n12(i8 %x1, i8 %x2) {
 ; CHECK-LABEL: @n12(
 ; CHECK-NEXT: [[NEG_X:%.*]] = sub nsw i8 0, [[X1:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[NEG_X]], [[X2:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X2:%.*]], [[NEG_X]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %neg_x = sub nsw i8 0, %x1 ; not %x2
diff --git a/llvm/test/Transforms/InstCombine/conditional-negation.ll b/llvm/test/Transforms/InstCombine/conditional-negation.ll
index 1bdfd76edb341..0ae1af8f8e67f 100644
--- a/llvm/test/Transforms/InstCombine/conditional-negation.ll
+++ b/llvm/test/Transforms/InstCombine/conditional-negation.ll
@@ -44,7 +44,7 @@ define i8 @t2(i8 %x, i1 %cond0, i1 %cond1) {
 ; CHECK-LABEL: @t2(
 ; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND0:%.*]] to i8
 ; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND1:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
 ; CHECK-NEXT: ret i8 [[XOR]]
 ;
@@ -59,7 +59,7 @@ define i8 @t2(i8 %x, i1 %cond0, i1 %cond1) {
 define i8 @t3(i8 %x, i2 %cond) {
 ; CHECK-LABEL: @t3(
 ; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i2 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
 ; CHECK-NEXT: ret i8 [[XOR]]
 ;
@@ -71,7 +71,7 @@ define i8 @t3(i8 %x, i2 %cond) {
 define <2 x i8> @t3_vec(<2 x i8> %x, <2 x i2> %cond) {
 ; CHECK-LABEL: @t3_vec(
 ; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext <2 x i2> [[COND:%.*]] to <2 x i8>
-; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[COND_SPLAT]]
 ; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[SUB]], [[COND_SPLAT]]
 ; CHECK-NEXT: ret <2 x i8> [[XOR]]
 ;
@@ -115,7 +115,7 @@ define i8 @extrause01_v1(i8 %x, i1 %cond) {
 define i8 @extrause10_v1(i8 %x, i1 %cond) {
 ; CHECK-LABEL: @extrause10_v1(
 ; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i1 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
 ; CHECK-NEXT: ret i8 [[XOR]]
@@ -130,7 +130,7 @@ define i8 @extrause11_v1(i8 %x, i1 %cond) {
 ; CHECK-LABEL: @extrause11_v1(
 ; CHECK-NEXT: [[COND_SPLAT:%.*]] = sext i1 [[COND:%.*]] to i8
 ; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT]]
 ; CHECK-NEXT: ret i8 [[XOR]]
@@ -195,7 +195,7 @@ define i8 @extrause011_v2(i8 %x, i1 %cond) {
 define i8 @extrause100_v2(i8 %x, i1 %cond) {
 ; CHECK-LABEL: @extrause100_v2(
 ; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[X_NEG:%.*]] = sub i8 0, [[X]]
 ; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND]], i8 [[X_NEG]], i8 [[X]]
@@ -212,7 +212,7 @@ define i8 @extrause101_v2(i8 %x, i1 %cond) {
 ; CHECK-LABEL: @extrause101_v2(
 ; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
 ; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT0]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[X_NEG:%.*]] = sub i8 0, [[X]]
 ; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND]], i8 [[X_NEG]], i8 [[X]]
@@ -231,7 +231,7 @@ define i8 @extrause110_v2(i8 %x, i1 %cond) {
 ; CHECK-NEXT: [[COND_SPLAT0:%.*]] = sext i1 [[COND:%.*]] to i8
 ; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND]] to i8
 ; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT1]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
 ; CHECK-NEXT: ret i8 [[XOR]]
@@ -250,7 +250,7 @@ define i8 @extrause111_v2(i8 %x, i1 %cond) {
 ; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT0]])
 ; CHECK-NEXT: [[COND_SPLAT1:%.*]] = sext i1 [[COND]] to i8
 ; CHECK-NEXT: call void @use.i8(i8 [[COND_SPLAT1]])
-; CHECK-NEXT: [[SUB:%.*]] = add i8 [[COND_SPLAT0]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[COND_SPLAT0]]
 ; CHECK-NEXT: call void @use.i8(i8 [[SUB]])
 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[SUB]], [[COND_SPLAT1]]
 ; CHECK-NEXT: ret i8 [[XOR]]
diff --git a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
index a505654fa96e7..bcfbce8dfd3d2 100644
--- a/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop-cttz.ll
@@ -33,7 +33,7 @@ define <2 x i32> @ctpop1v(<2 x i32> %0) {
 define i32 @ctpop1_multiuse(i32 %0) {
 ; CHECK-LABEL: @ctpop1_multiuse(
 ; CHECK-NEXT: [[TMP2:%.*]] = sub i32 0, [[TMP0:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP2]], [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i32 [[TMP0]], [[TMP2]]
 ; CHECK-NEXT: [[TMP4:%.*]] = xor i32 [[TMP3]], -1
 ; CHECK-NEXT: [[TMP5:%.*]] = call range(i32 0, 33) i32 @llvm.ctpop.i32(i32 [[TMP4]])
 ; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[TMP3]]
diff --git a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
index 4ef1ed0ec4976..17997b25d096c 100644
--- a/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
+++ b/llvm/test/Transforms/InstCombine/ctpop-pow2.ll
@@ -12,7 +12,7 @@ declare void @llvm.assume(i1)
 define i16 @ctpop_x_and_negx(i16 %x) {
 ; CHECK-LABEL: @ctpop_x_and_negx(
 ; CHECK-NEXT: [[V0:%.*]] = sub i16 0, [[X:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = and i16 [[V0]], [[X]]
+; CHECK-NEXT: [[V1:%.*]] = and i16 [[X]], [[V0]]
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i16 [[V1]], 0
 ; CHECK-NEXT: [[CNT:%.*]] = zext i1 [[TMP1]] to i16
 ; CHECK-NEXT: ret i16 [[CNT]]
@@ -74,7 +74,7 @@ define i8 @ctpop_imin_plus1_lshr_nz(i8 %x) {
 define i64 @ctpop_x_and_negx_nz(i64 %x) {
 ; CHECK-LABEL: @ctpop_x_and_negx_nz(
 ; CHECK-NEXT: [[V0:%.*]] = sub i64 0, [[X:%.*]]
-; CHECK-NEXT: [[V1:%.*]] = and i64 [[V0]], [[X]]
+; CHECK-NEXT: [[V1:%.*]] = and i64 [[X]], [[V0]]
 ; CHECK-NEXT: [[CMP:%.*]] = icmp ne i64 [[V1]], 0
 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
 ; CHECK-NEXT: ret i64 1
@@ -127,7 +127,7 @@ define <2 x i32> @ctpop_shl2_1_vec_nz(<2 x i32> %x) {
 define <2 x i64> @ctpop_x_and_negx_vec(<2 x i64> %x) {
 ; CHECK-LABEL: @ctpop_x_and_negx_vec(
 ; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[SUB]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and <2 x i64> [[X]], [[SUB]]
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne <2 x i64> [[AND]], zeroinitializer
 ; CHECK-NEXT: [[CNT:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
 ; CHECK-NEXT: ret <2 x i64> [[CNT]]
diff --git a/llvm/test/Transforms/InstCombine/cttz.ll b/llvm/test/Transforms/InstCombine/cttz.ll
index 66b7a03fe5d7b..e106faf9cb38f 100644
--- a/llvm/test/Transforms/InstCombine/cttz.ll
+++ b/llvm/test/Transforms/InstCombine/cttz.ll
@@ -193,7 +193,7 @@ define i32 @cttz_of_lowest_set_bit_wrong_const(i32 %x) {
 define i32 @cttz_of_lowest_set_bit_wrong_operand(i32 %x, i32 %y) {
 ; CHECK-LABEL: @cttz_of_lowest_set_bit_wrong_operand(
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[SUB]]
 ; CHECK-NEXT: [[TZ:%.*]] = call range(i32 0, 33) i32 @llvm.cttz.i32(i32 [[AND]], i1 false)
 ; CHECK-NEXT: ret i32 [[TZ]]
 ;
@@ -206,7 +206,7 @@ define i32 @cttz_of_lowest_set_bit_wrong_operand(i32 %x, i32 %y) {
 define i32 @cttz_of_lowest_set_bit_wrong_intrinsic(i32 %x) {
 ; CHECK-LABEL: @cttz_of_lowest_set_bit_wrong_intrinsic(
 ; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[X]]
+; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], [[SUB]]
 ; CHECK-NEXT: [[TZ:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[AND]], i1 false)
 ; CHECK-NEXT: ret i32 [[TZ]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/demorgan.ll b/llvm/test/Transforms/InstCombine/demorgan.ll
index 11052d38f9bc7..460758d512bb3 100644
--- a/llvm/test/Transforms/InstCombine/demorgan.ll
+++ b/llvm/test/Transforms/InstCombine/demorgan.ll
@@ -191,7 +191,7 @@ define i71 @test5_apint(i71 %A, i71 %B) {
 define i8 @demorgan_nand(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nand(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i8 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i8 [[NOTC]]
 ;
 %notx = xor i8 %A, -1
@@ -205,7 +205,7 @@ define i8 @demorgan_nand(i8 %A, i8 %B) {
 define i7 @demorgan_nand_apint1(i7 %A, i7 %B) {
 ; CHECK-LABEL: @demorgan_nand_apint1(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i7 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i7 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i7 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i7 [[NOTC]]
 ;
 %nota = xor i7 %A, -1
@@ -219,7 +219,7 @@ define i7 @demorgan_nand_apint1(i7 %A, i7 %B) {
 define i117 @demorgan_nand_apint2(i117 %A, i117 %B) {
 ; CHECK-LABEL: @demorgan_nand_apint2(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i117 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = or i117 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = or i117 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i117 [[NOTC]]
 ;
 %nota = xor i117 %A, -1
@@ -233,7 +233,7 @@ define i117 @demorgan_nand_apint2(i117 %A, i117 %B) {
 define i8 @demorgan_nor(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nor(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i8 [[NOTC]]
 ;
 %notx = xor i8 %A, -1
@@ -249,7 +249,7 @@ define i8 @demorgan_nor_use2a(i8 %A, i8 %B) {
 ; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
 ; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 23
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B:%.*]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A]], [[B_NOT]]
 ; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2A]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
@@ -267,7 +267,7 @@ define i8 @demorgan_nor_use2b(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nor_use2b(
 ; CHECK-NEXT: [[USE2B:%.*]] = mul i8 [[B:%.*]], 23
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2B]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
@@ -284,7 +284,7 @@ define i8 @demorgan_nor_use2b(i8 %A, i8 %B) {
 define i8 @demorgan_nor_use2c(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nor_use2c(
 ; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B:%.*]], [[NOTA]]
 ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
 ; CHECK-NEXT: [[R:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
@@ -306,7 +306,7 @@ define i8 @demorgan_nor_use2ab(i8 %A, i8 %B) {
 ; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
 ; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 17
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i8 [[B]], -1
-; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[B_NOT]], [[A]]
+; CHECK-NEXT: [[NOTC:%.*]] = and i8 [[A]], [[B_NOT]]
 ; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2B]]
 ; CHECK-NEXT: [[R2:%.*]] = sdiv i8 [[R1]], [[USE2A]]
 ; CHECK-NEXT: ret i8 [[R2]]
@@ -327,7 +327,7 @@ define i8 @demorgan_nor_use2ac(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nor_use2ac(
 ; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
 ; CHECK-NEXT: [[USE2A:%.*]] = mul i8 [[NOTA]], 17
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B:%.*]], [[NOTA]]
 ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
 ; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
@@ -350,7 +350,7 @@ define i8 @demorgan_nor_use2bc(i8 %A, i8 %B) {
 ; CHECK-LABEL: @demorgan_nor_use2bc(
 ; CHECK-NEXT: [[USE2B:%.*]] = mul i8 [[B:%.*]], 23
 ; CHECK-NEXT: [[NOTA:%.*]] = xor i8 [[A:%.*]], -1
-; CHECK-NEXT: [[C:%.*]] = or i8 [[NOTA]], [[B]]
+; CHECK-NEXT: [[C:%.*]] = or i8 [[B]], [[NOTA]]
 ; CHECK-NEXT: [[USE2C:%.*]] = mul i8 [[C]], 23
 ; CHECK-NEXT: [[NOTC:%.*]] = xor i8 [[C]], -1
 ; CHECK-NEXT: [[R1:%.*]] = sdiv i8 [[NOTC]], [[USE2C]]
diff --git a/llvm/test/Transforms/InstCombine/dependent-ivs.ll b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
index e3207daefee09..e4a042ff5fe51 100644
--- a/llvm/test/Transforms/InstCombine/dependent-ivs.ll
+++ b/llvm/test/Transforms/InstCombine/dependent-ivs.ll
@@ -452,7 +452,7 @@ define void @int_iv_add_wrong_start(i64 %base, i64 %end) {
 ; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[IV_NEXT:%.*]], [[LOOP]] ], [ 1, [[ENTRY]] ]
 ; CHECK-NEXT: call void @use.i64(i64 [[IV2]])
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 4
-; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[IV_NEXT]], [[BASE]]
+; CHECK-NEXT: [[IV2_NEXT]] = add i64 [[BASE]], [[IV_NEXT]]
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 [[IV_NEXT]], [[END]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[EXIT:%.*]], label [[LOOP]]
 ; CHECK: exit:
@@ -706,7 +706,7 @@ define void @different_loops(i64 %base) {
 ; CHECK: loop2:
 ; CHECK-NEXT: [[IV2:%.*]] = phi i64 [ [[IV2_NEXT:%.*]], [[LOOP2]] ], [ [[BASE]], [[LOOP1]] ]
 ; CHECK-NEXT: call void @use.i64(i64 [[IV2]])
-; CHECK-NEXT: [[IV2_NEXT]] = add nuw i64 [[IV_NEXT]], [[BASE]]
+; CHECK-NEXT: [[IV2_NEXT]] = add nuw i64 [[BASE]], [[IV_NEXT]]
 ; CHECK-NEXT: [[CMP2:%.*]] = call i1 @get.i1()
 ; CHECK-NEXT: br i1 [[CMP2]], label [[EXIT:%.*]], label [[LOOP2]]
 ; CHECK: exit:
diff --git a/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll b/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
index 4b9c4fd9f9544..0be7f50cfddae 100644
--- a/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
+++ b/llvm/test/Transforms/InstCombine/fadd-fsub-factor.ll
@@ -474,8 +474,8 @@ define float @fdiv_fsub_denorm(float %x) {
 define float @lerp_commute0(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_commute0(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[A]], [[TMP2]]
 ; CHECK-NEXT: ret float [[ADD]]
 ;
 %sub = fsub fast float 1.0, %c
@@ -488,8 +488,8 @@ define float @lerp_commute0(float %a, float %b, float %c) {
 define <2 x float> @lerp_commute1(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
 ; CHECK-LABEL: @lerp_commute1(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast <2 x float> [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast <2 x float> [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast <2 x float> [[A]], [[TMP2]]
 ; CHECK-NEXT: ret <2 x float> [[ADD]]
 ;
 %sub = fsub <2 x float> , %c
@@ -502,8 +502,8 @@ define <2 x float> @lerp_commute1(<2 x float> %a, <2 x float> %b, <2 x float> %c
 define float @lerp_commute2(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_commute2(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nsz float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc nsz float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc nsz float [[A]], [[TMP2]]
 ; CHECK-NEXT: ret float [[ADD]]
 ;
 %sub = fsub float 1.0, %c
@@ -516,8 +516,8 @@ define float @lerp_commute2(float %a, float %b, float %c) {
 define float @lerp_commute3(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_commute3(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc ninf nsz float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc ninf nsz float [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc ninf nsz float [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul reassoc ninf nsz float [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd reassoc ninf nsz float [[A]], [[TMP2]]
 ; CHECK-NEXT: ret float [[ADD]]
 ;
 %sub = fsub fast float 1.0, %c
@@ -530,8 +530,8 @@ define float @lerp_commute3(float %a, float %b, float %c) {
 define double @lerp_commute4(double %a, double %b, double %c) {
 ; CHECK-LABEL: @lerp_commute4(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast double [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[A]], [[TMP2]]
 ; CHECK-NEXT: ret double [[ADD]]
 ;
 %sub = fsub fast double 1.0, %c
@@ -544,8 +544,8 @@ define double @lerp_commute4(double %a, double %b, double %c) {
 define double @lerp_commute5(double %a, double %b, double %c) {
 ; CHECK-LABEL: @lerp_commute5(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast double [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast double [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast double [[A]], [[TMP2]]
 ; CHECK-NEXT: ret double [[ADD]]
 ;
 %sub = fsub fast double 1.0, %c
@@ -558,8 +558,8 @@ define double @lerp_commute5(double %a, double %b, double %c) {
 define half @lerp_commute6(half %a, half %b, half %c) {
 ; CHECK-LABEL: @lerp_commute6(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast half [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[A]], [[TMP2]]
 ; CHECK-NEXT: ret half [[ADD]]
 ;
 %sub = fsub fast half 1.0, %c
@@ -572,8 +572,8 @@ define half @lerp_commute6(half %a, half %b, half %c) {
 define half @lerp_commute7(half %a, half %b, half %c) {
 ; CHECK-LABEL: @lerp_commute7(
 ; CHECK-NEXT: [[TMP1:%.*]] = fsub fast half [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[TMP1]], [[C:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[A]]
+; CHECK-NEXT: [[TMP2:%.*]] = fmul fast half [[C:%.*]], [[TMP1]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[A]], [[TMP2]]
 ; CHECK-NEXT: ret half [[ADD]]
 ;
 %sub = fsub fast half 1.0, %c
@@ -586,7 +586,7 @@ define half @lerp_commute7(half %a, half %b, half %c) {
 define float @lerp_extra_use1(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_extra_use1(
 ; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
 ; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
 ; CHECK-NEXT: call void @use(float [[BC]])
 ; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
@@ -603,7 +603,7 @@ define float @lerp_extra_use1(float %a, float %b, float %c) {
 define float @lerp_extra_use2(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_extra_use2(
 ; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
 ; CHECK-NEXT: call void @use(float [[MUL]])
 ; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
 ; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
@@ -621,7 +621,7 @@ define float @lerp_extra_use3(float %a, float %b, float %c) {
 ; CHECK-LABEL: @lerp_extra_use3(
 ; CHECK-NEXT: [[SUB:%.*]] = fsub fast float 1.000000e+00, [[C:%.*]]
 ; CHECK-NEXT: call void @use(float [[SUB]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[SUB]], [[A:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[A:%.*]], [[SUB]]
 ; CHECK-NEXT: [[BC:%.*]] = fmul fast float [[B:%.*]], [[C]]
 ; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[BC]], [[MUL]]
 ; CHECK-NEXT: ret float [[ADD]]
diff --git a/llvm/test/Transforms/InstCombine/fadd.ll b/llvm/test/Transforms/InstCombine/fadd.ll
index 38508cdb09e1f..840ccaef1086a 100644
--- a/llvm/test/Transforms/InstCombine/fadd.ll
+++ b/llvm/test/Transforms/InstCombine/fadd.ll
@@ -83,7 +83,7 @@ define double @fmul_fneg2(double %x, double %py, double %pz) {
 ; CHECK-LABEL: @fmul_fneg2(
 ; CHECK-NEXT: [[Y:%.*]] = frem double -4.200000e+01, [[PY:%.*]]
 ; CHECK-NEXT: [[Z:%.*]] = frem double 4.200000e+01, [[PZ:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y]]
 ; CHECK-NEXT: [[R:%.*]] = fsub double [[Z]], [[TMP1]]
 ; CHECK-NEXT: ret double [[R]]
 ;
@@ -149,7 +149,7 @@ define double @fmul_fneg2_commute(double %x, double %py, double %pz) {
 ; CHECK-LABEL: @fmul_fneg2_commute(
 ; CHECK-NEXT: [[Y:%.*]] = frem double 4.100000e+01, [[PY:%.*]]
 ; CHECK-NEXT: [[Z:%.*]] = frem double 4.200000e+01, [[PZ:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y]]
 ; CHECK-NEXT: [[R:%.*]] = fsub double [[Z]], [[TMP1]]
 ; CHECK-NEXT: ret double [[R]]
 ;
@@ -207,7 +207,7 @@ define <2 x float> @fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 x fl
 ; CHECK-LABEL: @fmul_fneg1_extra_use(
 ; CHECK-NEXT: [[Z:%.*]] = frem <2 x float> , [[PZ:%.*]]
 ; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
 ; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
 ; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[Z]], [[MUL]]
 ; CHECK-NEXT: ret <2 x float> [[R]]
@@ -299,7 +299,7 @@ define float @fmul_fneg2_extra_use2(float %x, float %py, float %z) {
 ; CHECK-NEXT: [[Y:%.*]] = frem float -4.200000e+01, [[PY:%.*]]
 ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
 ; CHECK-NEXT: call void @use(float [[NEG]])
-; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[X]], [[Y]]
 ; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[R]]
 ;
@@ -355,7 +355,7 @@ define <2 x float> @fmul_fneg1_extra_use3(<2 x float> %x, <2 x float> %y, <2 x f
 ; CHECK-LABEL: @fmul_fneg1_extra_use3(
 ; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
 ; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]])
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]]
 ; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]])
 ; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[MUL]], [[Z:%.*]]
 ; CHECK-NEXT: ret <2 x float> [[R]]
diff --git a/llvm/test/Transforms/InstCombine/fast-basictest.ll b/llvm/test/Transforms/InstCombine/fast-basictest.ll
index 3c7776a43e55e..b0e43ba432162 100644
--- a/llvm/test/Transforms/InstCombine/fast-basictest.ll
+++ b/llvm/test/Transforms/InstCombine/fast-basictest.ll
@@ -424,7 +424,7 @@ define float @test14_reassoc(float %arg) {
 define float @test15(float %b, float %a) {
 ; CHECK-LABEL: @test15(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
 ; CHECK-NEXT: ret float [[TMP3]]
 ;
@@ -438,7 +438,7 @@ define float @test15(float %b, float %a) {
 define float @test15_unary_fneg(float %b, float %a) {
 ; CHECK-LABEL: @test15_unary_fneg(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = fsub fast float [[TMP2]], [[A]]
 ; CHECK-NEXT: ret float [[TMP3]]
 ;
@@ -452,7 +452,7 @@ define float @test15_unary_fneg(float %b, float %a) {
 define float @test15_reassoc_nsz(float %b, float %a) {
 ; CHECK-LABEL: @test15_reassoc_nsz(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc nsz float [[TMP2]], [[A]]
 ; CHECK-NEXT: ret float [[TMP3]]
 ;
@@ -466,7 +466,7 @@ define float @test15_reassoc_nsz(float %b, float %a) {
 define float @test15_reassoc(float %b, float %a) {
 ; CHECK-LABEL: @test15_reassoc(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
 ; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP2]], [[TMP3]]
 ; CHECK-NEXT: ret float [[TMP4]]
@@ -550,7 +550,7 @@ define float @test16_reassoc(float %a, float %b, float %z) {
 define float @test17(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17(
 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[F]]
 ;
 %d = fmul fast float %z, 4.000000e+01
@@ -563,7 +563,7 @@ define float @test17(float %a, float %b, float %z) {
 define float @test17_unary_fneg(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17_unary_fneg(
 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul fast float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul fast float [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[F]]
 ;
 %d = fmul fast float %z, 4.000000e+01
@@ -576,7 +576,7 @@ define float @test17_unary_fneg(float %a, float %b, float %z) {
 define float @test17_reassoc_nsz(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17_reassoc_nsz(
 ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc nsz float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul reassoc nsz float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc nsz float [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[F]]
 ;
 %d = fmul reassoc nsz float %z, 4.000000e+01
@@ -591,7 +591,7 @@ define float @test17_reassoc(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17_reassoc(
 ; CHECK-NEXT: [[D:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
 ; CHECK-NEXT: [[C:%.*]] = fsub reassoc float 0.000000e+00, [[D]]
-; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[C]], [[A:%.*]]
+; CHECK-NEXT: [[E:%.*]] = fmul reassoc float [[A:%.*]], [[C]]
 ; CHECK-NEXT: [[F:%.*]] = fsub reassoc float 0.000000e+00, [[E]]
 ; CHECK-NEXT: ret float [[F]]
 ;
@@ -607,7 +607,7 @@ define float @test17_reassoc(float %a, float %b, float %z) {
 define float @test17_unary_fneg_no_FMF(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17_unary_fneg_no_FMF(
 ; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul float [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[F]]
 ;
 %d = fmul float %z, 4.000000e+01
@@ -620,7 +620,7 @@ define float @test17_unary_fneg_no_FMF(float %a, float %b, float %z) {
 define float @test17_reassoc_unary_fneg(float %a, float %b, float %z) {
 ; CHECK-LABEL: @test17_reassoc_unary_fneg(
 ; CHECK-NEXT: [[TMP1:%.*]] = fmul reassoc float [[Z:%.*]], 4.000000e+01
-; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[F:%.*]] = fmul reassoc float [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret float [[F]]
 ;
 %d = fmul reassoc float %z, 4.000000e+01
diff --git a/llvm/test/Transforms/InstCombine/fast-math.ll b/llvm/test/Transforms/InstCombine/fast-math.ll
index d7c90e82ab520..32f136d53fab4 100644
--- a/llvm/test/Transforms/InstCombine/fast-math.ll
+++ b/llvm/test/Transforms/InstCombine/fast-math.ll
@@ -65,7 +65,7 @@ define double @fold3_reassoc_nsz(double %f1) {
 define double @fold3_reassoc(double %f1) {
 ; CHECK-LABEL: @fold3_reassoc(
 ; CHECK-NEXT: [[T1:%.*]] = fmul reassoc double [[F1:%.*]], 5.000000e+00
-; CHECK-NEXT: [[T2:%.*]] = fadd reassoc double [[T1]], [[F1]]
+; CHECK-NEXT: [[T2:%.*]] = fadd reassoc double [[F1]], [[T1]]
 ; CHECK-NEXT: ret double [[T2]]
 ;
 %t1 = fmul reassoc double 5.000000e+00, %f1
@@ -175,7 +175,7 @@ define float @fold6_reassoc_nsz(float %f1) {
 define float @fold6_reassoc(float %f1) {
 ; CHECK-LABEL: @fold6_reassoc(
 ; CHECK-NEXT: [[T1:%.*]] = fadd reassoc float [[F1:%.*]], [[F1]]
-; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[T1]], [[F1]]
+; CHECK-NEXT: [[T2:%.*]] = fadd reassoc float [[F1]], [[T1]]
 ; CHECK-NEXT: [[T3:%.*]] = fadd reassoc float [[T2]], [[F1]]
 ; CHECK-NEXT: ret float [[T3]]
 ;
@@ -506,7 +506,7 @@ define float @fold16(float %x, float %y) {
 ; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[TMP1:%.*]] = fneg float [[Y]]
 ; CHECK-NEXT: [[R_P:%.*]] = select i1 [[CMP]], float [[Y]], float [[TMP1]]
-; CHECK-NEXT: [[R:%.*]] = fadd float [[R_P]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = fadd float [[X]], [[R_P]]
 ; CHECK-NEXT: ret float [[R]]
 ;
 %cmp = fcmp ogt float %x, %y
diff --git a/llvm/test/Transforms/InstCombine/fcmp.ll b/llvm/test/Transforms/InstCombine/fcmp.ll
index 8afb6463b669d..0d45baddcb2fc 100644
--- a/llvm/test/Transforms/InstCombine/fcmp.ll
+++ b/llvm/test/Transforms/InstCombine/fcmp.ll
@@ -1439,7 +1439,7 @@ define i1 @fcmp_fadd_neg_zero(float %x, float %y) {
 define i1 @fcmp_fadd_zero_switched(float %x, float %y) {
 ; CHECK-LABEL: @fcmp_fadd_zero_switched(
-; CHECK-NEXT: [[CMP:%.*]] = fcmp ult float [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp ugt float [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %add = fadd float %y, 0.000000e+00
diff --git a/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll b/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
index 9f030c5ebf7bb..c5078ff1efc5a 100644
--- a/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
+++ b/llvm/test/Transforms/InstCombine/fdiv-sqrt.ll
@@ -8,7 +8,7 @@ define double @sqrt_div_fast(double %x, double %y, double %z) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = fdiv fast double [[Z:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[TMP1:%.*]] = call fast double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul fast double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul fast double [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret double [[DIV1]]
 ;
 entry:
@@ -38,7 +38,7 @@ define double @sqrt_div_reassoc_arcp(double %x, double %y, double %z) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = fdiv reassoc arcp double [[Z:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc arcp double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret double [[DIV1]]
 ;
 entry:
@@ -98,7 +98,7 @@ define double @sqrt_div_arcp_missing(double %x, double %y, double %z) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = fdiv reassoc double [[Z:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[TMP1:%.*]] = call reassoc arcp double @llvm.sqrt.f64(double [[TMP0]])
-; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[DIV1:%.*]] = fmul reassoc arcp double [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret double [[DIV1]]
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/fdiv.ll b/llvm/test/Transforms/InstCombine/fdiv.ll
index ca11685c98417..12d6e6463de65 100644
--- a/llvm/test/Transforms/InstCombine/fdiv.ll
+++ b/llvm/test/Transforms/InstCombine/fdiv.ll
@@ -678,7 +678,7 @@ define float @pow_divisor(float %x, float %y, float %z) {
 ; CHECK-LABEL: @pow_divisor(
 ; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.pow.f32(float [[X:%.*]], float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret float [[R]]
 ;
 %p = call float @llvm.pow.f32(float %x, float %y)
@@ -744,7 +744,7 @@ define float @exp_divisor(float %y, float %z) {
 ; CHECK-LABEL: @exp_divisor(
 ; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.exp.f32(float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret float [[R]]
 ;
 %p = call float @llvm.exp.f32(float %y)
@@ -810,7 +810,7 @@ define float @exp2_divisor(float %y, float %z) {
 ; CHECK-LABEL: @exp2_divisor(
 ; CHECK-NEXT: [[TMP1:%.*]] = fneg reassoc arcp float [[Y:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc arcp float @llvm.exp2.f32(float [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc arcp float [[Z:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret float [[R]]
 ;
 %p = call float @llvm.exp2.f32(float %y)
@@ -876,7 +876,7 @@ define float @powi_divisor(float %x, i32 %y, float %z) {
 ; CHECK-LABEL: @powi_divisor(
 ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[Y:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = call reassoc ninf arcp float @llvm.powi.f32.i32(float [[X:%.*]], i32 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = fmul reassoc ninf arcp float [[TMP2]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul reassoc ninf arcp float [[Z:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret float [[R]]
 ;
 %p = call float @llvm.powi.f32.i32(float %x, i32 %y)
diff --git a/llvm/test/Transforms/InstCombine/float-shrink-compare.ll b/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
index e6e41ad03ce59..77b6ed7c5abe8 100644
--- a/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
+++ b/llvm/test/Transforms/InstCombine/float-shrink-compare.ll
@@ -215,7 +215,7 @@ define i1 @test7_intrin(float %x, float %y) {
 define i1 @test8(float %x, float %y) {
 ; CHECK-LABEL: @test8(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -228,7 +228,7 @@ define i1 @test8(float %x, float %y) {
 define i1 @test8_intrin(float %x, float %y) {
 ; CHECK-LABEL: @test8_intrin(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.ceil.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -241,7 +241,7 @@ define i1 @test8_intrin(float %x, float %y) {
 define i1 @test9(float %x, float %y) {
 ; CHECK-LABEL: @test9(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -254,7 +254,7 @@ define i1 @test9(float %x, float %y) {
 define i1 @test9_intrin(float %x, float %y) {
 ; CHECK-LABEL: @test9_intrin(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -319,7 +319,7 @@ define i1 @test11_intrin(float %x, float %y) {
 define i1 @test12(float %x, float %y) {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.rint.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -332,7 +332,7 @@ define i1 @test12(float %x, float %y) {
 define i1 @test13(float %x, float %y) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -345,7 +345,7 @@ define i1 @test13(float %x, float %y) {
 define i1 @test13_intrin(float %x, float %y) {
 ; CHECK-LABEL: @test13_intrin(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.round.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -358,7 +358,7 @@ define i1 @test13_intrin(float %x, float %y) {
 define i1 @test13a(float %x, float %y) {
 ; CHECK-LABEL: @test13a(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -371,7 +371,7 @@ define i1 @test13a(float %x, float %y) {
 define i1 @test13a_intrin(float %x, float %y) {
 ; CHECK-LABEL: @test13a_intrin(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.roundeven.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -384,7 +384,7 @@ define i1 @test13a_intrin(float %x, float %y) {
 define i1 @test14(float %x, float %y) {
 ; CHECK-LABEL: @test14(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -397,7 +397,7 @@ define i1 @test14(float %x, float %y) {
 define i1 @test14_intrin(float %x, float %y) {
 ; CHECK-LABEL: @test14_intrin(
 ; CHECK-NEXT: [[TMP1:%.*]] = call float @llvm.trunc.f32(float [[X:%.*]])
-; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq float [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[CMP]]
 ;
 %x.ext = fpext float %x to double
@@ -424,7 +424,7 @@ define i1 @test15(float %x, float %y, float %z) {
 define i1 @test16(float %x, float %y, float %z) {
 ; CHECK-LABEL: @test16(
 ; CHECK-NEXT: [[FMINF:%.*]] = call nsz float @llvm.minnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMINF]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Z:%.*]], [[FMINF]]
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
 %1 = fpext float %z to double
@@ -452,7 +452,7 @@ define i1 @test17(float %x, float %y, float %z) {
 define i1 @test18(float %x, float %y, float %z) {
 ; CHECK-LABEL: @test18(
 ; CHECK-NEXT: [[FMAXF:%.*]] = call nsz float @llvm.maxnum.f32(float [[X:%.*]], float [[Y:%.*]])
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMAXF]], [[Z:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Z:%.*]], [[FMAXF]]
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
 %1 = fpext float %z to double
@@ -480,7 +480,7 @@ define i1 @test19(float %x, float %y, float %z) {
 define i1 @test20(float %x, float %y) {
 ; CHECK-LABEL: @test20(
 ; CHECK-NEXT: [[FMINF:%.*]] = call nsz float @llvm.minnum.f32(float [[X:%.*]], float 1.000000e+00)
-; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[FMINF]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq float [[Y:%.*]], [[FMINF]]
 ; CHECK-NEXT: ret i1 [[TMP1]]
 ;
 %1 = fpext float %y to double
diff --git a/llvm/test/Transforms/InstCombine/fmul.ll b/llvm/test/Transforms/InstCombine/fmul.ll
index ae2df634b0200..4554b4ed8844d 100644
--- a/llvm/test/Transforms/InstCombine/fmul.ll
+++ b/llvm/test/Transforms/InstCombine/fmul.ll
@@ -281,7 +281,7 @@ define float @neg_unary_neg_multi_use(float %x, float %y) {
 define float @neg_mul(float %x, float %y) {
 ; CHECK-LABEL: @neg_mul(
 ; CHECK-NEXT: [[SUB:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB]]
 ; CHECK-NEXT: ret float [[MUL]]
 ;
 %sub = fsub float -0.0, %x
@@ -292,7 +292,7 @@ define float @neg_mul(float %x, float %y) {
 define float @unary_neg_mul(float %x, float %y) {
 ; CHECK-LABEL: @unary_neg_mul(
 ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]]
 ; CHECK-NEXT: ret float [[MUL]]
 ;
 %neg = fneg float %x
@@ -303,7 +303,7 @@ define float @unary_neg_mul(float %x, float %y) {
 define <2 x float> @neg_mul_vec(<2 x float> %x, <2 x float> %y) {
 ; CHECK-LABEL: @neg_mul_vec(
 ; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
 ; CHECK-NEXT: ret <2 x float> [[MUL]]
 ;
 %sub = fsub <2 x float> , %x
@@ -314,7 +314,7 @@ define <2 x float> @neg_mul_vec(<2 x float> %x, <2 x float> %y) {
 define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) {
 ; CHECK-LABEL: @unary_neg_mul_vec(
 ; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
 ; CHECK-NEXT: ret <2 x float> [[MUL]]
 ;
 %sub = fneg <2 x float> %x
@@ -325,7 +325,7 @@ define <2 x float> @unary_neg_mul_vec(<2 x float> %x, <2 x float> %y) {
 define <2 x float> @neg_mul_vec_poison(<2 x float> %x, <2 x float> %y) {
 ; CHECK-LABEL: @neg_mul_vec_poison(
 ; CHECK-NEXT: [[SUB:%.*]] = fneg <2 x float> [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[SUB]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[SUB]]
 ; CHECK-NEXT: ret <2 x float> [[MUL]]
 ;
 %sub = fsub <2 x float> , %x
@@ -337,7 +337,7 @@ define <2 x float> @neg_mul_vec_poison(<2 x float> %x, <2 x float> %y) {
 define float @neg_sink_nsz(float %x, float %y) {
 ; CHECK-LABEL: @neg_sink_nsz(
 ; CHECK-NEXT: [[SUB1:%.*]] = fneg nsz float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
 ; CHECK-NEXT: ret float [[MUL]]
 ;
 %sub1 = fsub nsz float 0.0, %x
@@ -348,7 +348,7 @@ define float @neg_sink_nsz(float %x, float %y) {
 define float @neg_sink_multi_use(float %x, float %y) {
 ; CHECK-LABEL: @neg_sink_multi_use(
 ; CHECK-NEXT: [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
 ; CHECK-NEXT: [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
 ; CHECK-NEXT: ret float [[MUL2]]
 ;
@@ -361,7 +361,7 @@ define float @neg_sink_multi_use(float %x, float %y) {
 define float @unary_neg_mul_multi_use(float %x, float %y) {
 ; CHECK-LABEL: @unary_neg_mul_multi_use(
 ; CHECK-NEXT: [[SUB1:%.*]] = fneg float [[X:%.*]]
-; CHECK-NEXT: [[MUL:%.*]] = fmul float [[SUB1]], [[Y:%.*]]
+; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[SUB1]]
 ; CHECK-NEXT: [[MUL2:%.*]] = fmul float [[MUL]], [[SUB1]]
 ; CHECK-NEXT: ret float [[MUL2]]
 ;
@@ -449,7 +449,7 @@ declare double @llvm.sqrt.f64(double)
 define double @sqrt_squared2(double %f) {
 ; CHECK-LABEL: @sqrt_squared2(
 ; CHECK-NEXT: [[SQRT:%.*]] = call double @llvm.sqrt.f64(double [[F:%.*]])
-; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[SQRT]], [[F]]
+; CHECK-NEXT: [[MUL2:%.*]] = fmul double [[F]], [[SQRT]]
 ; CHECK-NEXT: ret double [[MUL2]]
 ;
 %sqrt = call double @llvm.sqrt.f64(double %f)
@@ -1132,7 +1132,7 @@ for.body:
 define double @fmul_negated_constant_expression(double %x) {
 ; CHECK-LABEL: @fmul_negated_constant_expression(
 ; CHECK-NEXT: [[FSUB:%.*]] = fneg double bitcast (i64 ptrtoint (ptr getelementptr inbounds (i8, ptr @g, i64 16) to i64) to double)
-; CHECK-NEXT: [[R:%.*]] = fmul double [[FSUB]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = fmul double [[X:%.*]], [[FSUB]]
 ; CHECK-NEXT: ret double [[R]]
 ;
 %fsub = fsub double -0.000000e+00, bitcast (i64 ptrtoint (ptr getelementptr inbounds ({ [2 x ptr] }, ptr @g, i64 0, i32 0, i64 2) to i64) to double)
diff --git a/llvm/test/Transforms/InstCombine/fold-ext-eq-c-with-op.ll b/llvm/test/Transforms/InstCombine/fold-ext-eq-c-with-op.ll
index 4d02d492d2aa7..248c802d03f9d 100644
--- a/llvm/test/Transforms/InstCombine/fold-ext-eq-c-with-op.ll
+++ b/llvm/test/Transforms/InstCombine/fold-ext-eq-c-with-op.ll
@@ -31,7 +31,7 @@ define i8 @fold_add_zext_eq_0_fail_multiuse_exp(i8 %x) {
 ; CHECK-LABEL: @fold_add_zext_eq_0_fail_multiuse_exp(
 ; CHECK-NEXT: [[X_EQ:%.*]] = icmp eq i8 [[X:%.*]], 0
 ; CHECK-NEXT: [[X_EQ_EXT:%.*]] = zext i1 [[X_EQ]] to i8
-; CHECK-NEXT: [[R:%.*]] = add i8 [[X_EQ_EXT]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X]], [[X_EQ_EXT]]
 ; CHECK-NEXT: call void @use.i8(i8 [[X_EQ_EXT]])
 ; CHECK-NEXT: ret i8 [[R]]
 ;
@@ -46,7 +46,7 @@ define i8 @fold_add_sext_eq_4_fail_wrong_cond(i8 %x, i8 %y) {
 ; CHECK-LABEL: @fold_add_sext_eq_4_fail_wrong_cond(
 ; CHECK-NEXT: [[X_EQ:%.*]] = icmp eq i8 [[Y:%.*]], 4
 ; CHECK-NEXT: [[X_EQ_EXT:%.*]] = sext i1 [[X_EQ]] to i8
-; CHECK-NEXT: [[R:%.*]] = add i8 [[X_EQ_EXT]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], [[X_EQ_EXT]]
 ; CHECK-NEXT: call void @use.i8(i8 [[X_EQ_EXT]])
 ; CHECK-NEXT: ret i8 [[R]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
index 1fd570bf2635b..d16f36927d71a 100644
--- a/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
+++ b/llvm/test/Transforms/InstCombine/fold-inc-of-add-of-not-x-and-y-to-sub-x-from-y.ll
@@ -92,7 +92,7 @@ define i32 @t5(i32 %x, i32 %y) {
 define i32 @t6(i32 %x, i32 %y) {
 ; CHECK-LABEL: @t6(
 ; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
@@ -108,7 +108,7 @@ define i32 @t7(i32 %x, i32 %y) {
 ; CHECK-LABEL: @t7(
 ; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
 ; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
 ; CHECK-NEXT: call void @use32(i32 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = sub i32 [[Y]], [[X]]
 ; CHECK-NEXT: ret i32 [[T2]]
@@ -202,7 +202,7 @@ define i32 @n11(i32 %x, i32 %y) {
 define i32 @n12(i32 %x, i32 %y) {
 ; CHECK-LABEL: @n12(
 ; CHECK-NEXT: [[T0:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[T1:%.*]] = add i32 [[T0]], [[Y:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i32 [[Y:%.*]], [[T0]]
 ; CHECK-NEXT: [[T2:%.*]] = add i32 [[T1]], 2
 ; CHECK-NEXT: ret i32 [[T2]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
index dedd12f8cc7a3..1c28b151825c1 100644
--- a/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
+++ b/llvm/test/Transforms/InstCombine/fold-select-fmul-if-zero.ll
@@ -428,7 +428,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32(float %x, float %y) {
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -442,7 +442,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32(float %x, float %y) {
 define float @fmul_by_fabs_var_if_0_oeq_zero_f32(float %x, float %y) {
 ; CHECK-NEXT: [[Y_FABS:%.*]] = call float @llvm.fabs.f32(float [[Y:%.*]])
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y_FABS]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %y.fabs = call float @llvm.fabs.f32(float %y)
@@ -468,7 +468,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(float %x, float %y) {
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_fmul(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nsz float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -482,7 +482,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(float %x, float %y) {
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_ninf_fmul(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul ninf nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul ninf nsz float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -496,7 +496,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(float %x, float %y) {
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_fmul(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan nsz float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan nsz float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -510,7 +510,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(float %x, float %y) {
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nnan_ninf_fmul(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -524,7 +524,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_ninf_select(float %x, float
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_nsz_nnan_ninf_select(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf nsz i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]]
+; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]]
 ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]]
 ;
 %x.is.zero = fcmp oeq float %x, 0.0
@@ -559,7 +559,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x, float %
 ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(
 ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00
 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00
-; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] =
fmul nnan ninf nsz float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -572,7 +572,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted(float %x ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz_commuted( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -586,7 +586,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -599,7 +599,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_ne ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_nnan_ninf_select_known_never_negzero_negsub( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -623,7 +623,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero(flo ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -636,7 +636,7 @@ define float @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsu ; CHECK-LABEL: @fmul_by_var_if_0_oeq_zero_f32_fmul_known_never_nan_inf_negzero_nsub( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[Y:%.*]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -693,7 +693,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32(float %x) { ; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00 -; 
CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -706,7 +706,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz(float %x) { ; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_fmul_nnan_ninf_nsz( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul nnan ninf nsz float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 @@ -719,7 +719,7 @@ define float @fmul_by_self_if_0_oeq_zero_f32_select_nnan_ninf_nsz(float %x) { ; CHECK-LABEL: @fmul_by_self_if_0_oeq_zero_f32_select_nnan_ninf_nsz( ; CHECK-NEXT: [[X_IS_ZERO:%.*]] = fcmp oeq float [[X:%.*]], 0.000000e+00 ; CHECK-NEXT: [[SCALED_X:%.*]] = select nnan ninf nsz i1 [[X_IS_ZERO]], float [[X]], float 1.000000e+00 -; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[SCALED_X]], [[X]] +; CHECK-NEXT: [[SCALED_IF_DENORMAL:%.*]] = fmul float [[X]], [[SCALED_X]] ; CHECK-NEXT: ret float [[SCALED_IF_DENORMAL]] ; %x.is.zero = fcmp oeq float %x, 0.0 diff --git a/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll b/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll index f5024664f58c3..a5c7cb3306ed0 100644 --- a/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll +++ b/llvm/test/Transforms/InstCombine/fold-signbit-test-power2.ll @@ -124,7 +124,7 @@ define i1 @pow2_or_zero_is_negative_extra_use(i8 %x) { ; CHECK-LABEL: @pow2_or_zero_is_negative_extra_use( ; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[X:%.*]] ; CHECK-NEXT: call void @use(i8 [[NEG]]) -; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = and i8 [[NEG]], [[X]] +; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = and i8 [[X]], [[NEG]] ; CHECK-NEXT: call void @use(i8 [[POW2_OR_ZERO]]) ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[X]], -128 ; CHECK-NEXT: ret i1 [[CMP]] diff --git a/llvm/test/Transforms/InstCombine/fpextend.ll b/llvm/test/Transforms/InstCombine/fpextend.ll index 19f512d717a97..c9adbe10d8db4 100644 --- a/llvm/test/Transforms/InstCombine/fpextend.ll +++ b/llvm/test/Transforms/InstCombine/fpextend.ll @@ -142,7 +142,7 @@ define float @test9(half %x, half %y) nounwind { define float @test10(half %x, float %y) nounwind { ; CHECK-LABEL: @test10( ; CHECK-NEXT: [[TMP1:%.*]] = fpext half [[X:%.*]] to float -; CHECK-NEXT: [[T56:%.*]] = fmul float [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[T56:%.*]] = fmul float [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[T56]] ; %t1 = fpext half %x to double @@ -167,7 +167,7 @@ define float @test11(half %x) nounwind { define float @test12(float %x, half %y) nounwind { ; CHECK-LABEL: @test12( ; CHECK-NEXT: [[TMP1:%.*]] = fpext half [[Y:%.*]] to float -; CHECK-NEXT: [[T34:%.*]] = fadd float [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[T34:%.*]] = fadd float [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[T34]] ; %t1 = fpext float %x to double @@ -440,8 +440,8 @@ define half @bf16_to_f32_to_f16(bfloat %a) nounwind { define bfloat @bf16_frem(bfloat %x) { ; CHECK-LABEL: @bf16_frem( -; CHECK-NEXT: [[FREM:%.*]] = frem bfloat [[X:%.*]], 0xR40C9 -; CHECK-NEXT: ret bfloat [[FREM]] +; CHECK-NEXT: [[TMP1:%.*]] = frem bfloat [[X:%.*]], 0xR40C9 +; CHECK-NEXT: ret bfloat [[TMP1]] ; %t1 = fpext bfloat %x 
to float %t2 = frem float %t1, 6.281250e+00 diff --git a/llvm/test/Transforms/InstCombine/fptrunc.ll b/llvm/test/Transforms/InstCombine/fptrunc.ll index c78df0b83d9cd..825868b107033 100644 --- a/llvm/test/Transforms/InstCombine/fptrunc.ll +++ b/llvm/test/Transforms/InstCombine/fptrunc.ll @@ -4,7 +4,7 @@ define float @fadd_fpext_op0(float %x, double %y) { ; CHECK-LABEL: @fadd_fpext_op0( ; CHECK-NEXT: [[EXT:%.*]] = fpext float [[X:%.*]] to double -; CHECK-NEXT: [[BO:%.*]] = fadd reassoc double [[EXT]], [[Y:%.*]] +; CHECK-NEXT: [[BO:%.*]] = fadd reassoc double [[Y:%.*]], [[EXT]] ; CHECK-NEXT: [[R:%.*]] = fptrunc double [[BO]] to float ; CHECK-NEXT: ret float [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/free-inversion.ll b/llvm/test/Transforms/InstCombine/free-inversion.ll index a89887a586b58..ebb9310ee0a78 100644 --- a/llvm/test/Transforms/InstCombine/free-inversion.ll +++ b/llvm/test/Transforms/InstCombine/free-inversion.ll @@ -30,7 +30,7 @@ define i8 @xor_2(i8 %a, i1 %c, i8 %x, i8 %y) { ; CHECK-LABEL: @xor_2( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -124 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[C:%.*]], i8 [[X:%.*]], i8 [[TMP1]] -; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[TMP2]], [[A:%.*]] +; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[A:%.*]], [[TMP2]] ; CHECK-NEXT: ret i8 [[NOT_AB]] ; %nx = xor i8 %x, -1 @@ -45,7 +45,7 @@ define i8 @xor_fail(i8 %a, i1 %c, i8 %x, i8 %y) { ; CHECK-LABEL: @xor_fail( ; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], -1 ; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], i8 [[NX]], i8 [[Y:%.*]] -; CHECK-NEXT: [[AB:%.*]] = xor i8 [[B]], [[A:%.*]] +; CHECK-NEXT: [[AB:%.*]] = xor i8 [[A:%.*]], [[B]] ; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[AB]], -1 ; CHECK-NEXT: ret i8 [[NOT_AB]] ; @@ -91,7 +91,7 @@ define i8 @add_fail(i8 %a, i1 %c, i8 %x, i8 %y) { ; CHECK-NEXT: [[NX:%.*]] = xor i8 [[X:%.*]], [[A:%.*]] ; CHECK-NEXT: [[YY:%.*]] = xor i8 [[Y:%.*]], 123 ; CHECK-NEXT: [[B:%.*]] = select i1 [[C:%.*]], i8 [[NX]], i8 [[YY]] -; CHECK-NEXT: [[AB:%.*]] = add i8 [[B]], [[A]] +; CHECK-NEXT: [[AB:%.*]] = add i8 [[A]], [[B]] ; CHECK-NEXT: [[NOT_AB:%.*]] = xor i8 [[AB]], -1 ; CHECK-NEXT: ret i8 [[NOT_AB]] ; @@ -605,7 +605,7 @@ define i32 @test_inv_free_i32(i1 %c1, i1 %c2, i32 %c3, i32 %c4) { ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: [[TMP0:%.*]] = phi i32 [ 0, [[B1]] ], [ -1, [[B2]] ], [ [[C3:%.*]], [[B3]] ] -; CHECK-NEXT: [[COND:%.*]] = xor i32 [[TMP0]], [[C4:%.*]] +; CHECK-NEXT: [[COND:%.*]] = xor i32 [[C4:%.*]], [[TMP0]] ; CHECK-NEXT: ret i32 [[COND]] ; entry: @@ -682,7 +682,7 @@ define i32 @test_inv_free_i32_newinst(i1 %c1, i1 %c2, i32 %c3, i32 %c4) { ; CHECK-NEXT: br label [[EXIT]] ; CHECK: exit: ; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ -1, [[B1]] ], [ 0, [[B2]] ], [ [[ASHR]], [[B3]] ] -; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[VAL]], [[C4:%.*]] +; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[C4:%.*]], [[VAL]] ; CHECK-NEXT: [[COND:%.*]] = xor i32 [[TMP0]], -1 ; CHECK-NEXT: ret i32 [[COND]] ; diff --git a/llvm/test/Transforms/InstCombine/fsh.ll b/llvm/test/Transforms/InstCombine/fsh.ll index 505a228367254..f1fba6cb272f9 100644 --- a/llvm/test/Transforms/InstCombine/fsh.ll +++ b/llvm/test/Transforms/InstCombine/fsh.ll @@ -725,7 +725,7 @@ define i32 @fsh_orconst_rotate(i32 %a) { define i32 @fsh_rotate_5(i8 %x, i32 %y) { ; CHECK-LABEL: @fsh_rotate_5( ; CHECK-NEXT: [[T1:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[T1]] ; CHECK-NEXT: [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 
[[OR1]], i32 5) ; CHECK-NEXT: ret i32 [[OR2]] ; @@ -741,7 +741,7 @@ define i32 @fsh_rotate_5(i8 %x, i32 %y) { define i32 @fsh_rotate_18(i8 %x, i32 %y) { ; CHECK-LABEL: @fsh_rotate_18( ; CHECK-NEXT: [[T1:%.*]] = zext i8 [[X:%.*]] to i32 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[T1]], [[Y:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[T1]] ; CHECK-NEXT: [[OR2:%.*]] = call i32 @llvm.fshl.i32(i32 [[OR1]], i32 [[OR1]], i32 18) ; CHECK-NEXT: ret i32 [[OR2]] ; diff --git a/llvm/test/Transforms/InstCombine/fsub.ll b/llvm/test/Transforms/InstCombine/fsub.ll index f1e7086e697e8..cffc63405ddcb 100644 --- a/llvm/test/Transforms/InstCombine/fsub.ll +++ b/llvm/test/Transforms/InstCombine/fsub.ll @@ -86,7 +86,7 @@ define float @unary_neg_sub_nsz_extra_use(float %x, float %y) { define float @sub_sub_nsz(float %x, float %y, float %z) { ; CHECK-LABEL: @sub_sub_nsz( ; CHECK-NEXT: [[TMP1:%.*]] = fsub nsz float [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[T2:%.*]] = fadd nsz float [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[T2]] ; %t1 = fsub float %x, %y @@ -219,7 +219,7 @@ define <2 x float> @neg_op1_vec_poison(<2 x float> %x, <2 x float> %y) { define double @neg_ext_op1(float %a, double %b) { ; CHECK-LABEL: @neg_ext_op1( ; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double -; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd double [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[T3]] ; %t1 = fsub float -0.0, %a @@ -231,7 +231,7 @@ define double @neg_ext_op1(float %a, double %b) { define double @unary_neg_ext_op1(float %a, double %b) { ; CHECK-LABEL: @unary_neg_ext_op1( ; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double -; CHECK-NEXT: [[T3:%.*]] = fadd double [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd double [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[T3]] ; %t1 = fneg float %a @@ -245,7 +245,7 @@ define double @unary_neg_ext_op1(float %a, double %b) { define <2 x float> @neg_trunc_op1(<2 x double> %a, <2 x float> %b) { ; CHECK-LABEL: @neg_trunc_op1( ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float> -; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x float> [[T3]] ; %t1 = fsub <2 x double> <double -0.0, double -0.0>, %a @@ -257,7 +257,7 @@ define <2 x float> @neg_trunc_op1(<2 x double> %a, <2 x float> %b) { define <2 x float> @unary_neg_trunc_op1(<2 x double> %a, <2 x float> %b) { ; CHECK-LABEL: @unary_neg_trunc_op1( ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc <2 x double> [[A:%.*]] to <2 x float> -; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd <2 x float> [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x float> [[T3]] ; %t1 = fneg <2 x double> %a @@ -271,7 +271,7 @@ define <2 x float> @unary_neg_trunc_op1(<2 x double> %a, <2 x float> %b) { define double @neg_ext_op1_fast(float %a, double %b) { ; CHECK-LABEL: @neg_ext_op1_fast( ; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double -; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[T3]] ; %t1 = fsub float -0.0, %a @@ -283,7 +283,7 @@ define double @neg_ext_op1_fast(float %a, double %b) { define double @unary_neg_ext_op1_fast(float %a, double %b) { ; CHECK-LABEL: @unary_neg_ext_op1_fast( ; CHECK-NEXT: [[TMP1:%.*]] = fpext float [[A:%.*]] to double -; CHECK-NEXT: [[T3:%.*]] = fadd fast
double [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd fast double [[B:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[T3]] ; %t1 = fneg float %a @@ -332,7 +332,7 @@ define float @neg_trunc_op1_extra_use(double %a, float %b) { ; CHECK-LABEL: @neg_trunc_op1_extra_use( ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float ; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]] -; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd float [[B:%.*]], [[TMP1]] ; CHECK-NEXT: call void @use(float [[T2]]) ; CHECK-NEXT: ret float [[T3]] ; @@ -347,7 +347,7 @@ define float @unary_neg_trunc_op1_extra_use(double %a, float %b) { ; CHECK-LABEL: @unary_neg_trunc_op1_extra_use( ; CHECK-NEXT: [[TMP1:%.*]] = fptrunc double [[A:%.*]] to float ; CHECK-NEXT: [[T2:%.*]] = fneg float [[TMP1]] -; CHECK-NEXT: [[T3:%.*]] = fadd float [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = fadd float [[B:%.*]], [[TMP1]] ; CHECK-NEXT: call void @use(float [[T2]]) ; CHECK-NEXT: ret float [[T3]] ; @@ -407,7 +407,7 @@ define float @PR37605(float %conv) { define double @fsub_fdiv_fneg1(double %x, double %y, double %z) { ; CHECK-LABEL: @fsub_fdiv_fneg1( ; CHECK-NEXT: [[TMP1:%.*]] = fdiv double [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[R]] ; %neg = fsub double -0.000000e+00, %x @@ -419,7 +419,7 @@ define double @fsub_fdiv_fneg1(double %x, double %y, double %z) { define <2 x double> @fsub_fdiv_fneg2(<2 x double> %x, <2 x double> %y, <2 x double> %z) { ; CHECK-LABEL: @fsub_fdiv_fneg2( ; CHECK-NEXT: [[TMP1:%.*]] = fdiv <2 x double> [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd <2 x double> [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd <2 x double> [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x double> [[R]] ; %neg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %x @@ -431,7 +431,7 @@ define <2 x double> @fsub_fdiv_fneg2(<2 x double> %x, <2 x double> %y, <2 x doub define double @fsub_fmul_fneg1(double %x, double %y, double %z) { ; CHECK-LABEL: @fsub_fmul_fneg1( ; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[R]] ; %neg = fsub double -0.000000e+00, %x @@ -443,7 +443,7 @@ define double @fsub_fmul_fneg1(double %x, double %y, double %z) { define double @fsub_fmul_fneg2(double %x, double %y, double %z) { ; CHECK-LABEL: @fsub_fmul_fneg2( ; CHECK-NEXT: [[TMP1:%.*]] = fmul double [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd double [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd double [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[R]] ; %neg = fsub double -0.000000e+00, %x @@ -487,7 +487,7 @@ declare void @use_vec(<2 x float>) define <2 x float> @fsub_fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 x float> %z) { ; CHECK-LABEL: @fsub_fmul_fneg1_extra_use( ; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]] -; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]]) ; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]] ; CHECK-NEXT: ret <2 x float> [[R]] @@ -502,7 +502,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use(<2 x float> %x, <2 x float> %y, <2 define float @fsub_fmul_fneg2_extra_use(float %x, float %y, float %z) { ; CHECK-LABEL: @fsub_fmul_fneg2_extra_use( ; CHECK-NEXT: [[NEG:%.*]] =
fneg float [[X:%.*]] -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]] ; CHECK-NEXT: call void @use(float [[MUL]]) ; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]] ; CHECK-NEXT: ret float [[R]] @@ -519,7 +519,7 @@ define float @fsub_fdiv_fneg1_extra_use2(float %x, float %y, float %z) { ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]] ; CHECK-NEXT: call void @use(float [[NEG]]) ; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[X]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[R]] ; %neg = fsub float -0.000000e+00, %x @@ -534,7 +534,7 @@ define float @fsub_fdiv_fneg2_extra_use2(float %x, float %y, float %z) { ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]] ; CHECK-NEXT: call void @use(float [[NEG]]) ; CHECK-NEXT: [[TMP1:%.*]] = fdiv float [[Y:%.*]], [[X]] -; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[R]] ; %neg = fsub float -0.000000e+00, %x @@ -549,7 +549,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use2(<2 x float> %x, <2 x float> %y, < ; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]] ; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]]) ; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd <2 x float> [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x float> [[R]] ; %neg = fsub <2 x float> <float -0.000000e+00, float -0.000000e+00>, %x @@ -564,7 +564,7 @@ define float @fsub_fmul_fneg2_extra_use2(float %x, float %y, float %z) { ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]] ; CHECK-NEXT: call void @use(float [[NEG]]) ; CHECK-NEXT: [[TMP1:%.*]] = fmul float [[X]], [[Y:%.*]] -; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[Z:%.*]] +; CHECK-NEXT: [[R:%.*]] = fadd float [[Z:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[R]] ; %neg = fsub float -0.000000e+00, %x @@ -612,7 +612,7 @@ define <2 x float> @fsub_fmul_fneg1_extra_use3(<2 x float> %x, <2 x float> %y, < ; CHECK-LABEL: @fsub_fmul_fneg1_extra_use3( ; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]] ; CHECK-NEXT: call void @use_vec(<2 x float> [[NEG]]) -; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul <2 x float> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: call void @use_vec(<2 x float> [[MUL]]) ; CHECK-NEXT: [[R:%.*]] = fsub <2 x float> [[Z:%.*]], [[MUL]] ; CHECK-NEXT: ret <2 x float> [[R]] @@ -629,7 +629,7 @@ define float @fsub_fmul_fneg2_extra_use3(float %x, float %y, float %z) { ; CHECK-LABEL: @fsub_fmul_fneg2_extra_use3( ; CHECK-NEXT: [[NEG:%.*]] = fneg float [[X:%.*]] ; CHECK-NEXT: call void @use(float [[NEG]]) -; CHECK-NEXT: [[MUL:%.*]] = fmul float [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul float [[Y:%.*]], [[NEG]] ; CHECK-NEXT: call void @use(float [[MUL]]) ; CHECK-NEXT: [[R:%.*]] = fsub float [[Z:%.*]], [[MUL]] ; CHECK-NEXT: ret float [[R]] @@ -805,7 +805,7 @@ define float @fsub_fadd_fsub_reassoc(float %w, float %x, float %y, float %z) { define <2 x float> @fsub_fadd_fsub_reassoc_commute(<2 x float> %w, <2 x float> %x, <2 x float> %y, <2 x float> %z) { ; CHECK-LABEL: @fsub_fadd_fsub_reassoc_commute( ; CHECK-NEXT: [[D:%.*]] = fdiv <2 x float> [[Y:%.*]], -; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x float> [[D]], [[W:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <2 x float> [[W:%.*]], [[D]] ; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <2 x float> [[X:%.*]],
[[Z:%.*]] ; CHECK-NEXT: [[S2:%.*]] = fsub fast <2 x float> [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret <2 x float> [[S2]] @@ -823,7 +823,7 @@ define float @fsub_fadd_fsub_reassoc_twice(float %v, float %w, float %x, float % ; CHECK-LABEL: @fsub_fadd_fsub_reassoc_twice( ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc nsz float [[W:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc nsz float [[X:%.*]], [[V:%.*]] -; CHECK-NEXT: [[TMP3:%.*]] = fadd reassoc nsz float [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = fadd reassoc nsz float [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: [[S3:%.*]] = fsub reassoc nsz float [[TMP2]], [[TMP3]] ; CHECK-NEXT: ret float [[S3]] ; diff --git a/llvm/test/Transforms/InstCombine/funnel.ll b/llvm/test/Transforms/InstCombine/funnel.ll index a54e6e4642b75..fa0d59b226998 100644 --- a/llvm/test/Transforms/InstCombine/funnel.ll +++ b/llvm/test/Transforms/InstCombine/funnel.ll @@ -464,10 +464,10 @@ define i32 @fshl_concat_i8_i8_different_slot(i8 %x, i8 %y, ptr %addr) { define i32 @fshl_concat_unknown_source(i32 %zext.x, i32 %zext.y, ptr %addr) { ; CHECK-LABEL: @fshl_concat_unknown_source( ; CHECK-NEXT: [[SLX:%.*]] = shl i32 [[ZEXT_X:%.*]], 16 -; CHECK-NEXT: [[XY:%.*]] = or i32 [[SLX]], [[ZEXT_Y:%.*]] +; CHECK-NEXT: [[XY:%.*]] = or i32 [[ZEXT_Y:%.*]], [[SLX]] ; CHECK-NEXT: store i32 [[XY]], ptr [[ADDR:%.*]], align 4 ; CHECK-NEXT: [[SLY:%.*]] = shl i32 [[ZEXT_Y]], 16 -; CHECK-NEXT: [[YX:%.*]] = or i32 [[SLY]], [[ZEXT_X]] +; CHECK-NEXT: [[YX:%.*]] = or i32 [[ZEXT_X]], [[SLY]] ; CHECK-NEXT: ret i32 [[YX]] ; %slx = shl i32 %zext.x, 16 diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll index a9addfcb182f7..c805a64d5cd07 100644 --- a/llvm/test/Transforms/InstCombine/getelementptr.ll +++ b/llvm/test/Transforms/InstCombine/getelementptr.ll @@ -269,7 +269,7 @@ define <2 x i1> @test13_fixed_scalable(i64 %X, ptr %P, <2 x i64> %y) nounwind { ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4 ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0 ; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer -; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT]], [[Y:%.*]] +; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[Y:%.*]], [[DOTSPLAT]] ; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], [[B_IDX]] ; CHECK-NEXT: ret <2 x i1> [[C]] ; @@ -288,7 +288,7 @@ define <vscale x 2 x i1> @test13_scalable_scalable(i64 %X, ptr %P, <vscale x 2 ; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4 ; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP2]], i64 0 ; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT1]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer -; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <vscale x 2 x i64> [[DOTSPLAT2]], [[Y:%.*]] +; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <vscale x 2 x i64> [[Y:%.*]], [[DOTSPLAT2]] ; CHECK-NEXT: [[C:%.*]] = icmp eq <vscale x 2 x i64> [[A_IDX]], [[B_IDX]] ; CHECK-NEXT: ret <vscale x 2 x i1> [[C]] ; diff --git a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll index e4cae13519783..6049997db4d1a 100644 --- a/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll +++ b/llvm/test/Transforms/InstCombine/hoist-negation-out-of-bias-calculation.ll @@ -15,7 +15,7 @@ define i8 @t0(i8 %x, i8 %y) { ; CHECK-LABEL: @t0( ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 0, [[TMP2]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] ; @@ -45,7 +45,7 @@ define i8
@t1_commutative(i8 %y) { define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t2_vec( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP2]] ; CHECK-NEXT: ret <2 x i8> [[NEGBIAS]] ; @@ -58,7 +58,7 @@ define <2 x i8> @t2_vec(<2 x i8> %x, <2 x i8> %y) { define <2 x i8> @t3_vec_poison(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t3_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub <2 x i8> zeroinitializer, [[TMP2]] ; CHECK-NEXT: ret <2 x i8> [[NEGBIAS]] ; @@ -76,7 +76,7 @@ define i8 @n4_extrause0(i8 %x, i8 %y) { ; CHECK-LABEL: @n4_extrause0( ; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[NEGY]]) -; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]] +; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] ; @@ -89,7 +89,7 @@ define i8 @n4_extrause0(i8 %x, i8 %y) { define i8 @n5_extrause1(i8 %x, i8 %y) { ; CHECK-LABEL: @n5_extrause1( ; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]] +; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]] ; CHECK-NEXT: call void @use8(i8 [[UNBIASEDX]]) ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] @@ -104,7 +104,7 @@ define i8 @n6_extrause2(i8 %x, i8 %y) { ; CHECK-LABEL: @n6_extrause2( ; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[NEGY]]) -; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X:%.*]] +; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X:%.*]], [[NEGY]] ; CHECK-NEXT: call void @use8(i8 [[UNBIASEDX]]) ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] @@ -122,7 +122,7 @@ define i8 @n6_extrause2(i8 %x, i8 %y) { define i8 @n7(i8 %x, i8 %y) { ; CHECK-LABEL: @n7( ; CHECK-NEXT: [[NEGY_NOT:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[NEGBIAS:%.*]] = and i8 [[NEGY_NOT]], [[X:%.*]] +; CHECK-NEXT: [[NEGBIAS:%.*]] = and i8 [[X:%.*]], [[NEGY_NOT]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] ; %negy = sub i8 0, %y @@ -147,7 +147,7 @@ define i8 @n8(i8 %x, i8 %y) { define i8 @n9(i8 %x0, i8 %x1, i8 %y) { ; CHECK-LABEL: @n9( ; CHECK-NEXT: [[NEGY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[NEGY]], [[X1:%.*]] +; CHECK-NEXT: [[UNBIASEDX:%.*]] = and i8 [[X1:%.*]], [[NEGY]] ; CHECK-NEXT: [[NEGBIAS:%.*]] = sub i8 [[UNBIASEDX]], [[X0:%.*]] ; CHECK-NEXT: ret i8 [[NEGBIAS]] ; diff --git a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll index 200e7ba8e6773..f92b10b0ccb37 100644 --- a/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll +++ b/llvm/test/Transforms/InstCombine/hoist-xor-by-constant-from-xor-by-value.ll @@ -64,7 +64,7 @@ define i8 @t4_extrause(i8 %x, i8 %y) { define i8 @t5_commutativity(i8 %x) { ; CHECK-LABEL: @t5_commutativity( ; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() -; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = xor i8
[[TMP1]], 42 ; CHECK-NEXT: ret i8 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/icmp-add.ll b/llvm/test/Transforms/InstCombine/icmp-add.ll index 2ceb44b89eb9e..0c141d4b8e73a 100644 --- a/llvm/test/Transforms/InstCombine/icmp-add.ll +++ b/llvm/test/Transforms/InstCombine/icmp-add.ll @@ -207,7 +207,7 @@ define i1 @cvt_icmp_neg_1_sext_plus_zext_eq(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_neg_1_sext_plus_zext_eq( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -237,7 +237,7 @@ define i1 @cvt_icmp_1_sext_plus_zext_eq(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_1_sext_plus_zext_eq( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -458,7 +458,7 @@ define i1 @cvt_icmp_neg_1_sext_plus_zext_ne(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_neg_1_sext_plus_zext_ne( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -487,7 +487,7 @@ define i1 @cvt_icmp_1_sext_plus_zext_ne(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_1_sext_plus_zext_ne( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -557,7 +557,7 @@ define i1 @cvt_icmp_neg_1_zext_plus_sext_eq(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_neg_1_zext_plus_sext_eq( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -587,7 +587,7 @@ define i1 @cvt_icmp_1_zext_plus_sext_eq(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_1_zext_plus_sext_eq( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[T:%.*]] = and i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[T:%.*]] = and i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -628,7 +628,7 @@ define i1 @cvt_icmp_neg_1_zext_plus_sext_ne(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_neg_1_zext_plus_sext_ne( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -657,7 +657,7 @@ define i1 @cvt_icmp_1_zext_plus_sext_ne(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @cvt_icmp_1_zext_plus_sext_ne( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[T:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[T:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[T]] ; bb: @@ -849,7 +849,7 @@ define i1 @test_sext_zext_cvt_neg_2_ult_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_sext_zext_cvt_neg_2_ult_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -864,7 +864,7 @@ define i1 @test_sext_zext_cvt_neg_1_ult_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: 
@test_sext_zext_cvt_neg_1_ult_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -892,7 +892,7 @@ define i1 @test_sext_zext_cvt_2_ult_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_sext_zext_cvt_2_ult_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -907,7 +907,7 @@ define i1 @test_zext_sext_cvt_neg_1_ult_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_neg_1_ult_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1047,7 +1047,7 @@ define i1 @test_zext_sext_cvt_neg_2_ugt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_neg_2_ugt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1089,7 +1089,7 @@ define i1 @test_zext_sext_cvt_1_ugt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_1_ugt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[TMP1]] ; bb: @@ -1104,7 +1104,7 @@ define i1 @test_zext_sext_cvt_2_ugt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_2_ugt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1256,7 +1256,7 @@ define i1 @test_zext_sext_cvt_neg_1_sgt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_neg_1_sgt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[ARG1_NOT:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1_NOT]], [[ARG:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG:%.*]], [[ARG1_NOT]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1271,7 +1271,7 @@ define i1 @test_zext_sext_cvt_0_sgt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_0_sgt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1420,7 +1420,7 @@ define i1 @test_zext_sext_cvt_0_slt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_0_slt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[TMP1]] ; bb: @@ -1435,7 +1435,7 @@ define i1 @test_zext_sext_cvt_1_slt_icmp(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_zext_sext_cvt_1_slt_icmp( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1617,7 +1617,7 @@ define i1 @test_cvt_icmp19(i1 %arg, i1 %arg1) { ; 
CHECK-LABEL: @test_cvt_icmp19( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = and i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[I4:%.*]] = and i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1646,7 +1646,7 @@ define i1 @test_cvt_icmp21(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_cvt_icmp21( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1661,7 +1661,7 @@ define i1 @test_cvt_icmp22(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_cvt_icmp22( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[TMP0]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1689,7 +1689,7 @@ define i1 @test_cvt_icmp24(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_cvt_icmp24( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[ARG_NOT:%.*]] = xor i1 [[ARG:%.*]], true -; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG_NOT]], [[ARG1:%.*]] +; CHECK-NEXT: [[I4:%.*]] = or i1 [[ARG1:%.*]], [[ARG_NOT]] ; CHECK-NEXT: ret i1 [[I4]] ; bb: @@ -1704,7 +1704,7 @@ define i1 @test_cvt_icmp25(i1 %arg, i1 %arg1) { ; CHECK-LABEL: @test_cvt_icmp25( ; CHECK-NEXT: bb: ; CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[ARG1:%.*]], true -; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[ARG:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[TMP1]] ; bb: @@ -2390,7 +2390,7 @@ define <2 x i1> @icmp_eq_add_non_splat2(<2 x i32> %a) { define i1 @without_nsw_nuw(i8 %x, i8 %y) { ; CHECK-LABEL: @without_nsw_nuw( ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 2 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[TOBOOL]] ; %t1 = add i8 %x, 37 @@ -2402,7 +2402,7 @@ define i1 @without_nsw_nuw(i8 %x, i8 %y) { define i1 @with_nsw_nuw(i8 %x, i8 %y) { ; CHECK-LABEL: @with_nsw_nuw( ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i8 [[X:%.*]], 2 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[TOBOOL]] ; %t1 = add nsw nuw i8 %x, 37 @@ -2414,7 +2414,7 @@ define i1 @with_nsw_nuw(i8 %x, i8 %y) { define i1 @with_nsw_large(i8 %x, i8 %y) { ; CHECK-LABEL: @with_nsw_large( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i8 [[X:%.*]], 2 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[TOBOOL]] ; %t1 = add nsw i8 %x, 37 @@ -2438,7 +2438,7 @@ define i1 @with_nsw_small(i8 %x, i8 %y) { define i1 @with_nuw_large(i8 %x, i8 %y) { ; CHECK-LABEL: @with_nuw_large( ; CHECK-NEXT: [[TMP1:%.*]] = add nuw i8 [[X:%.*]], 2 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[TOBOOL]] ; %t1 = add nuw i8 %x, 37 @@ -2462,7 +2462,7 @@ define i1 @with_nuw_small(i8 %x, i8 %y) { define i1 @with_nuw_large_negative(i8 %x, i8 %y) { ; CHECK-LABEL: @with_nuw_large_negative( ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], -2 -; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[TOBOOL]] ; %t1 = add nuw i8 %x, -37 @@ -2751,7 +2751,7 @@ define i32 @decrement_min(i32 %x) { define i1 
@icmp_add_add_C(i32 %a, i32 %b) { ; CHECK-LABEL: @icmp_add_add_C( ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %add1 = add i32 %a, %b @@ -2763,7 +2763,7 @@ define i1 @icmp_add_add_C(i32 %a, i32 %b) { define i1 @icmp_add_add_C_pred(i32 %a, i32 %b) { ; CHECK-LABEL: @icmp_add_add_C_pred( ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]] -; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %add1 = add i32 %a, %b @@ -2837,7 +2837,7 @@ define <2 x i1> @icmp_add_add_C_vector_undef(<2 x i8> %a, <2 x i8> %b) { define i1 @icmp_add_add_C_comm1(i32 %a, i32 %b) { ; CHECK-LABEL: @icmp_add_add_C_comm1( ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B:%.*]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %add1 = add i32 %b, %a @@ -2923,7 +2923,7 @@ define i1 @icmp_add_add_C_extra_use2(i32 %a, i32 %b) { ; CHECK-NEXT: [[ADD1:%.*]] = add i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: call void @use(i32 [[ADD1]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i32 0, [[B]] -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[A]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[A]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %add1 = add i32 %a, %b diff --git a/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll b/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll index c8a3dfcd68cd4..711d59c1ebfd5 100644 --- a/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll +++ b/llvm/test/Transforms/InstCombine/icmp-and-add-sub-xor-p2.ll @@ -6,10 +6,10 @@ declare void @use.v2i8(<2 x i8>) define i1 @src_add_eq_p2(i8 %x, i8 %yy) { ; CHECK-LABEL: @src_add_eq_p2( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]] -; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]] +; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]] ; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y]], [[X:%.*]] ; CHECK-NEXT: call void @use.i8(i8 [[X1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -25,8 +25,8 @@ define i1 @src_add_eq_p2(i8 %x, i8 %yy) { define i1 @src_add_eq_p2_fail_multiuse(i8 %x, i8 %yy) { ; CHECK-LABEL: @src_add_eq_p2_fail_multiuse( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]] -; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]] -; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]] +; CHECK-NEXT: [[X1:%.*]] = add i8 [[X:%.*]], [[Y]] ; CHECK-NEXT: call void @use.i8(i8 [[X1]]) ; CHECK-NEXT: [[V:%.*]] = and i8 [[X1]], [[Y]] ; CHECK-NEXT: call void @use.i8(i8 [[V]]) @@ -46,10 +46,10 @@ define i1 @src_add_eq_p2_fail_multiuse(i8 %x, i8 %yy) { define i1 @src_xor_ne_zero(i8 %x, i8 %yy) { ; CHECK-LABEL: @src_xor_ne_zero( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]] -; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]] -; CHECK-NEXT: [[X1:%.*]] = xor i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]] +; CHECK-NEXT: [[X1:%.*]] = xor i8 [[X:%.*]], [[Y]] ; CHECK-NEXT: call void @use.i8(i8 [[X1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], [[Y]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -65,9 +65,9 @@ define i1 @src_xor_ne_zero(i8 %x, 
i8 %yy) { define i1 @src_xor_ne_zero_fail_different_p2(i8 %x, i8 %yy) { ; CHECK-LABEL: @src_xor_ne_zero_fail_different_p2( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[YY:%.*]] -; CHECK-NEXT: [[Y:%.*]] = and i8 [[NY]], [[YY]] +; CHECK-NEXT: [[Y:%.*]] = and i8 [[YY]], [[NY]] ; CHECK-NEXT: [[Y2:%.*]] = shl i8 [[Y]], 1 -; CHECK-NEXT: [[X1:%.*]] = xor i8 [[Y]], [[X:%.*]] +; CHECK-NEXT: [[X1:%.*]] = xor i8 [[X:%.*]], [[Y]] ; CHECK-NEXT: call void @use.i8(i8 [[X1]]) ; CHECK-NEXT: [[V:%.*]] = and i8 [[X1]], [[Y2]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[V]], 0 @@ -86,10 +86,10 @@ define i1 @src_xor_ne_zero_fail_different_p2(i8 %x, i8 %yy) { define <2 x i1> @src_sub_ne_p2(<2 x i8> %x, <2 x i8> %yy) { ; CHECK-LABEL: @src_sub_ne_p2( ; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[YY:%.*]] -; CHECK-NEXT: [[Y:%.*]] = and <2 x i8> [[NY]], [[YY]] +; CHECK-NEXT: [[Y:%.*]] = and <2 x i8> [[YY]], [[NY]] ; CHECK-NEXT: [[X1:%.*]] = sub <2 x i8> [[X:%.*]], [[Y]] ; CHECK-NEXT: call void @use.v2i8(<2 x i8> [[X1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp ne <2 x i8> [[TMP1]], zeroinitializer ; CHECK-NEXT: ret <2 x i1> [[R]] ; @@ -107,7 +107,7 @@ define <2 x i1> @src_sub_eq_zero(<2 x i8> %x, <2 x i8> %yy) { ; CHECK-NEXT: [[Y:%.*]] = shl <2 x i8> , [[YY:%.*]] ; CHECK-NEXT: [[X1:%.*]] = sub <2 x i8> [[X:%.*]], [[Y]] ; CHECK-NEXT: call void @use.v2i8(<2 x i8> [[X1]]) -; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[Y]], [[X]] +; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[R:%.*]] = icmp eq <2 x i8> [[TMP1]], [[Y]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll index 0aace5f52c96c..46fd96193909d 100644 --- a/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll +++ b/llvm/test/Transforms/InstCombine/icmp-and-lowbit-mask.ll @@ -137,7 +137,7 @@ define i1 @src_is_mask_or(i8 %x_in, i8 %y) { define i1 @src_is_mask_xor(i8 %x_in, i8 %y) { ; CHECK-LABEL: @src_is_mask_xor( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[MASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[MASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 ; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]] ; CHECK-NEXT: ret i1 [[R]] @@ -153,7 +153,7 @@ define i1 @src_is_mask_xor(i8 %x_in, i8 %y) { define i1 @src_is_mask_xor_fail_notmask(i8 %x_in, i8 %y) { ; CHECK-LABEL: @src_is_mask_xor_fail_notmask( ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[NOTMASK:%.*]] = xor i8 [[TMP1]], [[Y]] +; CHECK-NEXT: [[NOTMASK:%.*]] = xor i8 [[Y]], [[TMP1]] ; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[X_IN:%.*]], -124 ; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[NOTMASK]], [[TMP2]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP3]], -1 @@ -171,7 +171,7 @@ define i1 @src_is_mask_xor_fail_notmask(i8 %x_in, i8 %y) { define i1 @src_is_mask_select(i8 %x_in, i8 %y, i1 %cond) { ; CHECK-LABEL: @src_is_mask_select( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15 ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 ; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]] @@ -191,7 +191,7 @@ define i1 @src_is_mask_select_fail_wrong_pattern(i8 %x_in, i8 %y, i1 %cond, i8 % ; CHECK-LABEL: 
@src_is_mask_select_fail_wrong_pattern( ; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123 ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15 ; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[AND]], [[Z:%.*]] @@ -246,7 +246,7 @@ define i1 @src_is_mask_shl_lshr_fail_not_allones(i8 %x_in, i8 %y, i1 %cond) { define i1 @src_is_mask_lshr(i8 %x_in, i8 %y, i8 %z, i1 %cond) { ; CHECK-LABEL: @src_is_mask_lshr( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15 ; CHECK-NEXT: [[MASK:%.*]] = lshr i8 [[SMASK]], [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 @@ -266,7 +266,7 @@ define i1 @src_is_mask_lshr(i8 %x_in, i8 %y, i8 %z, i1 %cond) { define i1 @src_is_mask_ashr(i8 %x_in, i8 %y, i8 %z, i1 %cond) { ; CHECK-LABEL: @src_is_mask_ashr( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[SMASK:%.*]] = select i1 [[COND:%.*]], i8 [[YMASK]], i8 15 ; CHECK-NEXT: [[MASK:%.*]] = ashr i8 [[SMASK]], [[Z:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 @@ -302,7 +302,7 @@ define i1 @src_is_mask_p2_m1(i8 %x_in, i8 %y) { define i1 @src_is_mask_umax(i8 %x_in, i8 %y) { ; CHECK-LABEL: @src_is_mask_umax( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umax.i8(i8 [[YMASK]], i8 3) ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 ; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[MASK]] @@ -321,7 +321,7 @@ define i1 @src_is_mask_umax(i8 %x_in, i8 %y) { define i1 @src_is_mask_umin(i8 %x_in, i8 %y, i8 %z) { ; CHECK-LABEL: @src_is_mask_umin( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[ZMASK:%.*]] = lshr i8 15, [[Z:%.*]] ; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 [[ZMASK]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 @@ -342,7 +342,7 @@ define i1 @src_is_mask_umin(i8 %x_in, i8 %y, i8 %z) { define i1 @src_is_mask_umin_fail_mismatch(i8 %x_in, i8 %y) { ; CHECK-LABEL: @src_is_mask_umin_fail_mismatch( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.umin.i8(i8 [[YMASK]], i8 -32) ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], -124 ; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[MASK]], [[TMP1]] @@ -362,7 +362,7 @@ define i1 @src_is_mask_umin_fail_mismatch(i8 %x_in, i8 %y) { define i1 @src_is_mask_smax(i8 %x_in, i8 %y) { ; CHECK-LABEL: @src_is_mask_smax( ; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]] +; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]] ; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smax.i8(i8 [[YMASK]], i8 -1) ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123 ; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]] @@ -381,7 +381,7 @@ define i1 
@src_is_mask_smax(i8 %x_in, i8 %y) {
define i1 @src_is_mask_smin(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_mask_smin(
; CHECK-NEXT: [[Y_M1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y_M1]], [[Y]]
+; CHECK-NEXT: [[YMASK:%.*]] = xor i8 [[Y]], [[Y_M1]]
; CHECK-NEXT: [[MASK:%.*]] = call i8 @llvm.smin.i8(i8 [[YMASK]], i8 0)
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[MASK]]
@@ -456,7 +456,7 @@ define i1 @src_is_notmask_x_xor_neg_x(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
@@ -474,7 +474,7 @@ define i1 @src_is_notmask_x_xor_neg_x_inv(i8 %x_in, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_is_notmask_x_xor_neg_x_inv(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i8 [[Y]], [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = select i1 [[COND:%.*]], i8 [[TMP2]], i8 7
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
@@ -590,7 +590,7 @@ define i1 @src_is_notmask_neg_p2_fail_not_invertable(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_notmask_neg_p2_fail_not_invertable(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X_IN:%.*]], -124
; CHECK-NEXT: [[TMP2:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = or i8 [[Y]], [[TMP2]]
; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[TMP3]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -607,7 +607,7 @@ define i1 @src_is_notmask_xor_fail(i8 %x_in, i8 %y) {
; CHECK-LABEL: @src_is_notmask_xor_fail(
; CHECK-NEXT: [[X:%.*]] = xor i8 [[X_IN:%.*]], 123
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[Y:%.*]]
-; CHECK-NEXT: [[NOTMASK_REV:%.*]] = xor i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[NOTMASK_REV:%.*]] = xor i8 [[Y]], [[TMP1]]
; CHECK-NEXT: [[NOTMASK:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[NOTMASK_REV]])
; CHECK-NEXT: [[AND:%.*]] = and i8 [[X]], [[NOTMASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[X]]
@@ -675,7 +675,7 @@ define i1 @src_x_and_mask_slt(i8 %x, i8 %y, i1 %cond) {
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
-; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
@@ -693,7 +693,7 @@ define i1 @src_x_and_mask_sge(i8 %x, i8 %y, i1 %cond) {
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
; CHECK-NEXT: [[MASK_POS:%.*]] = icmp sgt i8 [[MASK]], -1
; CHECK-NEXT: call void @llvm.assume(i1 [[MASK_POS]])
-; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
@@ -709,7 +709,7 @@ define i1 @src_x_and_mask_slt_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_mask_slt_fail_maybe_neg(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -724,7 +724,7 @@ define i1 @src_x_and_mask_sge_fail_maybe_neg(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_mask_sge_fail_maybe_neg(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -821,7 +821,7 @@ define i1 @src_x_and_nmask_slt_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_nmask_slt_fail_maybe_z(
; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[NOT_MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[AND]], [[NOT_MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -836,7 +836,7 @@ define i1 @src_x_and_nmask_sge_fail_maybe_z(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_and_nmask_sge_fail_maybe_z(
; CHECK-NEXT: [[NOT_MASK0:%.*]] = shl nsw i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[NOT_MASK:%.*]] = select i1 [[COND:%.*]], i8 [[NOT_MASK0]], i8 0
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[NOT_MASK]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[NOT_MASK]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[AND]], [[NOT_MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -874,7 +874,7 @@ define i1 @src_x_or_mask_ne(i8 %x, i8 %y, i1 %cond) {
; CHECK-LABEL: @src_x_or_mask_ne(
; CHECK-NEXT: [[MASK0:%.*]] = lshr i8 -1, [[Y:%.*]]
; CHECK-NEXT: [[MASK:%.*]] = select i1 [[COND:%.*]], i8 [[MASK0]], i8 0
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[MASK]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[MASK]]
; CHECK-NEXT: ret i1 [[R]]
;
%mask0 = lshr i8 -1, %y
diff --git a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
index 2f797d726afe3..684ece21b1166 100644
--- a/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-and-shift.ll
@@ -496,7 +496,7 @@ define i1 @eq_and_lshr_minval_commute(i8 %px, i8 %y) {
define i1 @eq_and_shl_two(i8 %x, i8 %y) {
; CHECK-LABEL: @eq_and_shl_two(
; CHECK-NEXT: [[POW2_OR_ZERO:%.*]] = shl i8 2, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[POW2_OR_ZERO]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[POW2_OR_ZERO]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[AND]], [[POW2_OR_ZERO]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -510,7 +510,7 @@ define i1 @eq_and_shl_two(i8 %x, i8 %y) {
define i1 @slt_and_shl_one(i8 %x, i8 %y) {
; CHECK-LABEL: @slt_and_shl_one(
; CHECK-NEXT: [[POW2:%.*]] = shl nuw i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[POW2]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[POW2]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[AND]], [[POW2]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -609,7 +609,7 @@ define i1 @fold_ne_rhs_fail_shift_not_1s(i8 %x, i8 %yy) {
define i1 @test_shr_and_1_ne_0(i32 %a, i32 %b) {
; CHECK-LABEL: @test_shr_and_1_ne_0(
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 1, [[B:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -710,7 +710,7 @@ define i1 @test_const_shr_and_1_ne_0_multi_use_lshr_negative(i32 %b) {
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 42, [[B:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND]], 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[SHR]], [[B]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B]], [[SHR]]
; CHECK-NEXT: [[RET:%.*]] = and i1 [[CMP1]], [[CMP2]]
; CHECK-NEXT: ret i1 [[RET]]
;
@@ -727,7 +727,7 @@ define i1 @test_const_shr_and_1_ne_0_multi_use_and_negative(i32 %b) {
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 42, [[B:%.*]]
; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 1
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[AND]], 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[AND]], [[B]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B]], [[AND]]
; CHECK-NEXT: [[RET:%.*]] = and i1 [[CMP1]], [[CMP2]]
; CHECK-NEXT: ret i1 [[RET]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
index a595ddb07db56..76f8c926e9bec 100644
--- a/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-custom-dl.ll
@@ -102,7 +102,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT: [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP1:%.*]] = shl i16 [[I_TR]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i16 [[J:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
diff --git a/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll b/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
index 30c97a7f25275..154958b0e3fad 100644
--- a/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-equality-rotate.ll
@@ -7,7 +7,7 @@ declare void @use.i8(i8)
define i1 @cmpeq_rorr_to_rorl(i8 %x, i8 %C) {
; CHECK-LABEL: @cmpeq_rorr_to_rorl(
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[C:%.*]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C)
@@ -65,7 +65,7 @@ define i1 @cmpne_rorr_rorr(i8 %x, i8 %C0, i8 %C1) {
; CHECK-LABEL: @cmpne_rorr_rorr(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[C0:%.*]], [[C1:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP2]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr0 = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C0)
@@ -78,7 +78,7 @@ define i1 @cmpne_rorrX_rorrY(i8 %x, i8 %y, i8 %C0, i8 %C1) {
; CHECK-LABEL: @cmpne_rorrX_rorrY(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[C0:%.*]], [[C1:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorr0 = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 %C0)
@@ -135,7 +135,7 @@ define i1 @cmpeq_rorlXC_rorlYC_multiuse1(i8 %x, i8 %y) {
; CHECK-NEXT: [[Y_RORL1:%.*]] = call i8 @llvm.fshl.i8(i8 [[Y:%.*]], i8 [[Y]], i8 3)
; CHECK-NEXT: call void @use.i8(i8 [[Y_RORL1]])
; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 3)
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_rorl0 = call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 6)
diff --git a/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll b/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
index e8a78df6d5f75..b8e8ed0eaf1da 100644
--- a/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-equality-xor.ll
@@ -84,7 +84,7 @@ define i1 @cmpeq_xor_cst1_multiuse(i32 %a, i32 %b) {
define i1 @cmpeq_xor_cst1_commuted(i32 %a, i32 %b) {
; CHECK-LABEL: @cmpeq_xor_cst1_commuted(
; CHECK-NEXT: [[B2:%.*]] = mul i32 [[B:%.*]], [[B]]
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[B2]], [[A:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B2]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], 10
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
index 7fc42c65d758b..1f012d82bc23f 100644
--- a/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-ext-ext.ll
@@ -39,7 +39,7 @@ define i1 @zext_zext_eq(i8 %x, i8 %y) {
define i1 @zext_zext_sle_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_zext_sle_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext i8 %x to i32
@@ -51,7 +51,7 @@ define i1 @zext_zext_sle_op0_narrow(i8 %x, i16 %y) {
define i1 @zext_zext_ule_op0_wide(i9 %x, i8 %y) {
; CHECK-LABEL: @zext_zext_ule_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i9
-; CHECK-NEXT: [[C:%.*]] = icmp uge i9 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i9 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext i9 %x to i32
@@ -96,7 +96,7 @@ define i1 @sext_sext_ne(i8 %x, i8 %y) {
define i1 @sext_sext_sge_op0_narrow(i5 %x, i8 %y) {
; CHECK-LABEL: @sext_sext_sge_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i5 [[X:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp sge i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sle i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i5 %x to i32
@@ -108,7 +108,7 @@ define i1 @sext_sext_sge_op0_narrow(i5 %x, i8 %y) {
define <2 x i1> @sext_sext_uge_op0_wide(<2 x i16> %x, <2 x i8> %y) {
; CHECK-LABEL: @sext_sext_uge_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i8> [[Y:%.*]] to <2 x i16>
-; CHECK-NEXT: [[C:%.*]] = icmp ule <2 x i16> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge <2 x i16> [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
%a = sext <2 x i16> %x to <2 x i32>
@@ -208,7 +208,7 @@ define i1 @zext_sext_sle_op0_narrow(i8 %x, i16 %y) {
define i1 @zext_nneg_sext_sle_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_nneg_sext_sle_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp sle i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext nneg i8 %x to i32
@@ -233,7 +233,7 @@ define i1 @zext_sext_ule_op0_wide(i9 %x, i8 %y) {
define i1 @zext_nneg_sext_ule_op0_wide(i9 %x, i8 %y) {
; CHECK-LABEL: @zext_nneg_sext_ule_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i9
-; CHECK-NEXT: [[C:%.*]] = icmp uge i9 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ule i9 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = zext nneg i9 %x to i32
@@ -333,7 +333,7 @@ define i1 @sext_zext_sge_op0_narrow(i5 %x, i8 %y) {
define i1 @sext_zext_nneg_sge_op0_narrow(i5 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_nneg_sge_op0_narrow(
; CHECK-NEXT: [[TMP1:%.*]] = sext i5 [[X:%.*]] to i8
-; CHECK-NEXT: [[C:%.*]] = icmp sge i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sle i8 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i5 %x to i32
@@ -359,7 +359,7 @@ define i1 @sext_zext_uge_op0_wide(i16 %x, i8 %y) {
define i1 @sext_zext_nneg_uge_op0_wide(i16 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_nneg_uge_op0_wide(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i16 %x to i32
@@ -411,7 +411,7 @@ define i1 @zext_sext_sle_known_nonneg_op0_narrow(i8 %x, i16 %y) {
; CHECK-LABEL: @zext_sext_sle_known_nonneg_op0_narrow(
; CHECK-NEXT: [[N:%.*]] = and i8 [[X:%.*]], 12
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp sle i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp sge i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%n = and i8 %x, 12
@@ -438,7 +438,7 @@ define i1 @zext_sext_ule_known_nonneg_op0_wide(i9 %x, i8 %y) {
define i1 @sext_zext_slt_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_slt_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = and i8 [[Y:%.*]], 126
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -451,7 +451,7 @@ define i1 @sext_zext_slt_known_nonneg(i8 %x, i8 %y) {
define i1 @sext_zext_ult_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_ult_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = lshr i8 [[Y:%.*]], 6
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -464,7 +464,7 @@ define i1 @sext_zext_ult_known_nonneg(i8 %x, i8 %y) {
define i1 @sext_zext_ne_known_nonneg(i8 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_ne_known_nonneg(
; CHECK-NEXT: [[N:%.*]] = udiv i8 [[Y:%.*]], 6
-; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[N]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[X:%.*]], [[N]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i8 %x to i32
@@ -492,7 +492,7 @@ define i1 @sext_zext_uge_known_nonneg_op0_wide(i16 %x, i8 %y) {
; CHECK-LABEL: @sext_zext_uge_known_nonneg_op0_wide(
; CHECK-NEXT: [[N:%.*]] = and i8 [[Y:%.*]], 12
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i8 [[N]] to i16
-; CHECK-NEXT: [[C:%.*]] = icmp ule i16 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp uge i16 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%a = sext i16 %x to i32
diff --git a/llvm/test/Transforms/InstCombine/icmp-gep.ll b/llvm/test/Transforms/InstCombine/icmp-gep.ll
index ce64ab1c6305a..01bee5a0f9cbd 100644
--- a/llvm/test/Transforms/InstCombine/icmp-gep.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-gep.ll
@@ -329,7 +329,7 @@ define i1 @test60_as1(ptr addrspace(1) %foo, i64 %i, i64 %j) {
define i1 @test60_addrspacecast(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test60_addrspacecast(
; CHECK-NEXT: [[GEP1_IDX:%.*]] = shl nsw i64 [[I:%.*]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[GEP1_IDX]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[J:%.*]], [[GEP1_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr %foo to ptr addrspace(3)
@@ -359,7 +359,7 @@ define i1 @test60_addrspacecast_larger(ptr addrspace(1) %foo, i32 %i, i16 %j) {
; CHECK-LABEL: @test60_addrspacecast_larger(
; CHECK-NEXT: [[I_TR:%.*]] = trunc i32 [[I:%.*]] to i16
; CHECK-NEXT: [[TMP1:%.*]] = shl i16 [[I_TR]], 2
-; CHECK-NEXT: [[CMP:%.*]] = icmp slt i16 [[TMP1]], [[J:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i16 [[J:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[CMP]]
;
%bit = addrspacecast ptr addrspace(1) %foo to ptr addrspace(2)
@@ -515,10 +515,10 @@ define i1 @test_scalable_xy(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test_scalable_xy(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[TMP2]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[I:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[TMP3]], 2
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[TMP4]], [[J:%.*]]
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[J:%.*]], [[TMP4]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i64 [[GEP2_IDX]], [[GEP1_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -534,10 +534,10 @@ define i1 @test_scalable_ij(ptr %foo, i64 %i, i64 %j) {
; CHECK-LABEL: @test_scalable_ij(
; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 4
-; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[TMP2]], [[I:%.*]]
+; CHECK-NEXT: [[GEP1_IDX:%.*]] = mul nsw i64 [[I:%.*]], [[TMP2]]
; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[TMP3]], 2
-; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[TMP4]], [[J:%.*]]
+; CHECK-NEXT: [[GEP2_IDX:%.*]] = mul nsw i64 [[J:%.*]], [[TMP4]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i64 [[GEP1_IDX]], [[GEP2_IDX]]
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
index aa23a6d27f69b..07536f271ceb1 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul-zext.ll
@@ -16,7 +16,7 @@ define i32 @sterix(i32, i8, i64) {
; CHECK-NEXT: [[TOBOOL_NOT:%.*]] = icmp ult i64 [[MUL3]], 4294967296
; CHECK-NEXT: br i1 [[TOBOOL_NOT]], label [[LOR_RHS:%.*]], label [[LOR_END:%.*]]
; CHECK: lor.rhs:
-; CHECK-NEXT: [[AND:%.*]] = and i64 [[MUL3]], [[TMP2]]
+; CHECK-NEXT: [[AND:%.*]] = and i64 [[TMP2]], [[MUL3]]
; CHECK-NEXT: [[TOBOOL7_NOT:%.*]] = icmp eq i64 [[AND]], 0
; CHECK-NEXT: [[TMP3:%.*]] = zext i1 [[TOBOOL7_NOT]] to i32
; CHECK-NEXT: br label [[LOR_END]]
@@ -128,12 +128,12 @@ define i1 @PR46561(i1 %a, i1 %x, i1 %y, i8 %z) {
; CHECK-NEXT: br i1 [[A:%.*]], label [[COND_TRUE:%.*]], label [[END:%.*]]
; CHECK: cond.true:
; CHECK-NEXT: [[MULBOOL:%.*]] = and i1 [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[TMP1:%.*]] = trunc i8 [[Z:%.*]] to i1
-; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[MULBOOL]], [[TMP1]]
-; CHECK-NEXT: [[TMP3:%.*]] = xor i1 [[TMP2]], true
+; CHECK-NEXT: [[TMP0:%.*]] = trunc i8 [[Z:%.*]] to i1
+; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[MULBOOL]], [[TMP0]]
+; CHECK-NEXT: [[TMP2:%.*]] = xor i1 [[TMP1]], true
; CHECK-NEXT: br label [[END]]
; CHECK: end:
-; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP3]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[P:%.*]] = phi i1 [ [[TMP2]], [[COND_TRUE]] ], [ false, [[ENTRY:%.*]] ]
; CHECK-NEXT: ret i1 [[P]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/icmp-mul.ll b/llvm/test/Transforms/InstCombine/icmp-mul.ll
index 12c77367b10f7..3ba21abb069ba 100644
--- a/llvm/test/Transforms/InstCombine/icmp-mul.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-mul.ll
@@ -1111,7 +1111,7 @@ define i1 @mul_xy_z_assumeodd_eq(i8 %x, i8 %y, i8 %z) {
define <2 x i1> @reused_mul_nsw_xy_z_setnonzero_vec_ne(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
; CHECK-LABEL: @reused_mul_nsw_xy_z_setnonzero_vec_ne(
; CHECK-NEXT: [[Z:%.*]] = or <2 x i8> [[ZI:%.*]],
-; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i8> [[Y]], [[X:%.*]]
; CHECK-NEXT: call void @usev2xi8(<2 x i8> [[MULY]])
; CHECK-NEXT: ret <2 x i1> [[CMP]]
@@ -1127,8 +1127,8 @@ define <2 x i1> @reused_mul_nsw_xy_z_setnonzero_vec_ne(<2 x i8> %x, <2 x i8> %y,
define i1 @mul_mixed_nuw_nsw_xy_z_setodd_ult(i8 %x, i8 %y, i8 %zi) {
; CHECK-LABEL: @mul_mixed_nuw_nsw_xy_z_setodd_ult(
; CHECK-NEXT: [[Z:%.*]] = or i8 [[ZI:%.*]], 1
-; CHECK-NEXT: [[MULX:%.*]] = mul nsw i8 [[Z]], [[X:%.*]]
-; CHECK-NEXT: [[MULY:%.*]] = mul nuw nsw i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[MULX:%.*]] = mul nsw i8 [[X:%.*]], [[Z]]
+; CHECK-NEXT: [[MULY:%.*]] = mul nuw nsw i8 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[MULX]], [[MULY]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -1212,7 +1212,7 @@ define i1 @reused_mul_nuw_xy_z_selectnonzero_ugt(i8 %x, i8 %y, i8 %z) {
define <2 x i1> @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(<2 x i8> %x, <2 x i8> %y, <2 x i8> %zi) {
; CHECK-LABEL: @mul_mixed_nsw_nuw_xy_z_setnonzero_vec_ule(
; CHECK-NEXT: [[Z:%.*]] = or <2 x i8> [[ZI:%.*]],
-; CHECK-NEXT: [[MULX:%.*]] = mul nuw <2 x i8> [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[MULX:%.*]] = mul nuw <2 x i8> [[X:%.*]], [[Z]]
; CHECK-NEXT: [[MULY:%.*]] = mul nsw <2 x i8> [[Z]], [[Y:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ule <2 x i8> [[MULY]], [[MULX]]
; CHECK-NEXT: ret <2 x i1> [[CMP]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
index 75badabda01ae..09c9c1ebc8315 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-and-x.ll
@@ -228,7 +228,7 @@ define i1 @icmp_sle_negx_y_fail_maybe_zero(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y_todo(i8 %x, i1 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y_todo(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[R]]
;
@@ -253,9 +253,9 @@ define i1 @icmp_eq_x_invertable_y(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y_fail_multiuse(i8 %x, i8 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y_fail_multiuse(
; CHECK-NEXT: [[YY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[AND:%.*]] = and i8 [[YY]], [[X:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i8 [[X:%.*]], [[YY]]
; CHECK-NEXT: call void @use.i8(i8 [[AND]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[AND]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[AND]]
; CHECK-NEXT: ret i1 [[R]]
;
%yy = xor i8 %y, -1
@@ -268,7 +268,7 @@ define i1 @icmp_eq_x_invertable_y_fail_multiuse(i8 %x, i8 %y) {
define i1 @icmp_eq_x_invertable_y2_todo(i8 %x, i1 %y) {
; CHECK-LABEL: @icmp_eq_x_invertable_y2_todo(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 -25
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], -1
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
index 7ff111c42a9e0..93eeab4732185 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-or-x.ll
@@ -95,7 +95,7 @@ define i1 @or_eq_notY_eq_0(i8 %x, i8 %y) {
define i1 @or_eq_notY_eq_0_fail_multiuse(i8 %x, i8 %y) {
; CHECK-LABEL: @or_eq_notY_eq_0_fail_multiuse(
; CHECK-NEXT: [[NY:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[NY]], [[X:%.*]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X:%.*]], [[NY]]
; CHECK-NEXT: call void @use.i8(i8 [[OR]])
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[OR]], [[NY]]
; CHECK-NEXT: ret i1 [[CMP]]
@@ -122,7 +122,7 @@ define i1 @or_ne_notY_eq_1s(i8 %x, i8 %y) {
define i1 @or_ne_notY_eq_1s_fail_bad_not(i8 %x, i8 %y) {
; CHECK-LABEL: @or_ne_notY_eq_1s_fail_bad_not(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], 1
-; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = or i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[TMP2]], -1
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -307,7 +307,7 @@ define i1 @or_simplify_uge(i8 %y_in, i8 %rhs_in, i1 %c) {
define i1 @or_simplify_ule_fail(i8 %y_in, i8 %rhs_in) {
; CHECK-LABEL: @or_simplify_ule_fail(
; CHECK-NEXT: [[RHS:%.*]] = and i8 [[RHS_IN:%.*]], 127
-; CHECK-NEXT: [[Y:%.*]] = or i8 [[RHS]], [[Y_IN:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = or i8 [[Y_IN:%.*]], [[RHS]]
; CHECK-NEXT: [[LBO:%.*]] = or i8 [[Y]], 64
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[LBO]], [[RHS]]
; CHECK-NEXT: ret i1 [[R]]
@@ -352,7 +352,7 @@ define i1 @or_simplify_ult(i8 %y_in, i8 %rhs_in) {
define i1 @or_simplify_ugt_fail(i8 %y_in, i8 %rhs_in) {
; CHECK-LABEL: @or_simplify_ugt_fail(
; CHECK-NEXT: [[RHS:%.*]] = or i8 [[RHS_IN:%.*]], 1
-; CHECK-NEXT: [[LBO:%.*]] = or i8 [[RHS]], [[Y_IN:%.*]]
+; CHECK-NEXT: [[LBO:%.*]] = or i8 [[Y_IN:%.*]], [[RHS]]
; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[LBO]], [[RHS]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -377,7 +377,7 @@ define i1 @pr64610(ptr %b) {
define i1 @icmp_eq_x_invertable_y2_todo(i8 %x, i1 %y, i8 %z) {
; CHECK-LABEL: @icmp_eq_x_invertable_y2_todo(
; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[Y:%.*]], i8 -8, i8 [[Z:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[R]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
index c0cd3e775f68a..f2a02fac90b17 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-trunc-ext.ll
@@ -50,7 +50,7 @@ define i1 @icmp_trunc_x_trunc_y_illegal_trunc_to_legal_anyways(i123 %x, i32 %y)
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i123 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i123 %x, 65536
@@ -70,7 +70,7 @@ define i1 @icmp_trunc_x_trunc_y_2_illegal_anyways(i33 %x, i63 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i33 [[X]] to i63
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i63 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i63 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 512
@@ -90,7 +90,7 @@ define i1 @icmp_trunc_x_trunc_y_3(i64 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i64 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i64 %x, 123
@@ -152,7 +152,7 @@ define i1 @icmp_trunc_x_trunc_y_swap0(i33 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 65536
@@ -172,7 +172,7 @@ define i1 @icmp_trunc_x_trunc_y_swap1(i33 %x, i32 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = trunc nuw nsw i33 [[X]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i33 %x, 65536
@@ -190,7 +190,7 @@ define i1 @icmp_trunc_x_zext_y(i32 %x, i8 %y) {
; CHECK-NEXT: [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i32 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i32 %x, 65536
@@ -206,7 +206,7 @@ define i1 @icmp_trunc_x_zext_y_2(i32 %x, i8 %y) {
; CHECK-NEXT: [[X_LB_ONLY:%.*]] = icmp ult i32 [[X:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[X_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[X]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%x_lb_only = icmp ult i32 %x, 65536
@@ -222,7 +222,7 @@ define i1 @icmp_trunc_x_zext_y_3(i6 %x, i32 %y) {
; CHECK-NEXT: [[Y_LB_ONLY:%.*]] = icmp ult i32 [[Y:%.*]], 65536
; CHECK-NEXT: call void @llvm.assume(i1 [[Y_LB_ONLY]])
; CHECK-NEXT: [[TMP1:%.*]] = zext i6 [[X:%.*]] to i32
-; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i32 [[Y]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%y_lb_only = icmp ult i32 %y, 65536
@@ -412,7 +412,7 @@ define i1 @trunc_equality_either(i16 %x, i16 %y) {
define i1 @trunc_unsigned_nuw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nuw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw i32 %x to i16
@@ -437,7 +437,7 @@ define i1 @trunc_unsigned_nuw_sext(i32 %x, i8 %y) {
define i1 @trunc_unsigned_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -449,7 +449,7 @@ define i1 @trunc_unsigned_nsw_zext(i32 %x, i8 %y) {
define i1 @trunc_unsigned_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_unsigned_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -461,7 +461,7 @@ define i1 @trunc_unsigned_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_signed_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_signed_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -473,7 +473,7 @@ define i1 @trunc_signed_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_signed_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_signed_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -511,7 +511,7 @@ define i1 @trunc_signed_nuw_zext(i32 %x, i8 %y) {
define i1 @trunc_equality_nuw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nuw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw i32 %x to i16
@@ -536,7 +536,7 @@ define i1 @trunc_equality_nuw_sext(i32 %x, i8 %y) {
define i1 @trunc_equality_nsw_zext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nsw_zext(
; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -548,7 +548,7 @@ define i1 @trunc_equality_nsw_zext(i32 %x, i8 %y) {
define i1 @trunc_equality_nsw_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_nsw_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nsw i32 %x to i16
@@ -560,7 +560,7 @@ define i1 @trunc_equality_nsw_sext(i32 %x, i8 %y) {
define i1 @trunc_equality_both_sext(i32 %x, i8 %y) {
; CHECK-LABEL: @trunc_equality_both_sext(
; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[Y:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[C]]
;
%xt = trunc nuw nsw i32 %x to i16
@@ -572,7 +572,7 @@ define i1 @trunc_equality_both_sext(i32 %x, i8 %y) {
define i1 @test_eq1(i32 %x, i16 %y) {
; CHECK-LABEL: @test_eq1(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -586,7 +586,7 @@ define i1 @test_eq1(i32 %x, i16 %y) {
define i1 @test_eq2(i32 %x, i16 %y) {
; CHECK-LABEL: @test_eq2(
; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[X:%.*]] to i16
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i16 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp eq i16 [[Y:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -598,7 +598,7 @@ define i1 @test_eq2(i32 %x, i16 %y) {
define i1 @test_ult(i32 %x, i16 %y) {
; CHECK-LABEL: @test_ult(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -610,7 +610,7 @@ define i1 @test_ult(i32 %x, i16 %y) {
define i1 @test_slt(i32 %x, i16 %y) {
; CHECK-LABEL: @test_slt(
; CHECK-NEXT: [[TMP1:%.*]] = sext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nsw i32 %x to i8
@@ -622,7 +622,7 @@ define i1 @test_slt(i32 %x, i16 %y) {
define i1 @test_ult_nuw(i32 %x, i16 %y) {
; CHECK-LABEL: @test_ult_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp ugt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nuw nsw i32 %x to i8
@@ -634,7 +634,7 @@ define i1 @test_ult_nuw(i32 %x, i16 %y) {
define i1 @test_slt_nuw(i32 %x, i16 %y) {
; CHECK-LABEL: @test_slt_nuw(
; CHECK-NEXT: [[TMP1:%.*]] = zext i16 [[Y:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp sgt i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp slt i32 [[X:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[COND]]
;
%conv1 = trunc nuw nsw i32 %x to i8
diff --git a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
index fd61c8a301662..a4e7acbca930d 100644
--- a/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-of-xor-x.ll
@@ -10,7 +10,7 @@ define i1 @test_xor1(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor1(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -27,7 +27,7 @@ define i1 @test_xor2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor2(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[Y:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], [[Y]]
; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -44,7 +44,7 @@ define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor3(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -60,7 +60,7 @@ define i1 @test_xor3(i8 %x, i8 %y, i8 %z) {
define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_ne(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%nz = xor i8 %z, -1
@@ -73,7 +73,7 @@ define i1 @test_xor_ne(i8 %x, i8 %y, i8 %z) {
define i1 @test_xor_eq(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor_eq(
; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Z:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%nz = xor i8 %z, -1
@@ -88,7 +88,7 @@ define i1 @test_xor4(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor4(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -104,7 +104,7 @@ define i1 @test_xor5(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor5(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -120,7 +120,7 @@ define i1 @test_xor6(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor6(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -136,7 +136,7 @@ define i1 @test_xor7(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor7(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -152,7 +152,7 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @test_xor8(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], -1
; CHECK-NEXT: call void @use.i8(i8 [[XOR]])
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[X]]
; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[TMP1]], [[Z:%.*]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -167,7 +167,7 @@ define i1 @test_xor8(i8 %x, i8 %y, i8 %z) {
; test (~a ^ b) < ~a
define i1 @test_slt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_slt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -180,7 +180,7 @@ define i1 @test_slt_xor(i32 %x, i32 %y) {
; test (a ^ ~b) <= ~b
define i1 @test_sle_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sle_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[R:%.*]] = icmp sge i32 [[TMP1]], [[Y]]
; CHECK-NEXT: ret i1 [[R]]
;
@@ -193,7 +193,7 @@ define i1 @test_sle_xor(i32 %x, i32 %y) {
; test ~a > (~a ^ b)
define i1 @test_sgt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sgt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -205,7 +205,7 @@ define i1 @test_sgt_xor(i32 %x, i32 %y) {
define i1 @test_sge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_sge_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -217,7 +217,7 @@ define i1 @test_sge_xor(i32 %x, i32 %y) {
define i1 @test_ult_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ult_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -229,7 +229,7 @@ define i1 @test_ult_xor(i32 %x, i32 %y) {
define i1 @test_ule_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ule_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -241,7 +241,7 @@ define i1 @test_ule_xor(i32 %x, i32 %y) {
define i1 @test_ugt_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_ugt_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -253,7 +253,7 @@ define i1 @test_ugt_xor(i32 %x, i32 %y) {
define i1 @test_uge_xor(i32 %x, i32 %y) {
; CHECK-LABEL: @test_uge_xor(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i1 [[CMP]]
;
@@ -386,7 +386,7 @@ define <2 x i1> @xor_sgt(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt(
; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]],
; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]],
-; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
@@ -401,7 +401,7 @@ define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
; CHECK-LABEL: @xor_sgt_fail_no_known_msb(
; CHECK-NEXT: [[YZ:%.*]] = and <2 x i8> [[Y:%.*]],
; CHECK-NEXT: [[Y1:%.*]] = or disjoint <2 x i8> [[YZ]],
-; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[Y1]], [[X:%.*]]
+; CHECK-NEXT: [[XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[Y1]]
; CHECK-NEXT: [[R:%.*]] = icmp sgt <2 x i8> [[XOR]], [[X]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
@@ -415,7 +415,7 @@ define <2 x i1> @xor_sgt_fail_no_known_msb(<2 x i8> %x, <2 x i8> %y) {
define i1 @xor_slt_2(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @xor_slt_2(
; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X:%.*]], 88
-; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[XOR]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X]], [[XOR]]
; CHECK-NEXT: ret i1 [[R]]
;
%xor = xor i8 %x, 88
diff --git a/llvm/test/Transforms/InstCombine/icmp-or-of-select-with-zero.ll b/llvm/test/Transforms/InstCombine/icmp-or-of-select-with-zero.ll
index 90e0461f8b789..75301ce5d72a7 100644
--- a/llvm/test/Transforms/InstCombine/icmp-or-of-select-with-zero.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-or-of-select-with-zero.ll
@@ -271,7 +271,7 @@ define i1 @src_tv_ne_invert(i1 %c1, i8 %a, i8 %b, i8 %x, i8 %yy) {
; CHECK-NEXT: [[C0:%.*]] = xor i1 [[NOT_C0]], true
; CHECK-NEXT: [[Y:%.*]] = add nuw i8 [[YY:%.*]], 1
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[NOT_C0]], i8 [[Y]], i8 0
-; CHECK-NEXT: [[CC:%.*]] = or i1 [[C0]], [[C1:%.*]]
+; CHECK-NEXT: [[CC:%.*]] = or i1 [[C1:%.*]], [[C0]]
; CHECK-NEXT: [[SEL_OTHER:%.*]] = select i1 [[CC]], i8 [[Y]], i8 [[B]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i8 [[X:%.*]], 0
; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[NOT_C0]]
diff --git a/llvm/test/Transforms/InstCombine/icmp-or.ll b/llvm/test/Transforms/InstCombine/icmp-or.ll
index bedaf591fb070..36b3216196f84 100644
--- a/llvm/test/Transforms/InstCombine/icmp-or.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-or.ll
@@ -172,7 +172,7 @@ define i1 @eq_const_mask_not_same(i8 %x, i8 %y) {
define i1 @eq_const_mask_wrong_opcode(i8 %x, i8 %y) {
; CHECK-LABEL: @eq_const_mask_wrong_opcode(
; CHECK-NEXT: [[B0:%.*]] = or i8 [[X:%.*]], 5
-; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[B0]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], [[B0]]
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP1]], 5
; CHECK-NEXT: ret i1 [[CMP]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-range.ll b/llvm/test/Transforms/InstCombine/icmp-range.ll
index 9ed2f2a4860c6..8b690826a7bf9 100644
--- a/llvm/test/Transforms/InstCombine/icmp-range.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-range.ll
@@ -152,7 +152,7 @@ define i1 @test_two_ranges(ptr nocapture readonly %arg1, ptr nocapture readonly
; Values' ranges overlap each other, so it can not be simplified.
define i1 @test_two_attribute_ranges(i32 range(i32 5, 10) %arg1, i32 range(i32 8, 16) %arg2) {
; CHECK-LABEL: @test_two_attribute_ranges(
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG1:%.*]], [[ARG2:%.*]]
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG2:%.*]], [[ARG1:%.*]]
; CHECK-NEXT: ret i1 [[RVAL]]
;
%rval = icmp ult i32 %arg2, %arg1
@@ -249,7 +249,7 @@ define <2 x i1> @test_two_ranges_vec_true(ptr nocapture readonly %arg1, ptr noca
; Values' ranges overlap each other, so it can not be simplified.
define <2 x i1> @test_two_argument_ranges_vec(<2 x i32> range(i32 5, 10) %arg1, <2 x i32> range(i32 8, 16) %arg2) {
; CHECK-LABEL: @test_two_argument_ranges_vec(
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult <2 x i32> [[VAL2:%.*]], [[VAL1:%.*]]
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult <2 x i32> [[ARG2:%.*]], [[ARG1:%.*]]
; CHECK-NEXT: ret <2 x i1> [[RVAL]]
;
%rval = icmp ult <2 x i32> %arg2, %arg1
@@ -281,9 +281,9 @@ declare range(i32 1, 6) i32 @create_range3()
; Values' ranges overlap each other, so it can not be simplified.
define i1 @test_two_return_attribute_ranges_not_simplified() {
; CHECK-LABEL: @test_two_return_attribute_ranges_not_simplified(
-; CHECK-NEXT: [[ARG2:%.*]] = call range(i32 5, 10) i32 @create_range1()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
-; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[ARG1]], [[ARG2]]
+; CHECK-NEXT: [[VAL1:%.*]] = call range(i32 5, 10) i32 @create_range1()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[RVAL:%.*]] = icmp ult i32 [[VAL2]], [[VAL1]]
; CHECK-NEXT: ret i1 [[RVAL]]
;
%val1 = call range(i32 5, 10) i32 @create_range1()
@@ -296,7 +296,7 @@ define i1 @test_two_return_attribute_ranges_not_simplified() {
define i1 @test_two_return_attribute_ranges_one_in_call() {
; CHECK-LABEL: @test_two_return_attribute_ranges_one_in_call(
; CHECK-NEXT: [[VAL1:%.*]] = call range(i32 1, 6) i32 @create_range1()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
; CHECK-NEXT: ret i1 false
;
%val1 = call range(i32 1, 6) i32 @create_range1()
@@ -309,7 +309,7 @@ define i1 @test_two_return_attribute_ranges_one_in_call() {
define i1 @test_two_return_attribute_ranges() {
; CHECK-LABEL: @test_two_return_attribute_ranges(
; CHECK-NEXT: [[VAL1:%.*]] = call i32 @create_range3()
-; CHECK-NEXT: [[ARG1:%.*]] = call i32 @create_range2()
+; CHECK-NEXT: [[VAL2:%.*]] = call i32 @create_range2()
; CHECK-NEXT: ret i1 false
;
%val1 = call i32 @create_range3()
@@ -370,7 +370,7 @@ define <2 x i1> @ult_zext(<2 x i1> %b, <2 x i8> %p) {
define i1 @uge_zext(i1 %b, i8 %x) {
; CHECK-LABEL: @uge_zext(
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i1 %b to i8
@@ -399,7 +399,7 @@ define i1 @ugt_zext_use(i1 %b, i8 %x) {
; CHECK-LABEL: @ugt_zext_use(
; CHECK-NEXT: [[Z:%.*]] = zext i1 [[B:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[Z]])
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i1 %b to i8
@@ -413,7 +413,7 @@ define i1 @ugt_zext_use(i1 %b, i8 %x) {
define i1 @ult_zext_not_i1(i2 %b, i8 %x) {
; CHECK-LABEL: @ult_zext_not_i1(
; CHECK-NEXT: [[Z:%.*]] = zext i2 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Z]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Z]]
; CHECK-NEXT: ret i1 [[R]]
;
%z = zext i2 %b to i8
@@ -600,7 +600,7 @@ define <2 x i1> @ule_sext(<2 x i1> %b, <2 x i8> %p) {
define i1 @ugt_sext(i1 %b, i8 %x) {
; CHECK-LABEL: @ugt_sext(
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i1 %b to i8
@@ -629,7 +629,7 @@ define i1 @uge_sext_use(i1 %b, i8 %x) {
; CHECK-LABEL: @uge_sext_use(
; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i8
; CHECK-NEXT: call void @use(i8 [[S]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i1 %b to i8
@@ -643,7 +643,7 @@ define i1 @uge_sext_use(i1 %b, i8 %x) {
define i1 @ule_sext_not_i1(i2 %b, i8 %x) {
; CHECK-LABEL: @ule_sext_not_i1(
; CHECK-NEXT: [[S:%.*]] = sext i2 [[B:%.*]] to i8
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[S]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[S]]
; CHECK-NEXT: ret i1 [[R]]
;
%s = sext i2 %b to i8
@@ -869,7 +869,7 @@ define i1 @zext_sext_add_icmp_i128(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_eq_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_eq_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -885,7 +885,7 @@ define i1 @zext_sext_add_icmp_eq_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ne_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ne_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -899,8 +899,8 @@ define i1 @zext_sext_add_icmp_ne_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_sgt_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_sgt_minus1(
-; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[B_NOT:%.*]] = xor i1 [[B:%.*]], true
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[B_NOT]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -915,7 +915,7 @@ define i1 @zext_sext_add_icmp_sgt_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ult_minus1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ult_minus1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -930,7 +930,7 @@ define i1 @zext_sext_add_icmp_ult_minus1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_sgt_0(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_sgt_0(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -945,8 +945,8 @@ define i1 @zext_sext_add_icmp_sgt_0(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_slt_0(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_slt_0(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[B:%.*]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%zext.a = zext i1 %a to i8
%sext.b = sext i1 %b to i8
@@ -960,7 +960,7 @@ define i1 @zext_sext_add_icmp_slt_0(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_eq_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_eq_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i1 [[A:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -975,7 +975,7 @@ define i1 @zext_sext_add_icmp_eq_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ne_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ne_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -990,7 +990,7 @@ define i1 @zext_sext_add_icmp_ne_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_slt_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_slt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or i1 [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret i1 [[R]]
;
%zext.a = zext i1 %a to i8
@@ -1005,8 +1005,8 @@ define i1 @zext_sext_add_icmp_slt_1(i1 %a, i1 %b) {
define i1 @zext_sext_add_icmp_ugt_1(i1 %a, i1 %b) {
; CHECK-LABEL: @zext_sext_add_icmp_ugt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[A:%.*]], true
-; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[B:%.*]]
-; CHECK-NEXT: ret i1 [[R]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[B:%.*]], [[TMP1]]
+; CHECK-NEXT: ret i1 [[TMP2]]
;
%zext.a = zext i1 %a to i8
%sext.b = sext i1 %b to i8
@@ -1018,7 +1018,7 @@ define i1 @zext_sext_add_icmp_ugt_1(i1 %a, i1 %b) {
define <2 x i1> @vector_zext_sext_add_icmp_slt_1(<2 x i1> %a, <2 x i1> %b) {
; CHECK-LABEL: @vector_zext_sext_add_icmp_slt_1(
; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]],
-; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[B:%.*]], [[TMP1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%zext.a = zext <2 x i1> %a to <2 x i8>
@@ -1601,7 +1601,7 @@ define i1 @icmp_ne_sext_sgt_zero_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_sgt_zero_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[A:%.*]], 0
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp sgt i32 %a, 0
@@ -1614,7 +1614,7 @@ define i1 @icmp_slt_sext_ne_zero_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_zero_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 0
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, 0
@@ -1627,7 +1627,7 @@ define i1 @icmp_ne_sext_slt_allones_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_slt_allones_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], -1
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp slt i32 %a, -1
@@ -1640,7 +1640,7 @@ define i1 @icmp_slt_sext_ne_allones_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_allones_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], -1
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, -1
@@ -1653,7 +1653,7 @@ define i1 @icmp_ne_sext_slt_otherwise_nofold(i32 %a) {
; CHECK-LABEL: @icmp_ne_sext_slt_otherwise_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A:%.*]], 2
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp slt i32 %a, 2
@@ -1666,7 +1666,7 @@ define i1 @icmp_slt_sext_ne_otherwise_nofold(i32 %a) {
; CHECK-LABEL: @icmp_slt_sext_ne_otherwise_nofold(
; CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[A:%.*]], 2
; CHECK-NEXT: [[CONV:%.*]] = sext i1 [[CMP]] to i32
-; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[CONV]], [[A]]
+; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[A]], [[CONV]]
; CHECK-NEXT: ret i1 [[CMP1]]
;
%cmp = icmp ne i32 %a, 2
diff --git a/llvm/test/Transforms/InstCombine/icmp-rotate.ll b/llvm/test/Transforms/InstCombine/icmp-rotate.ll
index 2580bb6a865c7..eeaa1c7861097 100644
--- a/llvm/test/Transforms/InstCombine/icmp-rotate.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-rotate.ll
@@ -213,7 +213,7 @@ define i1 @amounts_mismatch(i8 %x, i8 %y, i8 %z, i8 %w) {
; CHECK-LABEL: @amounts_mismatch(
; CHECK-NEXT: [[TMP1:%.*]] = sub i8 [[Z:%.*]], [[W:%.*]]
; CHECK-NEXT: [[TMP2:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[TMP1]])
-; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[Y:%.*]], [[TMP2]]
; CHECK-NEXT: ret i1 [[R]]
;
%f = tail call i8 @llvm.fshl.i8(i8 %x, i8 %x, i8 %z)
diff --git a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
index 8d393a7ae28c9..d23634f8caf55 100644
--- a/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-select-implies-common-op.ll
@@ -4,9 +4,9 @@
define i1 @sgt_3_impliesF_eq_2(i8 %x, i8 %y) {
; CHECK-LABEL: @sgt_3_impliesF_eq_2(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp sgt i8 %x, 3
%sel = select i1 %cmp, i8 2, i8 %y
@@ -17,9 +17,9 @@ define i1 @sgt_3_impliesF_eq_2(i8 %x, i8 %y) {
define i1 @sgt_3_impliesT_sgt_2(i8 %x, i8 %y) {
; CHECK-LABEL: @sgt_3_impliesT_sgt_2(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], 4
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp sgt i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp sgt i8 %x, 3
%sel = select i1 %cmp, i8 2, i8 %y
@@ -30,9 +30,9 @@ define i1 @sgt_3_impliesT_sgt_2(i8 %x, i8 %y) {
define i1 @sgt_x_impliesF_eq_smin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @sgt_x_impliesF_eq_smin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp sle i8 [[X:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp sgt i8 %x, %z
%sel = select i1 %cmp, i8 -128, i8 %y
@@ -43,9 +43,9 @@ define i1 @sgt_x_impliesF_eq_smin_todo(i8 %x, i8 %y, i8 %z) {
define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @slt_x_impliesT_ne_smin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[X:%.*]], [[Z:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP2]]
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ne i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP21]]
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp slt i8 %x, %z
%sel = select i1 %cmp, i8 127, i8 %y
@@ -56,9 +56,9 @@ define i1 @slt_x_impliesT_ne_smin_todo(i8 %x, i8 %y, i8 %z) {
define i1 @ult_x_impliesT_eq_umax_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @ult_x_impliesT_eq_umax_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[SEL:%.*]], [[X]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP2]]
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ne i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP21]]
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp ugt i8 %z, %x
%sel = select i1 %cmp, i8 255, i8 %y
@@ -68,10 +68,10 @@ define i1 @ult_x_impliesT_eq_umax_todo(i8 %x, i8 %y, i8 %z) {
define i1 @ult_1_impliesF_eq_1(i8 %x, i8 %y) {
; CHECK-LABEL: @ult_1_impliesF_eq_1(
-; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[SEL:%.*]], 0
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[X:%.*]], [[SEL]]
-; CHECK-NEXT: [[CMP3:%.*]] = select i1 [[CMP]], i1 [[CMP2]], i1 false
-; CHECK-NEXT: ret i1 [[CMP3]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP]], i1 [[CMP21]], i1 false
+; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp ult i8 %x, 1
%sel = select i1 %cmp, i8 1, i8 %y
@@ -83,7 +83,7 @@ define i1 @ugt_x_impliesF_eq_umin_todo(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @ugt_x_impliesF_eq_umin_todo(
; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i8 [[Z:%.*]], [[X:%.*]]
; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 0, i8 [[Y:%.*]]
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[SEL]], [[X]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i8 [[X]], [[SEL]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
%cmp = icmp ugt i8 %z, %x
diff --git a/llvm/test/Transforms/InstCombine/icmp-select.ll b/llvm/test/Transforms/InstCombine/icmp-select.ll
index 59d2a1b165c0f..fb68c6ee94207 100644
--- a/llvm/test/Transforms/InstCombine/icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-select.ll
@@ -35,7 +35,7 @@ define i1 @icmp_select_var_commuted(i8 %x, i8 %y, i8 %_z) {
; CHECK-LABEL: @icmp_select_var_commuted(
; CHECK-NEXT: [[Z:%.*]] = udiv i8 42, [[_Z:%.*]]
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Z]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp eq i8 [[Y:%.*]], [[Z]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -122,7 +122,7 @@ define i1 @icmp_select_var_pred_ult(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_ult(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp ugt i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ult i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -137,7 +137,7 @@ define i1 @icmp_select_var_pred_uge(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_uge(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp ule i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp uge i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 [[CMP21]], i1 false
; CHECK-NEXT: ret i1 [[CMP2]]
;
@@ -152,7 +152,7 @@ define i1 @icmp_select_var_pred_uge_commuted(i8 %x, i8 %y, i8 %z) {
; CHECK-LABEL: @icmp_select_var_pred_uge_commuted(
; CHECK-NEXT: [[Z1:%.*]] = add nuw i8 [[Z:%.*]], 2
; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[X:%.*]], 0
-; CHECK-NEXT: [[CMP21:%.*]] = icmp uge i8 [[Z1]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP21:%.*]] = icmp ule i8 [[Y:%.*]], [[Z1]]
; CHECK-NEXT: [[CMP2:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP21]]
; CHECK-NEXT: ret i1 [[CMP2]]
;
diff --git a/llvm/test/Transforms/InstCombine/icmp-sub.ll b/llvm/test/Transforms/InstCombine/icmp-sub.ll
index 5645dededf2e4..8cb3c1c181cec 100644
--- a/llvm/test/Transforms/InstCombine/icmp-sub.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-sub.ll
@@ -622,7 +622,7 @@ define i1 @PR60818_eq_multi_use(i32 %a) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]]
; CHECK-NEXT: call void @use(i32 [[SUB]])
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[SUB]], [[A]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A]], [[SUB]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
@@ -637,7 +637,7 @@ define i1 @PR60818_sgt(i32 %a) {
; CHECK-LABEL: @PR60818_sgt(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]]
-; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[SUB]], [[A]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[A]], [[SUB]]
; CHECK-NEXT: ret i1 [[CMP]]
;
entry:
diff --git a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
index 27b02c8c6e936..ba47ed02edbdf 100644
--- a/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-uge-of-not-of-shl-allones-by-bits-and-val-to-icmp-eq-of-lshr-val-by-bits-and-0.ll
@@ -138,7 +138,7 @@ define i1 @oneuse1(i8 %val, i8 %bits) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -154,7 +154,7 @@ define i1 @oneuse2(i8 %val, i8 %bits) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -173,7 +173,7 @@ define i1 @n0(i8 %val, i8 %bits) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 1, %bits ; constant is not -1
@@ -199,7 +199,7 @@ define <2 x i1> @n2_vec_nonsplat(<2 x i8> %val, <2 x i8> %bits) {
; CHECK-LABEL: @n2_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> , [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor <2 x i8> [[T0]],
-; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = shl <2 x i8> , %bits ; again, wrong constant
@@ -225,7 +225,7 @@ define i1 @n3(i8 %val, i8 %bits) {
; CHECK-LABEL: @n3(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
diff --git a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
index 8441033d4857e..37aa85202e562 100644
--- a/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-ult-of-not-of-shl-allones-by-bits-and-val-to-icmp-ne-of-lshr-val-by-bits-and-0.ll
@@ -138,7 +138,7 @@ define i1 @oneuse1(i8 %val, i8 %bits) {
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -154,7 +154,7 @@ define i1 @oneuse2(i8 %val, i8 %bits) {
; CHECK-NEXT: call void @use8(i8 [[T0]])
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
; CHECK-NEXT: call void @use8(i8 [[T1]])
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 -1, %bits
@@ -173,7 +173,7 @@ define i1 @n0(i8 %val, i8 %bits) {
; CHECK-LABEL: @n0(
; CHECK-NEXT: [[T0:%.*]] = shl nuw i8 1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret i1 [[R]]
;
%t0 = shl i8 1, %bits ; constant is not -1
@@ -199,7 +199,7 @@ define <2 x i1> @n2_vec_nonsplat(<2 x i8> %val, <2 x i8> %bits) {
; CHECK-LABEL: @n2_vec_nonsplat(
; CHECK-NEXT: [[T0:%.*]] = shl <2 x i8> , [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor <2 x i8> [[T0]],
-; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T1]], [[VAL:%.*]]
+; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[VAL:%.*]], [[T1]]
; CHECK-NEXT: ret <2 x i1> [[R]]
;
%t0 = shl <2 x i8> , %bits ; again, wrong constant
@@ -225,7 +225,7 @@ define i1 @n3(i8 %val, i8 %bits) {
; CHECK-LABEL: @n3(
; CHECK-NEXT: [[T0:%.*]] = shl nsw i8 -1, [[BITS:%.*]]
; CHECK-NEXT: [[T1:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T1]], [[VAL:%.*]]
+; CHECK-NEXT:
[[R:%.*]] = icmp uge i8 [[VAL:%.*]], [[T1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = shl i8 -1, %bits diff --git a/llvm/test/Transforms/InstCombine/icmp.ll b/llvm/test/Transforms/InstCombine/icmp.ll index 8fc4a40141931..e492055fea8b8 100644 --- a/llvm/test/Transforms/InstCombine/icmp.ll +++ b/llvm/test/Transforms/InstCombine/icmp.ll @@ -581,7 +581,7 @@ define i1 @test28_extra_uses(i32 %x, i32 %y, i32 %z) { define i1 @ugt_sub(i32 %xsrc, i32 %y) { ; CHECK-LABEL: @ugt_sub( ; CHECK-NEXT: [[X:%.*]] = udiv i32 [[XSRC:%.*]], 42 -; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP]] ; %x = udiv i32 %xsrc, 42 ; thwart complexity-based canonicalization @@ -1266,7 +1266,7 @@ define i1 @test62_as1(ptr addrspace(1) %a) { define i1 @low_mask_eq_zext(i8 %a, i32 %b) { ; CHECK-LABEL: @low_mask_eq_zext( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[C]] ; %z = zext i8 %a to i32 @@ -1278,7 +1278,7 @@ define i1 @low_mask_eq_zext(i8 %a, i32 %b) { define i1 @low_mask_eq_zext_commute(i8 %a, i32 %b) { ; CHECK-LABEL: @low_mask_eq_zext_commute( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[C]] ; %t = and i32 %b, 255 @@ -1322,7 +1322,7 @@ define i1 @low_mask_eq_zext_use1(i8 %a, i32 %b) { ; CHECK-NEXT: [[T:%.*]] = and i32 [[B:%.*]], 255 ; CHECK-NEXT: call void @use_i32(i32 [[T]]) ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B]] to i8 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[C]] ; %t = and i32 %b, 255 @@ -1337,7 +1337,7 @@ define i1 @low_mask_eq_zext_use2(i8 %a, i32 %b) { ; CHECK-NEXT: [[Z:%.*]] = zext i8 [[A:%.*]] to i32 ; CHECK-NEXT: call void @use_i32(i32 [[Z]]) ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[B:%.*]] to i8 -; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[TMP1]], [[A]] +; CHECK-NEXT: [[C:%.*]] = icmp eq i8 [[A]], [[TMP1]] ; CHECK-NEXT: ret i1 [[C]] ; %t = and i32 %b, 255 @@ -1367,7 +1367,7 @@ define i1 @low_mask_eq_zext_use3(i8 %a, i32 %b) { define <2 x i1> @low_mask_eq_zext_vec_splat(<2 x i8> %a, <2 x i32> %b) { ; CHECK-LABEL: @low_mask_eq_zext_vec_splat( ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[B:%.*]] to <2 x i8> -; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i8> [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[C]] ; %t = and <2 x i32> %b, @@ -1769,7 +1769,7 @@ define i1 @icmp_mul0_ne0(i32 %x) { define i1 @icmp_add20_eq_add57(i32 %x, i32 %y) { ; CHECK-LABEL: @icmp_add20_eq_add57( ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[Y:%.*]], 37 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %1 = add i32 %x, 20 @@ -1781,7 +1781,7 @@ define i1 @icmp_add20_eq_add57(i32 %x, i32 %y) { define <2 x i1> @icmp_add20_eq_add57_splat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_add20_eq_add57_splat( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add <2 x i32> %x, @@ -1793,7 +1793,7 @@ define <2 x i1> 
@icmp_add20_eq_add57_splat(<2 x i32> %x, <2 x i32> %y) { define <2 x i1> @icmp_add20_eq_add57_poison(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_add20_eq_add57_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add <2 x i32> %x, @@ -1805,7 +1805,7 @@ define <2 x i1> @icmp_add20_eq_add57_poison(<2 x i32> %x, <2 x i32> %y) { define <2 x i1> @icmp_add20_eq_add57_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_add20_eq_add57_vec_nonsplat( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add <2 x i32> %x, @@ -1853,7 +1853,7 @@ define <2 x i1> @icmp_sub57_ne_sub20_vec_poison(<2 x i32> %x, <2 x i32> %y) { define <2 x i1> @icmp_sub57_ne_sub20_vec_nonsplat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_sub57_ne_sub20_vec_nonsplat( ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ne <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add <2 x i32> %x, @@ -1905,7 +1905,7 @@ define i1 @icmp_add1_sle(i32 %x, i32 %y) { define i1 @icmp_add20_sge_add57(i32 %x, i32 %y) { ; CHECK-LABEL: @icmp_add20_sge_add57( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i32 [[Y:%.*]], 37 -; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[CMP]] ; %1 = add nsw i32 %x, 20 @@ -1917,7 +1917,7 @@ define i1 @icmp_add20_sge_add57(i32 %x, i32 %y) { define <2 x i1> @icmp_add20_sge_add57_splat(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_add20_sge_add57_splat( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sge <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add nsw <2 x i32> %x, @@ -1929,7 +1929,7 @@ define <2 x i1> @icmp_add20_sge_add57_splat(<2 x i32> %x, <2 x i32> %y) { define <2 x i1> @icmp_add20_sge_add57_poison(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @icmp_add20_sge_add57_poison( ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[CMP:%.*]] = icmp sle <2 x i32> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sge <2 x i32> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %1 = add nsw <2 x i32> %x, @@ -3192,7 +3192,7 @@ define i1 @icmp_and_or_lshr(i32 %x, i32 %y) { ; CHECK-LABEL: @icmp_and_or_lshr( ; CHECK-NEXT: [[SHF1:%.*]] = shl nuw i32 1, [[Y:%.*]] ; CHECK-NEXT: [[OR2:%.*]] = or i32 [[SHF1]], 1 -; CHECK-NEXT: [[AND3:%.*]] = and i32 [[OR2]], [[X:%.*]] +; CHECK-NEXT: [[AND3:%.*]] = and i32 [[X:%.*]], [[OR2]] ; CHECK-NEXT: [[RET:%.*]] = icmp ne i32 [[AND3]], 0 ; CHECK-NEXT: ret i1 [[RET]] ; @@ -3634,7 +3634,7 @@ define i1 @f10(i16 %p) { define i1 @cmp_sgt_rhs_dec(float %x, i32 %i) { ; CHECK-LABEL: @cmp_sgt_rhs_dec( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -3646,7 +3646,7 @@ define i1 @cmp_sgt_rhs_dec(float %x, i32 %i) { define i1 
@cmp_sle_rhs_dec(float %x, i32 %i) { ; CHECK-LABEL: @cmp_sle_rhs_dec( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -3658,7 +3658,7 @@ define i1 @cmp_sle_rhs_dec(float %x, i32 %i) { define i1 @cmp_sge_rhs_inc(float %x, i32 %i) { ; CHECK-LABEL: @cmp_sge_rhs_inc( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -3670,7 +3670,7 @@ define i1 @cmp_sge_rhs_inc(float %x, i32 %i) { define i1 @cmp_slt_rhs_inc(float %x, i32 %i) { ; CHECK-LABEL: @cmp_slt_rhs_inc( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp sle i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp sge i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -3823,7 +3823,7 @@ define i1 @icmp_add1_ule(i32 %x, i32 %y) { define i1 @cmp_uge_rhs_inc(float %x, i32 %i) { ; CHECK-LABEL: @cmp_uge_rhs_inc( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -3835,7 +3835,7 @@ define i1 @cmp_uge_rhs_inc(float %x, i32 %i) { define i1 @cmp_ult_rhs_inc(float %x, i32 %i) { ; CHECK-LABEL: @cmp_ult_rhs_inc( ; CHECK-NEXT: [[CONV:%.*]] = fptosi float [[X:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp ule i32 [[CONV]], [[I:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp uge i32 [[I:%.*]], [[CONV]] ; CHECK-NEXT: ret i1 [[CMP]] ; %conv = fptosi float %x to i32 @@ -4655,7 +4655,7 @@ define <2 x i1> @zext_bool_and_eq1(<2 x i1> %x, <2 x i8> %y) { define i1 @zext_bool_or_eq0(i1 %x, i8 %y) { ; CHECK-LABEL: @zext_bool_or_eq0( ; CHECK-NEXT: [[ZX:%.*]] = zext i1 [[X:%.*]] to i8 -; CHECK-NEXT: [[A:%.*]] = or i8 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = or i8 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[A]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -4671,7 +4671,7 @@ define i1 @zext_bool_and_eq0_use(i1 %x, i64 %y) { ; CHECK-LABEL: @zext_bool_and_eq0_use( ; CHECK-NEXT: [[ZX:%.*]] = zext i1 [[X:%.*]] to i64 ; CHECK-NEXT: call void @use_i64(i64 [[ZX]]) -; CHECK-NEXT: [[A:%.*]] = and i64 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i64 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[A]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -4704,7 +4704,7 @@ define i1 @zext_bool_and_ne0_use(i1 %x, i64 %y) { define i1 @zext_notbool_and_ne0(i2 %x, i8 %y) { ; CHECK-LABEL: @zext_notbool_and_ne0( ; CHECK-NEXT: [[ZX:%.*]] = zext i2 [[X:%.*]] to i8 -; CHECK-NEXT: [[A:%.*]] = and i8 [[ZX]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i8 [[Y:%.*]], [[ZX]] ; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[A]], 0 ; CHECK-NEXT: ret i1 [[R]] ; @@ -5055,7 +5055,7 @@ define i1 @or_positive_sgt_zero_multi_use(i8 %a) { define i1 @disjoint_or_sgt_1(i8 %a, i8 %b) { ; CHECK-LABEL: @disjoint_or_sgt_1( ; CHECK-NEXT: [[B1:%.*]] = add nsw i8 [[B:%.*]], 2 -; CHECK-NEXT: [[ICMP_:%.*]] = icmp sle i8 [[B1]], [[A:%.*]] +; CHECK-NEXT: [[ICMP_:%.*]] = icmp sge i8 [[A:%.*]], [[B1]] ; CHECK-NEXT: ret i1 [[ICMP_]] ; %a1 = or disjoint i8 %a, 1 @@ -5093,7 +5093,7 @@ define i1 @disjoint_or_sgt_3(i8 %a, i8 %b) { define i1 @disjoint_or_ugt_1(i8 %a, i8 %b) { 
; CHECK-LABEL: @disjoint_or_ugt_1( ; CHECK-NEXT: [[B1:%.*]] = add nsw i8 [[B:%.*]], 2 -; CHECK-NEXT: [[ICMP_:%.*]] = icmp ule i8 [[B1]], [[A:%.*]] +; CHECK-NEXT: [[ICMP_:%.*]] = icmp uge i8 [[A:%.*]], [[B1]] ; CHECK-NEXT: ret i1 [[ICMP_]] ; %a1 = or disjoint i8 %a, 1 @@ -5146,7 +5146,7 @@ define i1 @deduce_nuw_flag_2(i8 %a, i8 %b) { ; CHECK-LABEL: @deduce_nuw_flag_2( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = add nuw i8 [[B:%.*]], 1 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP0]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[CMP]] ; entry: @@ -5174,7 +5174,7 @@ define i1 @dont_deduce_nuw_flag_2(i8 %a, i8 %b) { ; CHECK-LABEL: @dont_deduce_nuw_flag_2( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = add i8 [[B:%.*]], -1 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[TMP0]], [[A:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[TMP0]] ; CHECK-NEXT: ret i1 [[CMP]] ; entry: diff --git a/llvm/test/Transforms/InstCombine/implies.ll b/llvm/test/Transforms/InstCombine/implies.ll index c02d84d3f8371..047b2aa816e0b 100644 --- a/llvm/test/Transforms/InstCombine/implies.ll +++ b/llvm/test/Transforms/InstCombine/implies.ll @@ -137,7 +137,7 @@ F: define i1 @src_or_distjoint_implies_sle_fail(i8 %x, i8 %y, i1 %other) { ; CHECK-LABEL: @src_or_distjoint_implies_sle_fail( ; CHECK-NEXT: [[X2:%.*]] = or disjoint i8 [[X:%.*]], 24 -; CHECK-NEXT: [[COND_NOT:%.*]] = icmp slt i8 [[X2]], [[Y:%.*]] +; CHECK-NEXT: [[COND_NOT:%.*]] = icmp sgt i8 [[Y:%.*]], [[X2]] ; CHECK-NEXT: br i1 [[COND_NOT]], label [[F:%.*]], label [[T:%.*]] ; CHECK: T: ; CHECK-NEXT: [[X1:%.*]] = or disjoint i8 [[X]], 23 @@ -268,7 +268,7 @@ F: define i1 @src_or_implies_ule(i8 %x, i8 %y, i8 %z, i1 %other) { ; CHECK-LABEL: @src_or_implies_ule( ; CHECK-NEXT: [[OR:%.*]] = or i8 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[COND_NOT:%.*]] = icmp ugt i8 [[OR]], [[Z:%.*]] +; CHECK-NEXT: [[COND_NOT:%.*]] = icmp ult i8 [[Z:%.*]], [[OR]] ; CHECK-NEXT: br i1 [[COND_NOT]], label [[F:%.*]], label [[T:%.*]] ; CHECK: T: ; CHECK-NEXT: ret i1 true diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll index fff05a416dece..abb36b6a785e5 100644 --- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll +++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-scalar.ll @@ -53,7 +53,7 @@ define i4 @in_constant_varx_6_invmask(i4 %x, i4 %mask) { define i4 @in_constant_mone_vary_invmask(i4 %y, i4 %mask) { ; CHECK-LABEL: @in_constant_mone_vary_invmask( ; CHECK-NEXT: [[MASK_NOT:%.*]] = xor i4 [[MASK:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = or i4 [[MASK_NOT]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = or i4 [[Y:%.*]], [[MASK_NOT]] ; CHECK-NEXT: ret i4 [[R]] ; %notmask = xor i4 %mask, -1 diff --git a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll index a76662c4bc439..0440199dadb87 100644 --- a/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll +++ b/llvm/test/Transforms/InstCombine/invert-variable-mask-in-masked-merge-vector.ll @@ -95,7 +95,7 @@ define <3 x i4> @in_constant_varx_6_invmask_poison(<3 x i4> %x, <3 x i4> %mask) define <2 x i4> @in_constant_mone_vary_invmask(<2 x i4> %y, <2 x i4> %mask) { ; CHECK-LABEL: @in_constant_mone_vary_invmask( ; CHECK-NEXT: [[MASK_NOT:%.*]] = xor <2 x i4> [[MASK:%.*]], -; CHECK-NEXT: [[R:%.*]] 
= or <2 x i4> [[MASK_NOT]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = or <2 x i4> [[Y:%.*]], [[MASK_NOT]] ; CHECK-NEXT: ret <2 x i4> [[R]] ; %notmask = xor <2 x i4> %mask, diff --git a/llvm/test/Transforms/InstCombine/ispow2.ll b/llvm/test/Transforms/InstCombine/ispow2.ll index 3f2c31d05f3ed..c21ad95f83a1c 100644 --- a/llvm/test/Transforms/InstCombine/ispow2.ll +++ b/llvm/test/Transforms/InstCombine/ispow2.ll @@ -161,7 +161,7 @@ define i1 @is_pow2or0_negate_op_extra_use1(i32 %x) { define i1 @is_pow2or0_negate_op_extra_use2(i32 %x) { ; CHECK-LABEL: @is_pow2or0_negate_op_extra_use2( ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[NEG]], [[X]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X]], [[NEG]] ; CHECK-NEXT: call void @use(i32 [[AND]]) ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], [[X]] ; CHECK-NEXT: ret i1 [[CMP]] @@ -1190,7 +1190,7 @@ define <2 x i1> @isnot_pow2nor0_wrong_pred3_ctpop_commute_vec(<2 x i8> %x) { define i1 @is_pow2_fail_pr63327(i32 %x) { ; CHECK-LABEL: @is_pow2_fail_pr63327( ; CHECK-NEXT: [[NX:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[X_AND_NX:%.*]] = and i32 [[NX]], [[X]] +; CHECK-NEXT: [[X_AND_NX:%.*]] = and i32 [[X]], [[NX]] ; CHECK-NEXT: [[R:%.*]] = icmp sge i32 [[X_AND_NX]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -1244,7 +1244,7 @@ define i1 @blsmsk_is_p2_or_z_fail(i32 %xx, i32 %yy) { define i1 @blsmsk_isnt_p2_or_z_fail(i32 %x) { ; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail( ; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]] +; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]] ; CHECK-NEXT: [[R:%.*]] = icmp ule i32 [[Y]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -1259,7 +1259,7 @@ declare void @use.i32(i32) define i1 @blsmsk_isnt_p2_or_z_fail_multiuse(i32 %x) { ; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_multiuse( ; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]] +; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]] ; CHECK-NEXT: call void @use.i32(i32 [[Y]]) ; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]] ; CHECK-NEXT: ret i1 [[R]] @@ -1274,7 +1274,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_multiuse(i32 %x) { define i1 @blsmsk_isnt_p2_or_z_fail_wrong_add(i32 %x, i32 %z) { ; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_wrong_add( ; CHECK-NEXT: [[XM1:%.*]] = add i32 [[Z:%.*]], -1 -; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X:%.*]] +; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X:%.*]], [[XM1]] ; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -1288,7 +1288,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_wrong_add(i32 %x, i32 %z) { define i1 @blsmsk_isnt_p2_or_z_fail_bad_xor(i32 %x, i32 %z) { ; CHECK-LABEL: @blsmsk_isnt_p2_or_z_fail_bad_xor( ; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[Z:%.*]] +; CHECK-NEXT: [[Y:%.*]] = xor i32 [[Z:%.*]], [[XM1]] ; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[Y]], [[X]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -1302,7 +1302,7 @@ define i1 @blsmsk_isnt_p2_or_z_fail_bad_xor(i32 %x, i32 %z) { define i1 @blsmsk_is_p2_or_z_fail_bad_cmp(i32 %x, i32 %z) { ; CHECK-LABEL: @blsmsk_is_p2_or_z_fail_bad_cmp( ; CHECK-NEXT: [[XM1:%.*]] = add i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Y:%.*]] = xor i32 [[XM1]], [[X]] +; CHECK-NEXT: [[Y:%.*]] = xor i32 [[X]], [[XM1]] ; CHECK-NEXT: [[R:%.*]] = icmp uge i32 [[Y]], [[Z:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll index c7445a6ce2fe2..3482a8e975992 
100644 --- a/llvm/test/Transforms/InstCombine/known-bits.ll +++ b/llvm/test/Transforms/InstCombine/known-bits.ll @@ -1018,7 +1018,7 @@ define i1 @extract_value_sadd_fail(i8 %xx, i8 %yy) { define i1 @extract_value_usub(i8 %x, i8 %zz) { ; CHECK-LABEL: @extract_value_usub( ; CHECK-NEXT: [[Z:%.*]] = add nuw i8 [[ZZ:%.*]], 1 -; CHECK-NEXT: [[Y:%.*]] = add i8 [[Z]], [[X:%.*]] +; CHECK-NEXT: [[Y:%.*]] = add i8 [[X:%.*]], [[Z]] ; CHECK-NEXT: [[SUB_UOV:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X]], i8 [[Y]]) ; CHECK-NEXT: [[SUB:%.*]] = extractvalue { i8, i1 } [[SUB_UOV]], 0 ; CHECK-NEXT: [[UOV:%.*]] = extractvalue { i8, i1 } [[SUB_UOV]], 1 @@ -1062,7 +1062,7 @@ define i1 @extract_value_usub_fail(i8 %x, i8 %z) { define i1 @extract_value_ssub(i8 %x, i8 %zz) { ; CHECK-LABEL: @extract_value_ssub( ; CHECK-NEXT: [[Z:%.*]] = add nuw i8 [[ZZ:%.*]], 1 -; CHECK-NEXT: [[Y:%.*]] = add i8 [[Z]], [[X:%.*]] +; CHECK-NEXT: [[Y:%.*]] = add i8 [[X:%.*]], [[Z]] ; CHECK-NEXT: [[SUB_SOV:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 [[Y]], i8 [[X]]) ; CHECK-NEXT: [[SUB:%.*]] = extractvalue { i8, i1 } [[SUB_SOV]], 0 ; CHECK-NEXT: [[SOV:%.*]] = extractvalue { i8, i1 } [[SUB_SOV]], 1 @@ -1586,7 +1586,7 @@ define i32 @test_qnan_quiet_bit2(float nofpclass(sub norm inf snan) %x) { define i16 @test_simplify_mask(i32 %ui, float %x) { ; CHECK-LABEL: @test_simplify_mask( ; CHECK-NEXT: [[CONV:%.*]] = uitofp i32 [[UI:%.*]] to float -; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[CONV]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[CONV]] ; CHECK-NEXT: br i1 [[CMP]], label [[IF_ELSE:%.*]], label [[IF_END:%.*]] ; CHECK: if.end: ; CHECK-NEXT: ret i16 31744 diff --git a/llvm/test/Transforms/InstCombine/known-never-nan.ll b/llvm/test/Transforms/InstCombine/known-never-nan.ll index 82075b37b4361..1ca24671d65c4 100644 --- a/llvm/test/Transforms/InstCombine/known-never-nan.ll +++ b/llvm/test/Transforms/InstCombine/known-never-nan.ll @@ -62,7 +62,7 @@ define i1 @nnan_fadd(double %arg0, double %arg1) { define i1 @nnan_fadd_maybe_nan_lhs(double %arg0, double %arg1) { ; CHECK-LABEL: @nnan_fadd_maybe_nan_lhs( ; CHECK-NEXT: [[NNAN_ARG1:%.*]] = fadd nnan double [[ARG1:%.*]], 1.000000e+00 -; CHECK-NEXT: [[OP:%.*]] = fadd double [[NNAN_ARG1]], [[ARG0:%.*]] +; CHECK-NEXT: [[OP:%.*]] = fadd double [[ARG0:%.*]], [[NNAN_ARG1]] ; CHECK-NEXT: [[TMP:%.*]] = fcmp ord double [[OP]], 0.000000e+00 ; CHECK-NEXT: ret i1 [[TMP]] ; diff --git a/llvm/test/Transforms/InstCombine/ldexp-ext.ll b/llvm/test/Transforms/InstCombine/ldexp-ext.ll index 4608553eb8874..58710005d6cce 100644 --- a/llvm/test/Transforms/InstCombine/ldexp-ext.ll +++ b/llvm/test/Transforms/InstCombine/ldexp-ext.ll @@ -4,7 +4,7 @@ define float @ldexp_zext_float(float %x, i1 %bool) { ; CHECK-LABEL: @ldexp_zext_float( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], float 2.000000e+00, float 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul float [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul float [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[LDEXP]] ; %zext = zext i1 %bool to i32 @@ -26,7 +26,7 @@ define float @ldexp_zext_float_negative(float %x, i8 %y) { define double @ldexp_zext_double(double %x, i1 %bool) { ; CHECK-LABEL: @ldexp_zext_double( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], double 2.000000e+00, double 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul double [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul double [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[LDEXP]] ; %zext = zext i1 %bool to i32 @@ -37,7 +37,7 @@ 
define double @ldexp_zext_double(double %x, i1 %bool) { define double @ldexp_zext_double_fast_math(double %x, i1 %bool) { ; CHECK-LABEL: @ldexp_zext_double_fast_math( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], double 2.000000e+00, double 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul reassoc double [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul reassoc double [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[LDEXP]] ; %zext = zext i1 %bool to i32 @@ -48,7 +48,7 @@ define double @ldexp_zext_double_fast_math(double %x, i1 %bool) { define <2 x float> @ldexp_zext_float_vector(<2 x float> %x, <2 x i1> %bool) { ; CHECK-LABEL: @ldexp_zext_float_vector( ; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[BOOL:%.*]], <2 x float> , <2 x float> -; CHECK-NEXT: [[LDEXP:%.*]] = fmul <2 x float> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul <2 x float> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x float> [[LDEXP]] ; %zext = zext <2 x i1> %bool to <2 x i32> @@ -59,7 +59,7 @@ define <2 x float> @ldexp_zext_float_vector(<2 x float> %x, <2 x i1> %bool) { define float @ldexp_sext_float(float %x, i1 %bool) { ; CHECK-LABEL: @ldexp_sext_float( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], float 5.000000e-01, float 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul float [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul float [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[LDEXP]] ; %sext = sext i1 %bool to i32 @@ -81,7 +81,7 @@ define float @ldexp_sext_float_negative(float %x, i8 %y) { define double @ldexp_sext_double(double %x, i1 %bool) { ; CHECK-LABEL: @ldexp_sext_double( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], double 5.000000e-01, double 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul double [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul double [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[LDEXP]] ; %sext = sext i1 %bool to i32 @@ -92,7 +92,7 @@ define double @ldexp_sext_double(double %x, i1 %bool) { define double @ldexp_sext_double_fast_math(double %x, i1 %bool) { ; CHECK-LABEL: @ldexp_sext_double_fast_math( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[BOOL:%.*]], double 5.000000e-01, double 1.000000e+00 -; CHECK-NEXT: [[LDEXP:%.*]] = fmul reassoc double [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul reassoc double [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret double [[LDEXP]] ; %sext = sext i1 %bool to i32 @@ -103,7 +103,7 @@ define double @ldexp_sext_double_fast_math(double %x, i1 %bool) { define <2 x float> @ldexp_sext_float_vector(<2 x float> %x, <2 x i1> %bool) { ; CHECK-LABEL: @ldexp_sext_float_vector( ; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[BOOL:%.*]], <2 x float> , <2 x float> -; CHECK-NEXT: [[LDEXP:%.*]] = fmul <2 x float> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[LDEXP:%.*]] = fmul <2 x float> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x float> [[LDEXP]] ; %sext = sext <2 x i1> %bool to <2 x i32> diff --git a/llvm/test/Transforms/InstCombine/log-pow.ll b/llvm/test/Transforms/InstCombine/log-pow.ll index 1dfe5c944eee7..b628e7cc57f15 100644 --- a/llvm/test/Transforms/InstCombine/log-pow.ll +++ b/llvm/test/Transforms/InstCombine/log-pow.ll @@ -4,7 +4,7 @@ define double @log_pow(double %x, double %y) { ; CHECK-LABEL: @log_pow( ; CHECK-NEXT: [[LOG1:%.*]] = call fast double @llvm.log.f64(double [[X:%.*]]) -; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[LOG1]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul fast double [[Y:%.*]], [[LOG1]] ; CHECK-NEXT: ret double [[MUL]] ; %pow = call fast double @pow(double %x, double %y) @@ -84,7 +84,7 @@ define double 
@log_powi_not_fast(double %x, i32 %y) { define float @log10f_powf(float %x, float %y) { ; CHECK-LABEL: @log10f_powf( ; CHECK-NEXT: [[LOG1:%.*]] = call fast float @llvm.log10.f32(float [[X:%.*]]) -; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[LOG1]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul fast float [[Y:%.*]], [[LOG1]] ; CHECK-NEXT: ret float [[MUL]] ; %pow = call fast float @powf(float %x, float %y) @@ -95,7 +95,7 @@ define float @log10f_powf(float %x, float %y) { define <2 x double> @log2v_powv(<2 x double> %x, <2 x double> %y) { ; CHECK-LABEL: @log2v_powv( ; CHECK-NEXT: [[LOG1:%.*]] = call fast <2 x double> @llvm.log2.v2f64(<2 x double> [[X:%.*]]) -; CHECK-NEXT: [[MUL:%.*]] = fmul fast <2 x double> [[LOG1]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = fmul fast <2 x double> [[Y:%.*]], [[LOG1]] ; CHECK-NEXT: ret <2 x double> [[MUL]] ; %pow = call fast <2 x double> @llvm.pow.v2f64(<2 x double> %x, <2 x double> %y) diff --git a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll index 20d60206ebcdf..cf0dc35032884 100644 --- a/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/logical-select-inseltpoison.ll @@ -4,8 +4,8 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @foo( -; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[J]] ; %e = icmp slt i32 %a, %b @@ -19,8 +19,8 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) { define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @bar( -; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[J]] ; %e = icmp slt i32 %a, %b @@ -34,8 +34,8 @@ define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) { define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @goo( -; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = icmp slt i32 %a, %b @@ -141,8 +141,8 @@ define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2 define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @par( -; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = icmp slt i32 %a, %b @@ -343,10 +343,10 @@ define <2 x i64> @bitcast_select_multi_uses(<4 x i1> %cmp, <2 x i64> %a, <2 x i6 ; CHECK-LABEL: @bitcast_select_multi_uses( ; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP:%.*]] to <4 x i32> ; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64> -; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[BC1]], [[A:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[A:%.*]], [[BC1]] ; CHECK-NEXT: 
[[TMP1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64> ; CHECK-NEXT: [[BC2:%.*]] = xor <2 x i64> [[TMP1]], -; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[BC2]], [[B:%.*]] +; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[B:%.*]], [[BC2]] ; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND1]] ; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[AND2]], [[BC2]] ; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> [[OR]], [[ADD]] @@ -393,7 +393,7 @@ define i1 @bools_logical(i1 %a, i1 %b, i1 %c) { define i1 @bools_multi_uses1(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @bools_multi_uses1( ; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[AND1:%.*]] = and i1 [[NOT]], [[A:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i1 [[A:%.*]], [[NOT]] ; CHECK-NEXT: [[OR:%.*]] = select i1 [[C]], i1 [[B:%.*]], i1 [[A]] ; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[OR]], [[AND1]] ; CHECK-NEXT: ret i1 [[XOR]] diff --git a/llvm/test/Transforms/InstCombine/logical-select.ll b/llvm/test/Transforms/InstCombine/logical-select.ll index 6e2ed6bf796d0..62a63839704a4 100644 --- a/llvm/test/Transforms/InstCombine/logical-select.ll +++ b/llvm/test/Transforms/InstCombine/logical-select.ll @@ -9,8 +9,8 @@ declare void @use2(<2 x i1>) define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @foo( -; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[J]] ; %e = icmp slt i32 %a, %b @@ -24,8 +24,8 @@ define i32 @foo(i32 %a, i32 %b, i32 %c, i32 %d) { define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @bar( -; CHECK-NEXT: [[E:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[J:%.*]] = select i1 [[E]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[E_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[J:%.*]] = select i1 [[E_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[J]] ; %e = icmp slt i32 %a, %b @@ -39,8 +39,8 @@ define i32 @bar(i32 %a, i32 %b, i32 %c, i32 %d) { define i32 @goo(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @goo( -; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = icmp slt i32 %a, %b @@ -146,8 +146,8 @@ define <2 x i32> @fold_inverted_icmp_vector_preds(<2 x i32> %a, <2 x i32> %b, <2 define i32 @par(i32 %a, i32 %b, i32 %c, i32 %d) { ; CHECK-LABEL: @par( -; CHECK-NEXT: [[T0:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0]], i32 [[C:%.*]], i32 [[D:%.*]] +; CHECK-NEXT: [[T0_NOT:%.*]] = icmp slt i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[T3:%.*]] = select i1 [[T0_NOT]], i32 [[C:%.*]], i32 [[D:%.*]] ; CHECK-NEXT: ret i32 [[T3]] ; %t0 = icmp slt i32 %a, %b @@ -348,10 +348,10 @@ define <2 x i64> @bitcast_select_multi_uses(<4 x i1> %cmp, <2 x i64> %a, <2 x i6 ; CHECK-LABEL: @bitcast_select_multi_uses( ; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i1> [[CMP:%.*]] to <4 x i32> ; CHECK-NEXT: [[BC1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64> -; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[BC1]], [[A:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <2 x i64> [[A:%.*]], [[BC1]] ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[SEXT]] to <2 x i64> ; CHECK-NEXT: 
[[BC2:%.*]] = xor <2 x i64> [[TMP1]], -; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[BC2]], [[B:%.*]] +; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[B:%.*]], [[BC2]] ; CHECK-NEXT: [[OR:%.*]] = or <2 x i64> [[AND2]], [[AND1]] ; CHECK-NEXT: [[ADD:%.*]] = add <2 x i64> [[AND2]], [[BC2]] ; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i64> [[OR]], [[ADD]] @@ -398,7 +398,7 @@ define i1 @bools_logical(i1 %a, i1 %b, i1 %c) { define i1 @bools_multi_uses1(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @bools_multi_uses1( ; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[AND1:%.*]] = and i1 [[NOT]], [[A:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i1 [[A:%.*]], [[NOT]] ; CHECK-NEXT: [[OR:%.*]] = select i1 [[C]], i1 [[B:%.*]], i1 [[A]] ; CHECK-NEXT: [[XOR:%.*]] = xor i1 [[OR]], [[AND1]] ; CHECK-NEXT: ret i1 [[XOR]] @@ -766,7 +766,7 @@ define <8 x i3> @bitcast_vec_cond_commute1(<3 x i1> noundef %cond, <8 x i3> %pc, ; CHECK-NEXT: [[T9:%.*]] = bitcast <3 x i8> [[S]] to <8 x i3> ; CHECK-NEXT: [[NOTT9:%.*]] = xor <8 x i3> [[T9]], ; CHECK-NEXT: [[T11:%.*]] = and <8 x i3> [[C]], [[NOTT9]] -; CHECK-NEXT: [[T12:%.*]] = and <8 x i3> [[T9]], [[D:%.*]] +; CHECK-NEXT: [[T12:%.*]] = and <8 x i3> [[D:%.*]], [[T9]] ; CHECK-NEXT: [[R:%.*]] = or disjoint <8 x i3> [[T11]], [[T12]] ; CHECK-NEXT: ret <8 x i3> [[R]] ; @@ -831,8 +831,8 @@ define <2 x i64> @bitcast_fp_vec_cond(<2 x double> noundef %s, <2 x i64> %c, <2 ; CHECK-LABEL: @bitcast_fp_vec_cond( ; CHECK-NEXT: [[T9:%.*]] = bitcast <2 x double> [[S:%.*]] to <2 x i64> ; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], -; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]] -; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]] +; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[C:%.*]], [[NOTT9]] +; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[D:%.*]], [[T9]] ; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]] ; CHECK-NEXT: ret <2 x i64> [[R]] ; @@ -851,8 +851,8 @@ define <2 x i64> @bitcast_int_vec_cond(i1 noundef %b, <2 x i64> %c, <2 x i64> %d ; CHECK-NEXT: [[S:%.*]] = sext i1 [[B:%.*]] to i128 ; CHECK-NEXT: [[T9:%.*]] = bitcast i128 [[S]] to <2 x i64> ; CHECK-NEXT: [[NOTT9:%.*]] = xor <2 x i64> [[T9]], -; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[NOTT9]], [[C:%.*]] -; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[T9]], [[D:%.*]] +; CHECK-NEXT: [[T11:%.*]] = and <2 x i64> [[C:%.*]], [[NOTT9]] +; CHECK-NEXT: [[T12:%.*]] = and <2 x i64> [[D:%.*]], [[T9]] ; CHECK-NEXT: [[R:%.*]] = or disjoint <2 x i64> [[T11]], [[T12]] ; CHECK-NEXT: ret <2 x i64> [[R]] ; @@ -1126,7 +1126,7 @@ define i1 @not_d_bools_negative_use2(i1 %c, i1 %x, i1 %y) { define i1 @logical_and_or_with_not_op(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @logical_and_or_with_not_op( ; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[OR:%.*]] = or i1 [[NOT]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[B:%.*]], [[NOT]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[A:%.*]], i1 [[OR]], i1 false ; CHECK-NEXT: ret i1 [[AND]] ; @@ -1217,7 +1217,7 @@ define i1 @logical_and_or_with_common_not_op_variant5(i1 %a) { define i1 @logical_or_and_with_not_op(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @logical_or_and_with_not_op( ; CHECK-NEXT: [[NOT:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[AND:%.*]] = and i1 [[NOT]], [[B:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i1 [[B:%.*]], [[NOT]] ; CHECK-NEXT: [[OR:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[AND]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -1325,9 +1325,9 @@ define i1 @reduce_logical_and2(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @reduce_logical_and2( ; CHECK-NEXT: bb: ; 
CHECK-NEXT: [[TMP0:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[B:%.*]] = and i1 [[TMP0]], [[B1:%.*]] -; CHECK-NEXT: [[AND3:%.*]] = select i1 [[AND2:%.*]], i1 [[B]], i1 false -; CHECK-NEXT: ret i1 [[AND3]] +; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[B:%.*]], [[TMP0]] +; CHECK-NEXT: [[AND2:%.*]] = select i1 [[A:%.*]], i1 [[TMP1]], i1 false +; CHECK-NEXT: ret i1 [[AND2]] ; bb: %or = xor i1 %c, %b @@ -1373,9 +1373,9 @@ bb: define i1 @reduce_logical_or2(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @reduce_logical_or2( ; CHECK-NEXT: bb: -; CHECK-NEXT: [[B:%.*]] = or i1 [[C:%.*]], [[B1:%.*]] -; CHECK-NEXT: [[AND3:%.*]] = select i1 [[AND2:%.*]], i1 true, i1 [[B]] -; CHECK-NEXT: ret i1 [[AND3]] +; CHECK-NEXT: [[TMP0:%.*]] = or i1 [[C:%.*]], [[B:%.*]] +; CHECK-NEXT: [[AND2:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[TMP0]] +; CHECK-NEXT: ret i1 [[AND2]] ; bb: %or = xor i1 %c, %b @@ -1493,7 +1493,7 @@ define i1 @reduce_bitwise_and1(i1 %a, i32 %b, i32 %c) { ; CHECK-NEXT: bb: ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[B:%.*]], 6 ; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[C:%.*]], [[B]] -; CHECK-NEXT: [[AND1:%.*]] = or i1 [[CMP1]], [[A:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = or i1 [[A:%.*]], [[CMP1]] ; CHECK-NEXT: [[AND2:%.*]] = and i1 [[AND1]], [[CMP]] ; CHECK-NEXT: ret i1 [[AND2]] ; diff --git a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll index 5d058b20be720..89522a00d7894 100644 --- a/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll +++ b/llvm/test/Transforms/InstCombine/lshr-and-negC-icmpeq-zero.ll @@ -252,7 +252,7 @@ define i1 @scalar_i32_lshr_and_negC_eq_nonzero(i32 %x, i32 %y) { define i1 @scalar_i8_lshr_and_negC_eq_not_negatedPowerOf2(i8 %x, i8 %y) { ; CHECK-LABEL: @scalar_i8_lshr_and_negC_eq_not_negatedPowerOf2( ; CHECK-NEXT: [[TMP1:%.*]] = shl i8 -3, [[Y:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[TMP2]], 0 ; CHECK-NEXT: ret i1 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll index 01e07985ba6ab..4360714c78caa 100644 --- a/llvm/test/Transforms/InstCombine/lshr.ll +++ b/llvm/test/Transforms/InstCombine/lshr.ll @@ -742,7 +742,7 @@ define i32 @mul_splat_fold_wrong_lshr_const(i32 %x) { define i32 @mul_splat_fold_no_nuw(i32 %x) { ; CHECK-LABEL: @mul_splat_fold_no_nuw( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 16 -; CHECK-NEXT: [[T:%.*]] = add nsw i32 [[TMP1]], [[X]] +; CHECK-NEXT: [[T:%.*]] = add nsw i32 [[X]], [[TMP1]] ; CHECK-NEXT: ret i32 [[T]] ; %m = mul nsw i32 %x, 65537 @@ -1406,7 +1406,7 @@ define i2 @bool_add_lshr(i1 %a, i1 %b) { define i4 @not_bool_add_lshr(i2 %a, i2 %b) { ; CHECK-LABEL: @not_bool_add_lshr( ; CHECK-NEXT: [[TMP1:%.*]] = xor i2 [[A:%.*]], -1 -; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i2 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i2 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i4 ; CHECK-NEXT: ret i4 [[LSHR]] ; diff --git a/llvm/test/Transforms/InstCombine/masked-merge-add.ll b/llvm/test/Transforms/InstCombine/masked-merge-add.ll index 0484369e99d6a..5ef53ad515013 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-add.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-add.ll @@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; 
CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) { ; CHECK-LABEL: @p_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; @@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-LABEL: @p_vec_poison( ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; @@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative0( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative4( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @n0_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) { ; CHECK-LABEL: @n2_badmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = add i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; diff --git a/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll b/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll index dc76743c565ed..639478dfcc6fe 100644 --- 
a/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-and-of-ors.ll @@ -17,7 +17,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @p( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: ret i32 [[RET]] @@ -32,7 +32,7 @@ define i32 @p(i32 %x, i32 %y, i32 %m) { define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) { ; CHECK-LABEL: @p_splatvec( ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M:%.*]], -; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or <2 x i32> [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or <2 x i32> [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and <2 x i32> [[OR]], [[OR1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] @@ -125,7 +125,7 @@ declare i32 @gen32() define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @p_commutative0( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: ret i32 [[RET]] @@ -141,8 +141,8 @@ define i32 @p_commutative1(i32 %x, i32 %m) { ; CHECK-LABEL: @p_commutative1( ; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32() ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -157,7 +157,7 @@ define i32 @p_commutative1(i32 %x, i32 %m) { define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @p_commutative2( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]] ; CHECK-NEXT: ret i32 [[RET]] @@ -173,8 +173,8 @@ define i32 @p_commutative3(i32 %x, i32 %m) { ; CHECK-LABEL: @p_commutative3( ; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32() ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -189,7 +189,7 @@ define i32 @p_commutative3(i32 %x, i32 %m) { define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @p_commutative4( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]] ; CHECK-NEXT: ret i32 [[RET]] @@ -205,8 +205,8 @@ define i32 @p_commutative5(i32 %x, i32 %m) { ; CHECK-LABEL: @p_commutative5( ; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32() ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]] +; CHECK-NEXT: 
[[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -222,8 +222,8 @@ define i32 @p_commutative6(i32 %x, i32 %m) { ; CHECK-LABEL: @p_commutative6( ; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32() ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[M]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M]], [[Y]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR1]], [[OR]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -259,7 +259,7 @@ declare void @use32(i32) define i32 @n0_oneuse_of_neg_is_ok_0(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_of_neg_is_ok_0( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -276,7 +276,7 @@ define i32 @n0_oneuse_of_neg_is_ok_0(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_1(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_1( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[OR]]) @@ -293,7 +293,7 @@ define i32 @n0_oneuse_1(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_2(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_2( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[OR1]]) @@ -310,7 +310,7 @@ define i32 @n0_oneuse_2(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_3(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_3( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -329,7 +329,7 @@ define i32 @n0_oneuse_3(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_4(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_4( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -348,7 +348,7 @@ define i32 @n0_oneuse_4(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_5(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_5( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -369,7 +369,7 @@ define i32 @n0_oneuse_5(i32 %x, i32 %y, i32 %m) { define i32 @n0_oneuse_6(i32 %x, i32 %y, i32 %m) { ; CHECK-LABEL: @n0_oneuse_6( ; 
CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[M]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: call void @use32(i32 [[OR]]) @@ -456,7 +456,7 @@ define i32 @n1_badxor(i32 %x, i32 %y, i32 %m) { define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) { ; CHECK-LABEL: @n2_badmask( ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[X:%.*]], [[NEG]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[M1:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[RET:%.*]] = and i32 [[OR]], [[OR1]] ; CHECK-NEXT: ret i32 [[RET]] diff --git a/llvm/test/Transforms/InstCombine/masked-merge-or.ll b/llvm/test/Transforms/InstCombine/masked-merge-or.ll index 0531a532fc7e0..dd2ac6dfe5109 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-or.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-or.ll @@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) { ; CHECK-LABEL: @p_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; @@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-LABEL: @p_vec_poison( ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; @@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative0( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative4( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; 
CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @n0_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) { ; CHECK-LABEL: @n2_badmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; diff --git a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll index 74cc7625aebff..7ed1f3fdfdab6 100644 --- a/llvm/test/Transforms/InstCombine/masked-merge-xor.ll +++ b/llvm/test/Transforms/InstCombine/masked-merge-xor.ll @@ -20,7 +20,7 @@ define i32 @p(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -35,7 +35,7 @@ define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> noundef %m) { ; CHECK-LABEL: @p_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <2 x i32> [[RET]] ; @@ -65,7 +65,7 @@ define <3 x i32> @p_vec_poison(<3 x i32> %x, <3 x i32> %y, <3 x i32> noundef %m) ; CHECK-LABEL: @p_vec_poison( ; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], -; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint <3 x i32> [[AND]], [[AND1]] ; CHECK-NEXT: ret <3 x i32> [[RET]] ; @@ -84,8 +84,8 @@ define i32 @p_constmask(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] -; CHECK-NEXT: ret i32 [[RET1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] +; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 65280 %and1 = and i32 %y, -65281 @@ -97,8 +97,8 @@ define <2 x i32> @p_constmask_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] -; CHECK-NEXT: ret <2 x i32> [[RET1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, %and1 = and <2 x i32> %y, @@ -140,8 +140,8 
@@ define i32 @p_constmask2(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 61440 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] -; CHECK-NEXT: ret i32 [[RET1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] +; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 61440 %and1 = and i32 %y, -65281 @@ -153,8 +153,8 @@ define <2 x i32> @p_constmask2_splatvec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @p_constmask2_splatvec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], -; CHECK-NEXT: [[RET1:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] -; CHECK-NEXT: ret <2 x i32> [[RET1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint <2 x i32> [[AND]], [[AND1]] +; CHECK-NEXT: ret <2 x i32> [[RET]] ; %and = and <2 x i32> %x, %and1 = and <2 x i32> %y, @@ -199,7 +199,7 @@ define i32 @p_commutative0(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative0( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -231,7 +231,7 @@ define i32 @p_commutative2(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -263,7 +263,7 @@ define i32 @p_commutative4(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @p_commutative4( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] ; CHECK-NEXT: ret i32 [[RET]] ; @@ -312,8 +312,8 @@ define i32 @p_constmask_commutative(i32 %x, i32 %y) { ; CHECK-LABEL: @p_constmask_commutative( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND1]], [[AND]] -; CHECK-NEXT: ret i32 [[RET1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND1]], [[AND]] +; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 65280 %and1 = and i32 %y, -65281 @@ -333,7 +333,7 @@ define i32 @n0_oneuse(i32 %x, i32 %y, i32 noundef %m) { ; CHECK-LABEL: @n0_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void @use32(i32 [[NEG]]) @@ -354,10 +354,10 @@ define i32 @n0_constmask_oneuse(i32 %x, i32 %y) { ; CHECK-LABEL: @n0_constmask_oneuse( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280 ; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281 -; CHECK-NEXT: [[RET1:%.*]] = or disjoint i32 [[AND]], [[AND1]] +; CHECK-NEXT: [[RET:%.*]] = or disjoint i32 [[AND]], [[AND1]] ; CHECK-NEXT: call void @use32(i32 [[AND]]) ; CHECK-NEXT: call void 
@use32(i32 [[AND1]]) -; CHECK-NEXT: ret i32 [[RET1]] +; CHECK-NEXT: ret i32 [[RET]] ; %and = and i32 %x, 65280 %and1 = and i32 %y, -65281 @@ -390,7 +390,7 @@ define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) { ; CHECK-LABEL: @n2_badmask( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1 -; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: [[RET:%.*]] = xor i32 [[AND]], [[AND1]] ; CHECK-NEXT: ret i32 [[RET]] ; diff --git a/llvm/test/Transforms/InstCombine/minmax-fold.ll b/llvm/test/Transforms/InstCombine/minmax-fold.ll index 3e870c695cf1a..26cd4996e687d 100644 --- a/llvm/test/Transforms/InstCombine/minmax-fold.ll +++ b/llvm/test/Transforms/InstCombine/minmax-fold.ll @@ -99,7 +99,7 @@ define i32 @t8(i64 %a, i32 %b) { ; CHECK-NEXT: [[TMP1:%.*]] = call i64 @llvm.smin.i64(i64 [[A:%.*]], i64 -32767) ; CHECK-NEXT: [[TMP2:%.*]] = trunc i64 [[TMP1]] to i32 ; CHECK-NEXT: [[TMP3:%.*]] = icmp slt i32 [[B:%.*]], 42 -; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[TMP2]], [[B]] +; CHECK-NEXT: [[TMP4:%.*]] = icmp ne i32 [[B]], [[TMP2]] ; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP3]], i1 true, i1 [[TMP4]] ; CHECK-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32 ; CHECK-NEXT: ret i32 [[TMP6]] @@ -1360,11 +1360,11 @@ define i8 @PR14613_smax(i8 %x) { define i8 @PR46271(<2 x i8> %x) { ; CHECK-LABEL: @PR46271( -; CHECK-NEXT: [[TMP3:%.*]] = xor <2 x i8> [[X:%.*]], +; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]], ; CHECK-NEXT: [[A_INV:%.*]] = icmp slt <2 x i8> [[X]], zeroinitializer -; CHECK-NEXT: [[TMP1:%.*]] = select <2 x i1> [[A_INV]], <2 x i8> , <2 x i8> [[TMP3]] -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i64 1 -; CHECK-NEXT: ret i8 [[TMP2]] +; CHECK-NEXT: [[NOT:%.*]] = select <2 x i1> [[A_INV]], <2 x i8> , <2 x i8> [[TMP1]] +; CHECK-NEXT: [[R:%.*]] = extractelement <2 x i8> [[NOT]], i64 1 +; CHECK-NEXT: ret i8 [[R]] ; %a = icmp sgt <2 x i8> %x, %b = select <2 x i1> %a, <2 x i8> %x, <2 x i8> diff --git a/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll b/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll index b8430da451f9a..8b896632b8adc 100644 --- a/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll +++ b/llvm/test/Transforms/InstCombine/minmax-of-xor-x.ll @@ -76,8 +76,8 @@ define i8 @smin_xor_Cpow2_neg(i8 %x) { define i8 @umax_xor_pow2(i8 %x, i8 %y) { ; CHECK-LABEL: @umax_xor_pow2( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[YP2:%.*]] = and i8 [[NY]], [[Y]] -; CHECK-NEXT: [[R:%.*]] = or i8 [[YP2]], [[X:%.*]] +; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]] +; CHECK-NEXT: [[R:%.*]] = or i8 [[X:%.*]], [[YP2]] ; CHECK-NEXT: ret i8 [[R]] ; %ny = sub i8 0, %y @@ -90,9 +90,9 @@ define i8 @umax_xor_pow2(i8 %x, i8 %y) { define <2 x i8> @umin_xor_pow2(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @umin_xor_pow2( ; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]] -; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[NY]], [[Y]] +; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[Y]], [[NY]] ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[YP2]], -; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = and <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i8> [[R]] ; %ny = sub <2 x i8> , %y @@ -105,8 +105,8 @@ define <2 x i8> @umin_xor_pow2(<2 x i8> %x, <2 x i8> %y) { define i8 @smax_xor_pow2_unk(i8 %x, i8 %y) { ; CHECK-LABEL: @smax_xor_pow2_unk( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: 
[[YP2:%.*]] = and i8 [[NY]], [[Y]] -; CHECK-NEXT: [[X_XOR:%.*]] = xor i8 [[YP2]], [[X:%.*]] +; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]] +; CHECK-NEXT: [[X_XOR:%.*]] = xor i8 [[X:%.*]], [[YP2]] ; CHECK-NEXT: [[R:%.*]] = call i8 @llvm.smax.i8(i8 [[X]], i8 [[X_XOR]]) ; CHECK-NEXT: ret i8 [[R]] ; @@ -120,8 +120,8 @@ define i8 @smax_xor_pow2_unk(i8 %x, i8 %y) { define <2 x i8> @smin_xor_pow2_unk(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @smin_xor_pow2_unk( ; CHECK-NEXT: [[NY:%.*]] = sub <2 x i8> zeroinitializer, [[Y:%.*]] -; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[NY]], [[Y]] -; CHECK-NEXT: [[X_XOR:%.*]] = xor <2 x i8> [[YP2]], [[X:%.*]] +; CHECK-NEXT: [[YP2:%.*]] = and <2 x i8> [[Y]], [[NY]] +; CHECK-NEXT: [[X_XOR:%.*]] = xor <2 x i8> [[X:%.*]], [[YP2]] ; CHECK-NEXT: [[R:%.*]] = call <2 x i8> @llvm.smin.v2i8(<2 x i8> [[X]], <2 x i8> [[X_XOR]]) ; CHECK-NEXT: ret <2 x i8> [[R]] ; @@ -159,12 +159,12 @@ pos: define i8 @smin_xor_pow2_pos(i8 %x, i8 %y) { ; CHECK-LABEL: @smin_xor_pow2_pos( ; CHECK-NEXT: [[NY:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[YP2:%.*]] = and i8 [[NY]], [[Y]] +; CHECK-NEXT: [[YP2:%.*]] = and i8 [[Y]], [[NY]] ; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i8 [[YP2]], 0 ; CHECK-NEXT: br i1 [[CMP]], label [[NEG:%.*]], label [[POS:%.*]] ; CHECK: neg: ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[YP2]], -1 -; CHECK-NEXT: [[R:%.*]] = and i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i8 [[R]] ; CHECK: pos: ; CHECK-NEXT: call void @barrier() diff --git a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll index e940ae3fec163..fd8ad88764f59 100644 --- a/llvm/test/Transforms/InstCombine/mul-masked-bits.ll +++ b/llvm/test/Transforms/InstCombine/mul-masked-bits.ll @@ -182,7 +182,7 @@ define i33 @squared_demanded_3_low_bits(i33 %x) { define i64 @scalar_mul_bit_x0_y0(i64 %x, i64 %y) { ; CHECK-LABEL: @scalar_mul_bit_x0_y0( ; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 1 -; CHECK-NEXT: [[MUL:%.*]] = and i64 [[AND2]], [[X:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = and i64 [[X:%.*]], [[AND2]] ; CHECK-NEXT: ret i64 [[MUL]] ; %and1 = and i64 %x, 1 @@ -199,7 +199,7 @@ define i64 @scalar_mul_bit_x0_y0_uses(i64 %x, i64 %y) { ; CHECK-NEXT: call void @use(i64 [[AND1]]) ; CHECK-NEXT: [[AND2:%.*]] = and i64 [[Y:%.*]], 1 ; CHECK-NEXT: call void @use(i64 [[AND2]]) -; CHECK-NEXT: [[MUL:%.*]] = and i64 [[AND2]], [[X]] +; CHECK-NEXT: [[MUL:%.*]] = and i64 [[X]], [[AND2]] ; CHECK-NEXT: ret i64 [[MUL]] ; %and1 = and i64 %x, 1 @@ -241,7 +241,7 @@ define i64 @scalar_mul_bit_x0_yC(i64 %x, i64 %y, i64 %c) { define <2 x i64> @vector_mul_bit_x0_y0(<2 x i64> %x, <2 x i64> %y) { ; CHECK-LABEL: @vector_mul_bit_x0_y0( ; CHECK-NEXT: [[AND2:%.*]] = and <2 x i64> [[Y:%.*]], -; CHECK-NEXT: [[MUL:%.*]] = and <2 x i64> [[AND2]], [[X:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = and <2 x i64> [[X:%.*]], [[AND2]] ; CHECK-NEXT: ret <2 x i64> [[MUL]] ; %and1 = and <2 x i64> %x, diff --git a/llvm/test/Transforms/InstCombine/mul-pow2.ll b/llvm/test/Transforms/InstCombine/mul-pow2.ll index c16fd710f309b..bc172f0152fe5 100644 --- a/llvm/test/Transforms/InstCombine/mul-pow2.ll +++ b/llvm/test/Transforms/InstCombine/mul-pow2.ll @@ -107,7 +107,7 @@ define <2 x i8> @mul_x_selectp2_vec(<2 x i8> %xx, i1 %c) { define i8 @shl_add_log_may_cause_poison_pr62175_fail(i8 %x, i8 %y) { ; CHECK-LABEL: @shl_add_log_may_cause_poison_pr62175_fail( ; CHECK-NEXT: [[SHL:%.*]] = shl i8 4, [[X:%.*]] -; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[SHL]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] 
= mul i8 [[Y:%.*]], [[SHL]] ; CHECK-NEXT: ret i8 [[MUL]] ; %shl = shl i8 4, %x diff --git a/llvm/test/Transforms/InstCombine/mul.ll b/llvm/test/Transforms/InstCombine/mul.ll index 66455479feaaa..8c528e340bc6c 100644 --- a/llvm/test/Transforms/InstCombine/mul.ll +++ b/llvm/test/Transforms/InstCombine/mul.ll @@ -289,7 +289,7 @@ define i32 @shl1_decrement_use(i32 %x, i32 %y) { ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[X:%.*]] ; CHECK-NEXT: [[X1:%.*]] = xor i32 [[NOTMASK]], -1 ; CHECK-NEXT: call void @use32(i32 [[X1]]) -; CHECK-NEXT: [[M:%.*]] = mul i32 [[X1]], [[Y:%.*]] +; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y:%.*]], [[X1]] ; CHECK-NEXT: ret i32 [[M]] ; %pow2x = shl i32 1, %x @@ -1413,7 +1413,7 @@ define i32 @mul_nsw_shl_nsw_neg_onearg(i32 %x) { define i32 @mul_use_mul_neg(i32 %x,i32 %y) { ; CHECK-LABEL: @mul_use_mul_neg( ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[NEG]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[NEG]] ; CHECK-NEXT: call void @use32(i32 [[MUL]]) ; CHECK-NEXT: [[MUL2:%.*]] = mul i32 [[MUL]], [[NEG]] ; CHECK-NEXT: ret i32 [[MUL2]] @@ -2121,7 +2121,7 @@ define i32 @test_mul_sext_bool_commuted(i1 %x, i32 %y) { define i32 @test_mul_sext_nonbool(i2 %x, i32 %y) { ; CHECK-LABEL: @test_mul_sext_nonbool( ; CHECK-NEXT: [[SEXT:%.*]] = sext i2 [[X:%.*]] to i32 -; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[SEXT]] ; CHECK-NEXT: ret i32 [[MUL]] ; %sext = sext i2 %x to i32 @@ -2133,7 +2133,7 @@ define i32 @test_mul_sext_multiuse(i1 %x, i32 %y) { ; CHECK-LABEL: @test_mul_sext_multiuse( ; CHECK-NEXT: [[SEXT:%.*]] = sext i1 [[X:%.*]] to i32 ; CHECK-NEXT: tail call void @use(i32 [[SEXT]]) -; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[SEXT]], [[Y:%.*]] +; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[Y:%.*]], [[SEXT]] ; CHECK-NEXT: ret i32 [[MUL]] ; %sext = sext i1 %x to i32 diff --git a/llvm/test/Transforms/InstCombine/mul_fold.ll b/llvm/test/Transforms/InstCombine/mul_fold.ll index a1fdec3c68cc4..e4a21db8a6ece 100644 --- a/llvm/test/Transforms/InstCombine/mul_fold.ll +++ b/llvm/test/Transforms/InstCombine/mul_fold.ll @@ -55,7 +55,7 @@ define i8 @mul8_low_A0_B1(i8 %p, i8 %in1) { define i8 @mul8_low_A0_B2(i8 %in0, i8 %p) { ; CHECK-LABEL: @mul8_low_A0_B2( ; CHECK-NEXT: [[IN1:%.*]] = call i8 @use8(i8 [[P:%.*]]) -; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN1]], [[IN0:%.*]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i8 [[IN0:%.*]], [[IN1]] ; CHECK-NEXT: ret i8 [[RETLO]] ; @@ -262,7 +262,7 @@ define i32 @mul32_low_A2_B2(i32 %in0, i32 %p) { ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN0LO]], [[IN1HI]] ; CHECK-NEXT: call void @use32(i32 [[M10]]) -; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization @@ -287,7 +287,7 @@ define i32 @mul32_low_A2_B3(i32 %in0, i32 %p) { ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul nuw i32 [[IN1HI]], [[IN0LO]] ; CHECK-NEXT: call void @use32(i32 [[M10]]) -; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN1]], [[IN0]] +; CHECK-NEXT: [[RETLO:%.*]] = mul i32 [[IN0]], [[IN1]] ; CHECK-NEXT: ret i32 [[RETLO]] ; %in1 = call i32 @use32(i32 %p) ; thwart complexity-based canonicalization @@ -639,7 +639,7 @@ define i64 @mul64_low_no_and(i64 %in0, i64 %in1) { ; CHECK-NEXT: [[IN0HI:%.*]] = lshr i64 [[IN0:%.*]], 32 ; CHECK-NEXT: [[IN1HI:%.*]] = lshr 
i64 [[IN1:%.*]], 32 ; CHECK-NEXT: [[M10:%.*]] = mul i64 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i64 [[IN0HI]], [[IN1]] +; CHECK-NEXT: [[M01:%.*]] = mul i64 [[IN1]], [[IN0HI]] ; CHECK-NEXT: [[M00:%.*]] = mul i64 [[IN1]], [[IN0]] ; CHECK-NEXT: [[ADDC:%.*]] = add i64 [[M10]], [[M01]] ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[ADDC]], 32 @@ -719,7 +719,7 @@ define i32 @mul32_low_extra_shl_use(i32 %in0, i32 %in1) { ; CHECK-NEXT: [[IN0HI:%.*]] = lshr i32 [[IN0:%.*]], 16 ; CHECK-NEXT: [[IN1HI:%.*]] = lshr i32 [[IN1:%.*]], 16 ; CHECK-NEXT: [[M10:%.*]] = mul i32 [[IN1HI]], [[IN0]] -; CHECK-NEXT: [[M01:%.*]] = mul i32 [[IN0HI]], [[IN1]] +; CHECK-NEXT: [[M01:%.*]] = mul i32 [[IN1]], [[IN0HI]] ; CHECK-NEXT: [[ADDC:%.*]] = add i32 [[M10]], [[M01]] ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[ADDC]], 16 ; CHECK-NEXT: call void @use32(i32 [[SHL]]) @@ -738,4 +738,4 @@ define i32 @mul32_low_extra_shl_use(i32 %in0, i32 %in1) { call void @use32(i32 %shl) %retLo = add i32 %shl, %m00 ret i32 %retLo -} \ No newline at end of file +} diff --git a/llvm/test/Transforms/InstCombine/mul_full_64.ll b/llvm/test/Transforms/InstCombine/mul_full_64.ll index 7cddb63b9ba63..1bec5bb927604 100644 --- a/llvm/test/Transforms/InstCombine/mul_full_64.ll +++ b/llvm/test/Transforms/InstCombine/mul_full_64.ll @@ -459,7 +459,7 @@ define i64 @mullo(i64 %x, i64 %y) { ; CHECK-NEXT: [[YL:%.*]] = and i64 [[Y:%.*]], 4294967295 ; CHECK-NEXT: [[YH:%.*]] = lshr i64 [[Y]], 32 ; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[YL]], [[XL]] -; CHECK-NEXT: [[T1:%.*]] = mul i64 [[XH]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = mul i64 [[Y]], [[XH]] ; CHECK-NEXT: [[T2:%.*]] = mul i64 [[YH]], [[X]] ; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295 ; CHECK-NEXT: [[T0H:%.*]] = lshr i64 [[T0]], 32 @@ -526,7 +526,7 @@ define i64 @mullo_duplicate(i64 %x, i64 %y) { ; CHECK-NEXT: [[YL:%.*]] = and i64 [[Y]], 4294967295 ; CHECK-NEXT: [[YH:%.*]] = lshr i64 [[Y]], 32 ; CHECK-NEXT: [[T0:%.*]] = mul nuw i64 [[YL]], [[XL]] -; CHECK-NEXT: [[T1:%.*]] = mul i64 [[XH]], [[Y]] +; CHECK-NEXT: [[T1:%.*]] = mul i64 [[Y]], [[XH]] ; CHECK-NEXT: [[T2:%.*]] = mul i64 [[YH]], [[X]] ; CHECK-NEXT: [[T0L:%.*]] = and i64 [[T0]], 4294967295 ; CHECK-NEXT: [[T0H:%.*]] = lshr i64 [[T0]], 32 diff --git a/llvm/test/Transforms/InstCombine/not-add.ll b/llvm/test/Transforms/InstCombine/not-add.ll index 5c600c991de58..ecbd11dbdc620 100644 --- a/llvm/test/Transforms/InstCombine/not-add.ll +++ b/llvm/test/Transforms/InstCombine/not-add.ll @@ -42,7 +42,7 @@ define i8 @basic_use_xor(i8 %x, i8 %y) { define i8 @basic_use_add(i8 %x, i8 %y) { ; CHECK-LABEL: @basic_use_add( ; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1 -; CHECK-NEXT: [[A:%.*]] = add i8 [[NOTX]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[NOTX]] ; CHECK-NEXT: call void @use(i8 [[A]]) ; CHECK-NEXT: [[NOTA:%.*]] = sub i8 [[X]], [[Y]] ; CHECK-NEXT: ret i8 [[NOTA]] @@ -58,7 +58,7 @@ define i8 @basic_use_both(i8 %x, i8 %y) { ; CHECK-LABEL: @basic_use_both( ; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1 ; CHECK-NEXT: call void @use(i8 [[NOTX]]) -; CHECK-NEXT: [[A:%.*]] = add i8 [[NOTX]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = add i8 [[Y:%.*]], [[NOTX]] ; CHECK-NEXT: call void @use(i8 [[A]]) ; CHECK-NEXT: [[NOTA:%.*]] = sub i8 [[X]], [[Y]] ; CHECK-NEXT: ret i8 [[NOTA]] @@ -143,8 +143,8 @@ define i32 @pr50308(i1 %c1, i32 %v1, i32 %v2, i32 %v3) { ; CHECK-NEXT: entry: ; CHECK-NEXT: br i1 [[C1:%.*]], label [[COND_TRUE:%.*]], label [[COND_END:%.*]] ; CHECK: cond.true: -; CHECK-NEXT: [[ADD_NOT:%.*]] = sub i32 -2, [[V1:%.*]] -; CHECK-NEXT: 
[[ADD1_NEG:%.*]] = xor i32 [[ADD_NOT]], [[V2:%.*]] +; CHECK-NEXT: [[TMP0:%.*]] = sub i32 -2, [[V1:%.*]] +; CHECK-NEXT: [[ADD1_NEG:%.*]] = xor i32 [[TMP0]], [[V2:%.*]] ; CHECK-NEXT: br label [[COND_END]] ; CHECK: cond.end: ; CHECK-NEXT: [[COND_NEG:%.*]] = phi i32 [ [[ADD1_NEG]], [[COND_TRUE]] ], [ 0, [[ENTRY:%.*]] ] diff --git a/llvm/test/Transforms/InstCombine/not.ll b/llvm/test/Transforms/InstCombine/not.ll index 0c2c6195e3240..3679976d9dc39 100644 --- a/llvm/test/Transforms/InstCombine/not.ll +++ b/llvm/test/Transforms/InstCombine/not.ll @@ -442,7 +442,7 @@ define i8 @not_or_neg_use1(i8 %x, i8 %y) { ; CHECK-LABEL: @not_or_neg_use1( ; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[S]]) -; CHECK-NEXT: [[O:%.*]] = or i8 [[S]], [[X:%.*]] +; CHECK-NEXT: [[O:%.*]] = or i8 [[X:%.*]], [[S]] ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[O]], -1 ; CHECK-NEXT: ret i8 [[NOT]] ; @@ -458,7 +458,7 @@ define i8 @not_or_neg_use1(i8 %x, i8 %y) { define i8 @not_or_neg_use2(i8 %x, i8 %y) { ; CHECK-LABEL: @not_or_neg_use2( ; CHECK-NEXT: [[S:%.*]] = sub i8 0, [[Y:%.*]] -; CHECK-NEXT: [[O:%.*]] = or i8 [[S]], [[X:%.*]] +; CHECK-NEXT: [[O:%.*]] = or i8 [[X:%.*]], [[S]] ; CHECK-NEXT: call void @use8(i8 [[O]]) ; CHECK-NEXT: [[NOT:%.*]] = xor i8 [[O]], -1 ; CHECK-NEXT: ret i8 [[NOT]] @@ -850,7 +850,7 @@ define i32 @test_zext(i32 %a, i32 %b){ ; CHECK-LABEL: @test_zext( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[A:%.*]], 0 ; CHECK-NEXT: [[SEXT:%.*]] = zext i1 [[CMP]] to i32 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SEXT]], [[B:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[B:%.*]], [[SEXT]] ; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[ADD]], -1 ; CHECK-NEXT: ret i32 [[NOT]] ; @@ -864,11 +864,11 @@ define i32 @test_zext(i32 %a, i32 %b){ define void @test_invert_demorgan_or(i32 %a, i32 %b, i1 %cond) { ; CHECK-LABEL: @test_invert_demorgan_or( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0 -; CHECK-NEXT: [[OR_NOT1:%.*]] = and i1 [[CMP2]], [[CMP3]] -; CHECK-NEXT: [[MERGE:%.*]] = and i1 [[OR_NOT1]], [[COND:%.*]] -; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A:%.*]], 0 +; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B:%.*]], 0 +; CHECK-NEXT: [[OR_NOT1:%.*]] = and i1 [[CMP1]], [[CMP2]] +; CHECK-NEXT: [[MERGE_NOT:%.*]] = and i1 [[OR_NOT1]], [[COND:%.*]] +; CHECK-NEXT: br i1 [[MERGE_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: call void @f1() ; CHECK-NEXT: unreachable @@ -897,8 +897,8 @@ define i1 @test_invert_demorgan_or2(i64 %a, i64 %b, i64 %c) { ; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i64 [[B:%.*]], 60 ; CHECK-NEXT: [[OR1_NOT1:%.*]] = and i1 [[CMP1]], [[CMP2]] ; CHECK-NEXT: [[CMP3:%.*]] = icmp ult i64 [[C:%.*]], 60 -; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR1_NOT1]], [[CMP3]] -; CHECK-NEXT: ret i1 [[NOT]] +; CHECK-NEXT: [[OR2_NOT:%.*]] = and i1 [[OR1_NOT1]], [[CMP3]] +; CHECK-NEXT: ret i1 [[OR2_NOT]] ; %cmp1 = icmp ugt i64 %a, 23 %cmp2 = icmp ugt i64 %b, 59 @@ -920,8 +920,8 @@ define i1 @test_invert_demorgan_or3(i32 %a, i32 %b) { ; CHECK-NEXT: [[CMP4:%.*]] = icmp ult i32 [[TMP3]], -196112 ; CHECK-NEXT: [[OR1_NOT2:%.*]] = and i1 [[CMP1]], [[CMP2]] ; CHECK-NEXT: [[OR2_NOT1:%.*]] = and i1 [[OR1_NOT2]], [[CMP3]] -; CHECK-NEXT: [[NOT:%.*]] = and i1 [[OR2_NOT1]], [[CMP4]] -; CHECK-NEXT: ret i1 [[NOT]] +; CHECK-NEXT: [[OR3_NOT:%.*]] = and i1 [[OR2_NOT1]], [[CMP4]] +; CHECK-NEXT: ret i1 [[OR3_NOT]] ; %cmp1 = icmp eq i32 %a, 178206 %v1 
= add i32 %b, -195102 @@ -943,8 +943,8 @@ define i1 @test_invert_demorgan_logical_or(i64 %x, i64 %y) { ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0 ; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 [[CMP2]], i1 false ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0 -; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]] -; CHECK-NEXT: ret i1 [[NOT]] +; CHECK-NEXT: [[OR_NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]] +; CHECK-NEXT: ret i1 [[OR_NOT]] ; %cmp1 = icmp eq i64 %x, 27 %cmp2 = icmp eq i64 %y, 0 @@ -958,11 +958,11 @@ define i1 @test_invert_demorgan_logical_or(i64 %x, i64 %y) { define i1 @test_invert_demorgan_and(i32 %a, i32 %b, i1 %cond) { ; CHECK-LABEL: @test_invert_demorgan_and( ; CHECK-NEXT: entry: -; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i32 [[B:%.*]], 0 -; CHECK-NEXT: [[CMP3:%.*]] = icmp eq i32 [[B1:%.*]], 0 -; CHECK-NEXT: [[AND_NOT1:%.*]] = or i1 [[CMP2]], [[CMP3]] -; CHECK-NEXT: [[MERGE:%.*]] = or i1 [[AND_NOT1]], [[COND:%.*]] -; CHECK-NEXT: br i1 [[MERGE]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]] +; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[A:%.*]], 0 +; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[B:%.*]], 0 +; CHECK-NEXT: [[AND_NOT1:%.*]] = or i1 [[CMP1]], [[CMP2]] +; CHECK-NEXT: [[MERGE_NOT:%.*]] = or i1 [[AND_NOT1]], [[COND:%.*]] +; CHECK-NEXT: br i1 [[MERGE_NOT]], label [[IF_ELSE:%.*]], label [[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: call void @f1() ; CHECK-NEXT: unreachable @@ -999,9 +999,9 @@ define i64 @test_invert_demorgan_and2(i64 %x) { define i1 @test_invert_demorgan_and3(i32 %a, i32 %b) { ; CHECK-LABEL: @test_invert_demorgan_and3( -; CHECK-NEXT: [[ADD:%.*]] = sub i32 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[ADD]], 4095 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 4095 +; CHECK-NEXT: [[TMP1:%.*]] = sub i32 [[A:%.*]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 4095 +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP2]], 4095 ; CHECK-NEXT: ret i1 [[CMP]] ; %not = xor i32 %a, -1 @@ -1017,8 +1017,8 @@ define i1 @test_invert_demorgan_logical_and(i64 %x, i64 %y) { ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[Y:%.*]], 0 ; CHECK-NEXT: [[SEL_NOT1:%.*]] = select i1 [[CMP1]], i1 true, i1 [[CMP2]] ; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[X]], 0 -; CHECK-NEXT: [[NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]] -; CHECK-NEXT: ret i1 [[NOT]] +; CHECK-NEXT: [[OR_NOT:%.*]] = and i1 [[CMP3]], [[SEL_NOT1]] +; CHECK-NEXT: ret i1 [[OR_NOT]] ; %cmp1 = icmp eq i64 %x, 27 %cmp2 = icmp eq i64 %y, 0 diff --git a/llvm/test/Transforms/InstCombine/onehot_merge.ll b/llvm/test/Transforms/InstCombine/onehot_merge.ll index 228ad233c9763..d8ef66a4dd781 100644 --- a/llvm/test/Transforms/InstCombine/onehot_merge.ll +++ b/llvm/test/Transforms/InstCombine/onehot_merge.ll @@ -48,7 +48,7 @@ define i1 @foo1_and(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -68,7 +68,7 @@ define i1 @foo1_and_logical(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne 
i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -87,7 +87,7 @@ define <2 x i1> @foo1_and_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> , [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = shl nuw <2 x i32> , [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[OR]] ; @@ -213,7 +213,7 @@ define i1 @foo1_or(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -233,7 +233,7 @@ define i1 @foo1_or_logical(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T4:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -252,7 +252,7 @@ define <2 x i1> @foo1_or_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i32> %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> , [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = shl nuw <2 x i32> , [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[OR]] ; @@ -336,7 +336,7 @@ define i1 @foo1_and_signbit_lshr(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -356,7 +356,7 @@ define i1 @foo1_and_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -375,7 +375,7 @@ define <2 x i1> @foo1_and_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x ; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> , [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = lshr exact <2 x i32> , [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne <2 x i32> [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[OR]] ; @@ -394,7 +394,7 @@ define i1 @foo1_or_signbit_lshr(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T:%.*]] = shl nuw i32 1, [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 
[[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -414,7 +414,7 @@ define i1 @foo1_or_signbit_lshr_logical(i32 %k, i32 %c1, i32 %c2) { ; CHECK-NEXT: [[T4:%.*]] = lshr exact i32 -2147483648, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T4]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -433,7 +433,7 @@ define <2 x i1> @foo1_or_signbit_lshr_vector(<2 x i32> %k, <2 x i32> %c1, <2 x i ; CHECK-NEXT: [[T:%.*]] = shl nuw <2 x i32> , [[C1:%.*]] ; CHECK-NEXT: [[T4:%.*]] = lshr exact <2 x i32> , [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[T]], [[T4]] -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp eq <2 x i32> [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[OR]] ; @@ -618,7 +618,7 @@ define i1 @foo1_and_extra_use_shl(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: store i32 [[T0]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -640,7 +640,7 @@ define i1 @foo1_and_extra_use_shl_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -663,7 +663,7 @@ define i1 @foo1_and_extra_use_and(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T2:%.*]] = and i32 [[T0]], [[K:%.*]] ; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -686,7 +686,7 @@ define i1 @foo1_and_extra_use_and_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: store i32 [[T2]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -710,7 +710,7 @@ define i1 @foo1_and_extra_use_cmp(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T3:%.*]] = icmp eq i32 [[T2]], 0 ; CHECK-NEXT: store i1 [[T3]], ptr [[P:%.*]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -734,7 +734,7 @@ define i1 @foo1_and_extra_use_cmp_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: store i1 [[T3]], ptr 
[[P:%.*]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]] ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -756,7 +756,7 @@ define i1 @foo1_and_extra_use_shl2(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T1:%.*]] = shl nuw i32 1, [[C2:%.*]] ; CHECK-NEXT: store i32 [[T1]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K:%.*]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -778,7 +778,7 @@ define i1 @foo1_and_extra_use_shl2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[TMP1:%.*]] = freeze i32 [[T1]] ; CHECK-NEXT: store i32 [[TMP1]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K:%.*]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -801,7 +801,7 @@ define i1 @foo1_and_extra_use_and2(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T4:%.*]] = and i32 [[T1]], [[K:%.*]] ; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -824,7 +824,7 @@ define i1 @foo1_and_extra_use_and2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T4:%.*]] = and i32 [[TMP1]], [[K:%.*]] ; CHECK-NEXT: store i32 [[T4]], ptr [[P:%.*]], align 4 ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -848,7 +848,7 @@ define i1 @foo1_and_extra_use_cmp2(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0 ; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[T0]], [[T1]] -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[K]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[K]], [[TMP1]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP2]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; @@ -872,7 +872,7 @@ define i1 @foo1_and_extra_use_cmp2_logical(i32 %k, i32 %c1, i32 %c2, ptr %p) { ; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], 0 ; CHECK-NEXT: store i1 [[T5]], ptr [[P:%.*]], align 1 ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[T0]], [[TMP1]] -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[K]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[K]], [[TMP2]] ; CHECK-NEXT: [[OR:%.*]] = icmp ne i32 [[TMP3]], [[TMP2]] ; CHECK-NEXT: ret i1 [[OR]] ; diff --git a/llvm/test/Transforms/InstCombine/or-xor-xor.ll b/llvm/test/Transforms/InstCombine/or-xor-xor.ll index 327d5f8d6220a..c3f1aedb1879a 100644 --- a/llvm/test/Transforms/InstCombine/or-xor-xor.ll +++ b/llvm/test/Transforms/InstCombine/or-xor-xor.ll @@ -98,7 +98,7 @@ define i3 @or_xor_xor_normal_multiple_uses_and(i3 %a, i3 %b) { define i32 @or_xor_xor_negative_multiple_uses_xor1(i32 %a, i32 %b) { ; CHECK-LABEL: @or_xor_xor_negative_multiple_uses_xor1( ; CHECK-NEXT: [[AND1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: 
[[XOR1:%.*]] = and i32 [[AND1]], [[B:%.*]] +; CHECK-NEXT: [[XOR1:%.*]] = and i32 [[B:%.*]], [[AND1]] ; CHECK-NEXT: call void @use.i32(i32 [[XOR1]]) ; CHECK-NEXT: [[OR:%.*]] = xor i32 [[A]], [[B]] ; CHECK-NEXT: ret i32 [[OR]] @@ -114,7 +114,7 @@ define i32 @or_xor_xor_negative_multiple_uses_xor1(i32 %a, i32 %b) { define i5 @or_xor_xor_negative_multiple_uses_xor2(i5 %a, i5 %b) { ; CHECK-LABEL: @or_xor_xor_negative_multiple_uses_xor2( ; CHECK-NEXT: [[A1:%.*]] = xor i5 [[B:%.*]], -1 -; CHECK-NEXT: [[XOR2:%.*]] = and i5 [[A1]], [[A:%.*]] +; CHECK-NEXT: [[XOR2:%.*]] = and i5 [[A:%.*]], [[A1]] ; CHECK-NEXT: call void @use.i5(i5 [[XOR2]]) ; CHECK-NEXT: [[OR:%.*]] = xor i5 [[A]], [[B]] ; CHECK-NEXT: ret i5 [[OR]] diff --git a/llvm/test/Transforms/InstCombine/or-xor.ll b/llvm/test/Transforms/InstCombine/or-xor.ll index cf6b9000182d2..f4ddbb5abc463 100644 --- a/llvm/test/Transforms/InstCombine/or-xor.ll +++ b/llvm/test/Transforms/InstCombine/or-xor.ll @@ -8,7 +8,7 @@ declare void @use(i8) define i32 @test1(i32 %x, i32 %y) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %or = or i32 %x, %y @@ -23,7 +23,7 @@ define i32 @test1(i32 %x, i32 %y) { define i32 @test2(i32 %x, i32 %y) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %or = or i32 %x, %y @@ -37,7 +37,7 @@ define i32 @test2(i32 %x, i32 %y) { define i32 @test3(i32 %x, i32 %y) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %xor = xor i32 %x, %y @@ -52,7 +52,7 @@ define i32 @test3(i32 %x, i32 %y) { define i32 @test4(i32 %x, i32 %y) { ; CHECK-LABEL: @test4( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %xor = xor i32 %x, %y @@ -206,7 +206,7 @@ define i8 @xor_common_op_commute3(i8 %p, i8 %q) { define i32 @test8(i32 %x, i32 %y) { ; CHECK-LABEL: @test8( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %not = xor i32 %y, -1 @@ -218,7 +218,7 @@ define i32 @test8(i32 %x, i32 %y) { define i32 @test9(i32 %x, i32 %y) { ; CHECK-LABEL: @test9( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], -1 -; CHECK-NEXT: [[Z:%.*]] = or i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[Z]] ; %not = xor i32 %x, -1 @@ -306,7 +306,7 @@ define i32 @test10_canonical(i32 %A, i32 %B) { ; (x | y) & ((~x) ^ y) -> (x & y) define i32 @test11(i32 %x, i32 %y) { ; CHECK-LABEL: @test11( -; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[AND]] ; %or = or i32 %x, %y @@ -319,7 +319,7 @@ define i32 @test11(i32 %x, i32 %y) { ; ((~x) ^ y) & (x | y) -> (x & y) define i32 @test12(i32 %x, i32 %y) { ; CHECK-LABEL: @test12( -; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[AND]] ; %neg = xor i32 %x, -1 @@ 
-331,7 +331,7 @@ define i32 @test12(i32 %x, i32 %y) { define i32 @test12_commuted(i32 %x, i32 %y) { ; CHECK-LABEL: @test12_commuted( -; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i32 [[AND]] ; %neg = xor i32 %x, -1 @@ -344,7 +344,7 @@ define i32 @test12_commuted(i32 %x, i32 %y) { ; ((x | y) ^ (x ^ y)) -> (x & y) define i32 @test13(i32 %x, i32 %y) { ; CHECK-LABEL: @test13( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret i32 [[TMP1]] ; %1 = xor i32 %y, %x @@ -800,7 +800,7 @@ define i4 @or_not_xor_common_op_commute0(i4 %x, i4 %y, i4 %z) { ; CHECK-LABEL: @or_not_xor_common_op_commute0( ; CHECK-NEXT: [[TMP1:%.*]] = and i4 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: [[NAND:%.*]] = xor i4 [[TMP1]], -1 -; CHECK-NEXT: [[O2:%.*]] = or i4 [[NAND]], [[Z:%.*]] +; CHECK-NEXT: [[O2:%.*]] = or i4 [[Z:%.*]], [[NAND]] ; CHECK-NEXT: ret i4 [[O2]] ; %notx = xor i4 %x, -1 @@ -816,7 +816,7 @@ define i8 @or_not_xor_common_op_commute1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: call void @use(i8 [[NOTX]]) ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[X]], [[Y:%.*]] ; CHECK-NEXT: [[NAND:%.*]] = xor i8 [[TMP1]], -1 -; CHECK-NEXT: [[O2:%.*]] = or i8 [[NAND]], [[Z:%.*]] +; CHECK-NEXT: [[O2:%.*]] = or i8 [[Z:%.*]], [[NAND]] ; CHECK-NEXT: ret i8 [[O2]] ; %notx = xor i8 %x, -1 @@ -863,7 +863,7 @@ define <2 x i4> @or_not_xor_common_op_commute4(<2 x i4> %x, <2 x i4> %y, <2 x i4 ; CHECK-LABEL: @or_not_xor_common_op_commute4( ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i4> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NAND:%.*]] = xor <2 x i4> [[TMP1]], -; CHECK-NEXT: [[O2:%.*]] = or <2 x i4> [[NAND]], [[Z:%.*]] +; CHECK-NEXT: [[O2:%.*]] = or <2 x i4> [[Z:%.*]], [[NAND]] ; CHECK-NEXT: ret <2 x i4> [[O2]] ; %notx = xor <2 x i4> %x, @@ -877,7 +877,7 @@ define i8 @or_not_xor_common_op_commute5(i8 %x, i8 %y, i8 %z) { ; CHECK-LABEL: @or_not_xor_common_op_commute5( ; CHECK-NEXT: [[TMP1:%.*]] = and i8 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: [[NAND:%.*]] = xor i8 [[TMP1]], -1 -; CHECK-NEXT: [[O2:%.*]] = or i8 [[NAND]], [[Z:%.*]] +; CHECK-NEXT: [[O2:%.*]] = or i8 [[Z:%.*]], [[NAND]] ; CHECK-NEXT: ret i8 [[O2]] ; %notx = xor i8 %x, -1 @@ -926,7 +926,7 @@ define i8 @or_not_xor_common_op_use1(i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[Y:%.*]] ; CHECK-NEXT: call void @use(i8 [[XOR]]) -; CHECK-NEXT: [[O1:%.*]] = or i8 [[NOTX]], [[Z:%.*]] +; CHECK-NEXT: [[O1:%.*]] = or i8 [[Z:%.*]], [[NOTX]] ; CHECK-NEXT: [[O2:%.*]] = or i8 [[XOR]], [[O1]] ; CHECK-NEXT: ret i8 [[O2]] ; @@ -944,7 +944,7 @@ define i8 @or_not_xor_common_op_use2(i8 %x, i8 %y, i8 %z) { ; CHECK-LABEL: @or_not_xor_common_op_use2( ; CHECK-NEXT: [[NOTX:%.*]] = xor i8 [[X:%.*]], -1 ; CHECK-NEXT: [[XOR:%.*]] = xor i8 [[X]], [[Y:%.*]] -; CHECK-NEXT: [[O1:%.*]] = or i8 [[NOTX]], [[Z:%.*]] +; CHECK-NEXT: [[O1:%.*]] = or i8 [[Z:%.*]], [[NOTX]] ; CHECK-NEXT: call void @use(i8 [[O1]]) ; CHECK-NEXT: [[O2:%.*]] = or i8 [[XOR]], [[O1]] ; CHECK-NEXT: ret i8 [[O2]] @@ -1098,7 +1098,7 @@ define i32 @PR75692_3(i32 %x, i32 %y) { define i32 @or_xor_not(i32 %x, i32 %y) { ; CHECK-LABEL: @or_xor_not( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[OR1]] ; %not = xor i32 %y, -1 @@ -1112,7 +1112,7 @@ define i32 @or_xor_not_uses1(i32 %x, i32 %y) { ; CHECK-NEXT: 
[[NOT:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use(i32 [[NOT]]) ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i32 [[OR1]] ; %not = xor i32 %y, -1 @@ -1127,7 +1127,7 @@ define i32 @or_xor_not_uses2(i32 %x, i32 %y) { ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[TMP1]], -1 ; CHECK-NEXT: call void @use(i32 [[XOR]]) -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[XOR]], [[Y]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[Y]], [[XOR]] ; CHECK-NEXT: ret i32 [[OR1]] ; %not = xor i32 %y, -1 diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll index 6e2085a8bb6c7..9bcad034b363e 100644 --- a/llvm/test/Transforms/InstCombine/or.ll +++ b/llvm/test/Transforms/InstCombine/or.ll @@ -696,7 +696,7 @@ define i32 @test39d(i32 %a, float %b) { define i32 @test40(i32 %a, i32 %b) { ; CHECK-LABEL: @test40( ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]] ; CHECK-NEXT: ret i32 [[OR]] ; %and = and i32 %a, %b @@ -708,7 +708,7 @@ define i32 @test40(i32 %a, i32 %b) { define i32 @test40b(i32 %a, i32 %b) { ; CHECK-LABEL: @test40b( ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]] ; CHECK-NEXT: ret i32 [[OR]] ; %and = and i32 %b, %a @@ -720,7 +720,7 @@ define i32 @test40b(i32 %a, i32 %b) { define i32 @test40c(i32 %a, i32 %b) { ; CHECK-LABEL: @test40c( ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]] ; CHECK-NEXT: ret i32 [[OR]] ; %and = and i32 %b, %a @@ -732,7 +732,7 @@ define i32 @test40c(i32 %a, i32 %b) { define i32 @test40d(i32 %a, i32 %b) { ; CHECK-LABEL: @test40d( ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[OR:%.*]] = or i32 [[XOR]], [[B:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i32 [[B:%.*]], [[XOR]] ; CHECK-NEXT: ret i32 [[OR]] ; %and = and i32 %a, %b @@ -743,7 +743,7 @@ define i32 @test40d(i32 %a, i32 %b) { define i32 @test45(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @test45( -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y:%.*]] ; CHECK-NEXT: ret i32 [[OR1]] ; @@ -757,7 +757,7 @@ define i32 @test45_uses1(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @test45_uses1( ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[Z:%.*]] ; CHECK-NEXT: call void @use(i32 [[OR]]) -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Z]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], [[Z]] ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[TMP1]], [[Y]] ; CHECK-NEXT: ret i32 [[OR1]] ; @@ -771,7 +771,7 @@ define i32 @test45_uses1(i32 %x, i32 %y, i32 %z) { define i32 @test45_uses2(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @test45_uses2( ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[OR]], [[X:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[OR]] ; CHECK-NEXT: call void @use(i32 [[AND]]) ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[AND]], [[Y]] ; CHECK-NEXT: ret i32 [[OR1]] @@ -1605,7 +1605,7 @@ define i32 @mul_no_common_bits_commute2(i32 %p1, i32 %p2) { define i32 @mul_no_common_bits_disjoint(i32 %x, i32 %y) { ; CHECK-LABEL: 
@mul_no_common_bits_disjoint( ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[Y:%.*]], 1 -; CHECK-NEXT: [[R:%.*]] = mul i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = mul i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[R]] ; %m = mul i32 %x, %y @@ -1976,7 +1976,7 @@ define i32 @or_xor_and_uses1(i32 %x, i32 %y, i32 %z) { define i32 @or_xor_and_uses2(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @or_xor_and_uses2( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[AND]], [[X:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[X:%.*]], [[AND]] ; CHECK-NEXT: call void @use(i32 [[XOR]]) ; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X]], [[Y]] ; CHECK-NEXT: ret i32 [[OR1]] @@ -2019,7 +2019,7 @@ define i32 @or_xor_and_commuted2(i32 %x, i32 %y, i32 %z) { define i32 @or_xor_and_commuted3(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @or_xor_and_commuted3( ; CHECK-NEXT: [[YY:%.*]] = mul i32 [[Y:%.*]], [[Y]] -; CHECK-NEXT: [[OR1:%.*]] = or i32 [[YY]], [[X:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i32 [[X:%.*]], [[YY]] ; CHECK-NEXT: ret i32 [[OR1]] ; %yy = mul i32 %y, %y ; thwart complexity-based ordering diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll index 5ed7d641df65b..469375633b60e 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-after-truncation-variant-b.ll @@ -210,7 +210,7 @@ define i32 @n5_extrause0(i64 %x, i32 %nbits) { ; CHECK-NEXT: call void @use64(i64 [[T2]]) ; CHECK-NEXT: call void @use64(i64 [[T3]]) ; CHECK-NEXT: call void @use32(i32 [[T4]]) -; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]] +; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]] ; CHECK-NEXT: call void @use64(i64 [[T5]]) ; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32 ; CHECK-NEXT: [[T7:%.*]] = shl i32 [[T6]], [[T4]] @@ -246,7 +246,7 @@ define i32 @n6_extrause1(i64 %x, i32 %nbits) { ; CHECK-NEXT: call void @use64(i64 [[T2]]) ; CHECK-NEXT: call void @use64(i64 [[T3]]) ; CHECK-NEXT: call void @use32(i32 [[T4]]) -; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]] +; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]] ; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32 ; CHECK-NEXT: call void @use32(i32 [[T6]]) ; CHECK-NEXT: [[T7:%.*]] = shl i32 [[T6]], [[T4]] @@ -282,7 +282,7 @@ define i32 @n7_extrause2(i64 %x, i32 %nbits) { ; CHECK-NEXT: call void @use64(i64 [[T2]]) ; CHECK-NEXT: call void @use64(i64 [[T3]]) ; CHECK-NEXT: call void @use32(i32 [[T4]]) -; CHECK-NEXT: [[T5:%.*]] = and i64 [[T3]], [[X:%.*]] +; CHECK-NEXT: [[T5:%.*]] = and i64 [[X:%.*]], [[T3]] ; CHECK-NEXT: call void @use64(i64 [[T5]]) ; CHECK-NEXT: [[T6:%.*]] = trunc i64 [[T5]] to i32 ; CHECK-NEXT: call void @use32(i32 [[T6]]) diff --git a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll index 1debf111b18cd..bce2a1c3f7e50 100644 --- a/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/partally-redundant-left-shift-input-masking-variant-b.ll @@ -132,7 +132,7 @@ define i32 @n3_extrause(i32 %x, i32 %nbits) { ; CHECK-NEXT: [[T0:%.*]] = add i32 [[NBITS:%.*]], -1 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i32 -1, [[T0]] ; 
CHECK-NEXT: [[T2:%.*]] = xor i32 [[T1]], -1 -; CHECK-NEXT: [[T3:%.*]] = and i32 [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T3:%.*]] = and i32 [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T4:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) diff --git a/llvm/test/Transforms/InstCombine/phi.ll b/llvm/test/Transforms/InstCombine/phi.ll index 2673b1d74bb6f..e03e45312687b 100644 --- a/llvm/test/Transforms/InstCombine/phi.ll +++ b/llvm/test/Transforms/InstCombine/phi.ll @@ -1416,7 +1416,7 @@ define i1 @phi_knownnonzero_eq_oricmp_commuted(i32 %n, i32 %s, ptr %P, i32 %val) ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]] +; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i32 [[ORPHI]], 0 ; CHECK-NEXT: ret i1 [[CMP1]] ; @@ -1506,7 +1506,7 @@ define i1 @phi_knownnonzero_ne_oricmp_commuted(i32 %n, i32 %s, ptr %P, i32 %val) ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]] +; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[ORPHI]], 0 ; CHECK-NEXT: ret i1 [[CMP1]] ; @@ -1580,7 +1580,7 @@ define i1 @phi_knownnonzero_ne_multiuse_oricmp_commuted(i32 %n, i32 %s, ptr %P, ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: ; CHECK-NEXT: [[PHI:%.*]] = phi i32 [ 1, [[IF_THEN]] ], [ [[N]], [[ENTRY:%.*]] ] -; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[PHI]], [[VAL:%.*]] +; CHECK-NEXT: [[ORPHI:%.*]] = or i32 [[VAL:%.*]], [[PHI]] ; CHECK-NEXT: [[CMP1:%.*]] = icmp ne i32 [[ORPHI]], 0 ; CHECK-NEXT: br i1 [[CMP1]], label [[NEXT:%.*]], label [[CLEANUP:%.*]] ; CHECK: next: @@ -1622,7 +1622,7 @@ define i1 @phi_knownnonzero_eq_multiuse_andicmp(i32 %n, i32 %s, ptr %P, i32 %val ; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LOAD]], [[N]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N]], [[LOAD]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1, i32 2 ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: @@ -1669,7 +1669,7 @@ define i1 @phi_knownnonzero_ne_multiuse_andicmp(i32 %n, i32 %s, ptr %P, i32 %val ; CHECK-NEXT: br i1 [[TOBOOL]], label [[IF_END:%.*]], label [[IF_THEN:%.*]] ; CHECK: if.then: ; CHECK-NEXT: [[LOAD:%.*]] = load i32, ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[LOAD]], [[N]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[N]], [[LOAD]] ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i32 1, i32 2 ; CHECK-NEXT: br label [[IF_END]] ; CHECK: if.end: diff --git a/llvm/test/Transforms/InstCombine/pr44242.ll b/llvm/test/Transforms/InstCombine/pr44242.ll index e86c17057fe27..bce22734127da 100644 --- a/llvm/test/Transforms/InstCombine/pr44242.ll +++ b/llvm/test/Transforms/InstCombine/pr44242.ll @@ -12,7 +12,7 @@ define float @sitofp(float %x) { ; CHECK: loop_header: ; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ] ; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float -; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]] ; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]] ; CHECK: loop: 
; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00 @@ -46,7 +46,7 @@ define <2 x i16> @bitcast(float %x) { ; CHECK: loop_header: ; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ] ; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float -; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]] ; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]] ; CHECK: loop: ; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00 @@ -82,7 +82,7 @@ define void @store_volatile(float %x) { ; CHECK: loop_header: ; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL_INCR_CASTED:%.*]], [[LOOP:%.*]] ] ; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float -; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]] ; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP]] ; CHECK: loop: ; CHECK-NEXT: [[VAL_INCR:%.*]] = fadd float [[VAL_CASTED]], 1.000000e+00 @@ -149,7 +149,7 @@ define i32 @multiple_phis(float %x) { ; CHECK: loop_header: ; CHECK-NEXT: [[VAL:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[VAL2:%.*]], [[LOOP_END:%.*]] ] ; CHECK-NEXT: [[VAL_CASTED:%.*]] = bitcast i32 [[VAL]] to float -; CHECK-NEXT: [[CMP:%.*]] = fcmp ogt float [[VAL_CASTED]], [[X:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp olt float [[X:%.*]], [[VAL_CASTED]] ; CHECK-NEXT: br i1 [[CMP]], label [[END:%.*]], label [[LOOP:%.*]] ; CHECK: loop: ; CHECK-NEXT: [[CMP2:%.*]] = fcmp ogt float [[VAL_CASTED]], 2.000000e+00 diff --git a/llvm/test/Transforms/InstCombine/pr49688.ll b/llvm/test/Transforms/InstCombine/pr49688.ll index 284b098b02afa..902aea262f537 100644 --- a/llvm/test/Transforms/InstCombine/pr49688.ll +++ b/llvm/test/Transforms/InstCombine/pr49688.ll @@ -7,7 +7,7 @@ define i1 @f(i32 %i1) { ; CHECK-NEXT: entry: ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[I1:%.*]], 0 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 7, [[I1]] -; CHECK-NEXT: [[CMP4:%.*]] = icmp slt i32 [[SHR]], [[I1]] +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt i32 [[I1]], [[SHR]] ; CHECK-NEXT: [[I2:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP4]] ; CHECK-NEXT: ret i1 [[I2]] ; @@ -24,7 +24,7 @@ define i32 @f2(i32 signext %g, i32 zeroext %h) { ; CHECK-LABEL: @f2( ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[G:%.*]], 0 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 7, [[H:%.*]] -; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[SHR]], [[G]] +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[G]], [[SHR]] ; CHECK-NEXT: [[DOT0:%.*]] = select i1 [[CMP]], i1 true, i1 [[CMP1]] ; CHECK-NEXT: [[LOR_EXT:%.*]] = zext i1 [[DOT0]] to i32 ; CHECK-NEXT: ret i32 [[LOR_EXT]] diff --git a/llvm/test/Transforms/InstCombine/pr75369.ll b/llvm/test/Transforms/InstCombine/pr75369.ll index 2f90753504b36..3855880047d6b 100644 --- a/llvm/test/Transforms/InstCombine/pr75369.ll +++ b/llvm/test/Transforms/InstCombine/pr75369.ll @@ -5,7 +5,7 @@ define i32 @main(ptr %a, i8 %a0, i32 %conv, i8 %a1) { ; CHECK-LABEL: define i32 @main( ; CHECK-SAME: ptr [[A:%.*]], i8 [[A0:%.*]], i32 [[CONV:%.*]], i8 [[A1:%.*]]) { ; CHECK-NEXT: [[A3:%.*]] = trunc i32 [[CONV]] to i8 -; CHECK-NEXT: [[OR11:%.*]] = or i8 [[A3]], [[A0]] +; CHECK-NEXT: [[OR11:%.*]] = or i8 [[A0]], [[A3]] ; CHECK-NEXT: store i8 [[OR11]], ptr [[A]], align 1 ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i8 [[A1]], 0 ; CHECK-NEXT: call void @llvm.assume(i1 [[CMP]])
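Note: the pr44242, pr49688, and pr75369 hunks above, like most hunks in this patch, show two mechanical rewrites consistent with regenerated FileCheck expectations: commutative instructions now print the plain function argument before the value derived from it, and comparisons whose operands were swapped have their predicate mirrored. A minimal LLVM IR sketch of the operand-order change (function and value names here are invented for illustration and do not appear in the patch):

define i32 @operand_order_sketch(i32 %x, i32 %m) {
  ; %neg is an instruction result; %x is a plain argument.
  %neg = xor i32 %m, -1
  ; Older checks matched the instruction result first:  and i32 %neg, %x
  ; The regenerated checks expect the argument first:
  %and = and i32 %x, %neg
  ret i32 %and
}

The same reordering accounts for the and/or/mul churn in the masked-merge, minmax, onehot_merge, and not/or-xor tests in this patch; only the printed operand order changes, never the computed value.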
diff --git a/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll b/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll index 5249aa4269e87..eec78063805a1 100644 --- a/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll +++ b/llvm/test/Transforms/InstCombine/ptr-int-ptr-icmp.ll @@ -8,7 +8,7 @@ target triple = "x86_64-unknown-linux-gnu" define i1 @func(ptr %X, ptr %Y) { ; CHECK-LABEL: @func( -; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[CMP]] ; %i = ptrtoint ptr %X to i64 @@ -19,7 +19,7 @@ define i1 @func(ptr %X, ptr %Y) { define <2 x i1> @func_vec(<2 x ptr> %X, <2 x ptr> %Y) { ; CHECK-LABEL: @func_vec( -; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x ptr> [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <2 x ptr> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <2 x i1> [[CMP]] ; %i = ptrtoint <2 x ptr> %X to <2 x i64> @@ -30,7 +30,7 @@ define <2 x i1> @func_vec(<2 x ptr> %X, <2 x ptr> %Y) { define <vscale x 2 x i1> @func_svec(<vscale x 2 x ptr> %X, <vscale x 2 x ptr> %Y) { ; CHECK-LABEL: @func_svec( -; CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x ptr> [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq <vscale x 2 x ptr> [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret <vscale x 2 x i1> [[CMP]] ; %i = ptrtoint <vscale x 2 x ptr> %X to <vscale x 2 x i64> @@ -41,7 +41,7 @@ define <vscale x 2 x i1> @func_svec(<vscale x 2 x ptr> %X, <vscale x 2 x ptr> %Y define i1 @func_pointer_different_types(ptr %X, ptr %Y) { ; CHECK-LABEL: @func_pointer_different_types( -; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[X:%.*]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: ret i1 [[CMP]] ; %i = ptrtoint ptr %X to i64 @@ -72,7 +72,7 @@ define i1 @func_integer_type_too_small(ptr %X, ptr %Y) { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[X:%.*]] to i64 ; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], 4294967295 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[TMP2]] to ptr -; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[P]] ; CHECK-NEXT: ret i1 [[CMP]] ; %i = ptrtoint ptr %X to i32 @@ -87,7 +87,7 @@ define i1 @func_ptr_different_addrspace(ptr %X, ptr addrspace(3) %Y){ ; CHECK-LABEL: @func_ptr_different_addrspace( ; CHECK-NEXT: [[I:%.*]] = ptrtoint ptr [[X:%.*]] to i64 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr addrspace(3) -; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(3) [[P]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr addrspace(3) [[Y:%.*]], [[P]] ; CHECK-NEXT: ret i1 [[CMP]] ; %i = ptrtoint ptr %X to i64 @@ -103,7 +103,7 @@ define i1 @func_ptr_different_addrspace1(ptr addrspace(2) %X, ptr %Y){ ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr addrspace(2) [[X:%.*]] to i32 ; CHECK-NEXT: [[I:%.*]] = zext i32 [[TMP1]] to i64 ; CHECK-NEXT: [[P:%.*]] = inttoptr i64 [[I]] to ptr -; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[P]], [[Y:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq ptr [[Y:%.*]], [[P]] ; CHECK-NEXT: ret i1 [[CMP]] ; %i = ptrtoint ptr addrspace(2) %X to i64 diff --git a/llvm/test/Transforms/InstCombine/ptrmask.ll b/llvm/test/Transforms/InstCombine/ptrmask.ll index 4631b81cd1ce1..24777b1b7f208 100644 --- a/llvm/test/Transforms/InstCombine/ptrmask.ll +++ b/llvm/test/Transforms/InstCombine/ptrmask.ll @@ -155,7 +155,7 @@ define i64 @ptrtoint_of_ptrmask(ptr %p, i64 %m) { ; CHECK-LABEL: define i64 @ptrtoint_of_ptrmask ; CHECK-SAME: (ptr [[P:%.*]], i64 [[M:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 -; CHECK-NEXT: [[R:%.*]] = and i64 [[TMP1]], [[M]] +; CHECK-NEXT: [[R:%.*]] = and i64 [[M]], [[TMP1]] ; CHECK-NEXT: ret i64 [[R]] ; %pm = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 %m) @@ -168,7 +168,7 @@ define i32
@ptrtoint_of_ptrmask2(ptr %p, i64 %m) { ; CHECK-LABEL: define i32 @ptrtoint_of_ptrmask2 ; CHECK-SAME: (ptr [[P:%.*]], i64 [[M:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint ptr [[P]] to i64 -; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[M]] +; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[M]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = trunc i64 [[TMP2]] to i32 ; CHECK-NEXT: ret i32 [[R]] ; @@ -181,7 +181,7 @@ define <2 x i64> @ptrtoint_of_ptrmask_vec(<2 x ptr> %p, <2 x i64> %m) { ; CHECK-LABEL: define <2 x i64> @ptrtoint_of_ptrmask_vec ; CHECK-SAME: (<2 x ptr> [[P:%.*]], <2 x i64> [[M:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <2 x ptr> [[P]] to <2 x i64> -; CHECK-NEXT: [[R:%.*]] = and <2 x i64> [[TMP1]], [[M]] +; CHECK-NEXT: [[R:%.*]] = and <2 x i64> [[M]], [[TMP1]] ; CHECK-NEXT: ret <2 x i64> [[R]] ; %pm = call <2 x ptr> @llvm.ptrmask.v2p0.v2i64(<2 x ptr> %p, <2 x i64> %m) @@ -193,7 +193,7 @@ define <2 x i32> @ptrtoint_of_ptrmask_vec2(<2 x ptr> %p, <2 x i64> %m) { ; CHECK-LABEL: define <2 x i32> @ptrtoint_of_ptrmask_vec2 ; CHECK-SAME: (<2 x ptr> [[P:%.*]], <2 x i64> [[M:%.*]]) { ; CHECK-NEXT: [[TMP1:%.*]] = ptrtoint <2 x ptr> [[P]] to <2 x i64> -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i64> [[TMP1]], [[M]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i64> [[M]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = trunc <2 x i64> [[TMP2]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[R]] ; @@ -374,10 +374,10 @@ define ptr @ptrmask_to_modified_gep6(ptr align 16 %p) { define ptr @ptrmask_to_modified_gep_indirect0(ptr align 16 %p) { ; CHECK-LABEL: define ptr @ptrmask_to_modified_gep_indirect0 ; CHECK-SAME: (ptr align 16 [[P:%.*]]) { -; 44 from 4*sizeof(i32) + (31 & -4) -; CHECK-NEXT: [[GEP1:%.*]] = getelementptr i8, ptr [[P]], i64 44 -; CHECK-NEXT: ret ptr [[GEP1]] +; CHECK-NEXT: [[GEP11:%.*]] = getelementptr i8, ptr [[P]], i64 44 +; CHECK-NEXT: ret ptr [[GEP11]] ; +; 44 from 4*sizeof(i32) + (31 & -4) %gep0 = getelementptr i32, ptr %p, i32 4 %gep1 = getelementptr i8, ptr %gep0, i32 31 %pm = call ptr @llvm.ptrmask.p0.i64(ptr %gep1, i64 -4) @@ -387,11 +387,11 @@ define ptr @ptrmask_to_modified_gep_indirect0(ptr align 16 %p) { define ptr @ptrmask_to_modified_gep_indirect1(ptr %p) { ; CHECK-LABEL: define ptr @ptrmask_to_modified_gep_indirect1 ; CHECK-SAME: (ptr [[P:%.*]]) { - -; CHECK-NEXT: [[R:%.*]] = call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[P]], i64 -16) -; CHECK-NEXT: [[GEP:%.*]] = getelementptr i8, ptr [[R]], i64 32 -; CHECK-NEXT: ret ptr [[GEP]] +; CHECK-NEXT: [[PM0:%.*]] = call align 16 ptr @llvm.ptrmask.p0.i64(ptr [[P]], i64 -16) +; CHECK-NEXT: [[PGEP1:%.*]] = getelementptr i8, ptr [[PM0]], i64 32 +; CHECK-NEXT: ret ptr [[PGEP1]] ; + %pm0 = call ptr @llvm.ptrmask.p0.i64(ptr %p, i64 -16) %pgep = getelementptr i8, ptr %pm0, i64 33 %r = call ptr @llvm.ptrmask.p0.i64(ptr %pgep, i64 -16) diff --git a/llvm/test/Transforms/InstCombine/range-check.ll b/llvm/test/Transforms/InstCombine/range-check.ll index 210e57c1d1fe4..ebb310fb7c1f8 100644 --- a/llvm/test/Transforms/InstCombine/range-check.ll +++ b/llvm/test/Transforms/InstCombine/range-check.ll @@ -7,7 +7,7 @@ define i1 @test_and1(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and1( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -21,7 +21,7 @@ define i1 @test_and1_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and1_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 ; CHECK-NEXT: 
[[A:%.*]] = icmp sgt i32 [[X:%.*]], -1 -; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[NN]], [[X]] +; CHECK-NEXT: [[B:%.*]] = icmp slt i32 [[X]], [[NN]] ; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false ; CHECK-NEXT: ret i1 [[C]] ; @@ -35,7 +35,7 @@ define i1 @test_and1_logical(i32 %x, i32 %n) { define i1 @test_and2(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and2( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -49,7 +49,7 @@ define i1 @test_and2_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and2_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 ; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[X:%.*]], -1 -; CHECK-NEXT: [[B:%.*]] = icmp sge i32 [[NN]], [[X]] +; CHECK-NEXT: [[B:%.*]] = icmp sle i32 [[X]], [[NN]] ; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false ; CHECK-NEXT: ret i1 [[C]] ; @@ -63,7 +63,7 @@ define i1 @test_and2_logical(i32 %x, i32 %n) { define i1 @test_and3(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and3( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -76,7 +76,7 @@ define i1 @test_and3(i32 %x, i32 %n) { define i1 @test_and3_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and3_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -89,7 +89,7 @@ define i1 @test_and3_logical(i32 %x, i32 %n) { define i1 @test_and4(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and4( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -102,7 +102,7 @@ define i1 @test_and4(i32 %x, i32 %n) { define i1 @test_and4_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_and4_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -115,7 +115,7 @@ define i1 @test_and4_logical(i32 %x, i32 %n) { define i1 @test_or1(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or1( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -129,7 +129,7 @@ define i1 @test_or1_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or1_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 ; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], 0 -; CHECK-NEXT: [[B:%.*]] = icmp sle i32 [[NN]], [[X]] +; CHECK-NEXT: [[B:%.*]] = icmp sge i32 [[X]], [[NN]] ; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 true, i1 [[B]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -143,7 +143,7 @@ define i1 @test_or1_logical(i32 %x, i32 %n) { define i1 @test_or2(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or2( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]] ; 
CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -157,7 +157,7 @@ define i1 @test_or2_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or2_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 ; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], 0 -; CHECK-NEXT: [[B:%.*]] = icmp slt i32 [[NN]], [[X]] +; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], [[NN]] ; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 true, i1 [[B]] ; CHECK-NEXT: ret i1 [[C]] ; @@ -171,7 +171,7 @@ define i1 @test_or2_logical(i32 %x, i32 %n) { define i1 @test_or3(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or3( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -184,7 +184,7 @@ define i1 @test_or3(i32 %x, i32 %n) { define i1 @test_or3_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or3_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ule i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp uge i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -197,7 +197,7 @@ define i1 @test_or3_logical(i32 %x, i32 %n) { define i1 @test_or4(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or4( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -210,7 +210,7 @@ define i1 @test_or4(i32 %x, i32 %n) { define i1 @test_or4_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @test_or4_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: ret i1 [[C]] ; %nn = and i32 %n, 2147483647 @@ -225,7 +225,7 @@ define i1 @test_or4_logical(i32 %x, i32 %n) { define i1 @negative1(i32 %x, i32 %n) { ; CHECK-LABEL: @negative1( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], 0 ; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] @@ -240,7 +240,7 @@ define i1 @negative1(i32 %x, i32 %n) { define i1 @negative1_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @negative1_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], 0 ; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] @@ -281,7 +281,7 @@ define i1 @negative2_logical(i32 %x, i32 %n) { define i1 @negative3(i32 %x, i32 %y, i32 %n) { ; CHECK-LABEL: @negative3( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] @@ -296,7 +296,7 @@ define i1 @negative3(i32 %x, i32 %y, i32 %n) { define i1 @negative3_logical(i32 %x, i32 %y, i32 %n) { ; CHECK-LABEL: @negative3_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp sgt i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp slt i32 [[X:%.*]], [[NN]] 
; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[C:%.*]] = select i1 [[A]], i1 [[B]], i1 false ; CHECK-NEXT: ret i1 [[C]] @@ -311,7 +311,7 @@ define i1 @negative3_logical(i32 %x, i32 %y, i32 %n) { define i1 @negative4(i32 %x, i32 %n) { ; CHECK-LABEL: @negative4( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1 ; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] @@ -326,7 +326,7 @@ define i1 @negative4(i32 %x, i32 %n) { define i1 @negative4_logical(i32 %x, i32 %n) { ; CHECK-LABEL: @negative4_logical( ; CHECK-NEXT: [[NN:%.*]] = and i32 [[N:%.*]], 2147483647 -; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[NN]], [[X:%.*]] +; CHECK-NEXT: [[A:%.*]] = icmp ne i32 [[X:%.*]], [[NN]] ; CHECK-NEXT: [[B:%.*]] = icmp sgt i32 [[X]], -1 ; CHECK-NEXT: [[C:%.*]] = and i1 [[A]], [[B]] ; CHECK-NEXT: ret i1 [[C]] diff --git a/llvm/test/Transforms/InstCombine/reassociate-nuw.ll b/llvm/test/Transforms/InstCombine/reassociate-nuw.ll index 9718739ed8ab2..99f07c0a8e0ad 100644 --- a/llvm/test/Transforms/InstCombine/reassociate-nuw.ll +++ b/llvm/test/Transforms/InstCombine/reassociate-nuw.ll @@ -132,7 +132,7 @@ define i32 @tryFactorization_add_nuw_mul(i32 %x) { define i32 @tryFactorization_add_nuw_mul_nuw_mul_nuw_var(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @tryFactorization_add_nuw_mul_nuw_mul_nuw_var( ; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[ADD1:%.*]] = mul nuw i32 [[MUL21]], [[X:%.*]] +; CHECK-NEXT: [[ADD1:%.*]] = mul nuw i32 [[X:%.*]], [[MUL21]] ; CHECK-NEXT: ret i32 [[ADD1]] ; %mul1 = mul nuw i32 %x, %y @@ -144,7 +144,7 @@ define i32 @tryFactorization_add_nuw_mul_nuw_mul_nuw_var(i32 %x, i32 %y, i32 %z) define i32 @tryFactorization_add_nuw_mul_mul_nuw_var(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @tryFactorization_add_nuw_mul_mul_nuw_var( ; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]] +; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]] ; CHECK-NEXT: ret i32 [[ADD1]] ; %mul1 = mul i32 %x, %y @@ -156,7 +156,7 @@ define i32 @tryFactorization_add_nuw_mul_mul_nuw_var(i32 %x, i32 %y, i32 %z) { define i32 @tryFactorization_add_nuw_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @tryFactorization_add_nuw_mul_nuw_mul_var( ; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]] +; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]] ; CHECK-NEXT: ret i32 [[ADD1]] ; %mul1 = mul nuw i32 %x, %y @@ -168,7 +168,7 @@ define i32 @tryFactorization_add_nuw_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) { define i32 @tryFactorization_add_mul_nuw_mul_var(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @tryFactorization_add_mul_nuw_mul_var( ; CHECK-NEXT: [[MUL21:%.*]] = add i32 [[Y:%.*]], [[Z:%.*]] -; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[MUL21]], [[X:%.*]] +; CHECK-NEXT: [[ADD1:%.*]] = mul i32 [[X:%.*]], [[MUL21]] ; CHECK-NEXT: ret i32 [[ADD1]] ; %mul1 = mul nuw i32 %x, %y diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll index cb6775e689b8c..8c61e24a97f1d 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll +++ 
b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-after-truncation-variant-b.ll @@ -20,7 +20,7 @@ define i32 @t0_basic(i64 %x, i32 %nbits) { ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor i64 [[T1]], -1 ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] -; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T4:%.*]] = and i64 [[X:%.*]], [[T2]] ; CHECK-NEXT: call void @use32(i32 [[NBITS]]) ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 [[T1]]) @@ -60,7 +60,7 @@ define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1> ; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> <i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32, i32 32>, [[NBITS]] -; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[X:%.*]], [[T2]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -95,7 +95,7 @@ define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T1:%.*]] = shl nsw <8 x i64> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor <8 x i64> [[T1]], ; CHECK-NEXT: [[T3:%.*]] = sub <8 x i32> , [[NBITS]] -; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T4:%.*]] = and <8 x i64> [[X:%.*]], [[T2]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) @@ -131,7 +131,7 @@ define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) { ; CHECK-NEXT: [[T2:%.*]] = shl nsw <8 x i64> , [[T1]] ; CHECK-NEXT: [[T3:%.*]] = xor <8 x i64> [[T2]], ; CHECK-NEXT: [[T4:%.*]] = sub <8 x i32> , [[NBITS]] -; CHECK-NEXT: [[T5:%.*]] = and <8 x i64> [[T3]], [[X:%.*]] +; CHECK-NEXT: [[T5:%.*]] = and <8 x i64> [[X:%.*]], [[T3]] ; CHECK-NEXT: call void @use8xi32(<8 x i32> [[T0]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T1]]) ; CHECK-NEXT: call void @use8xi64(<8 x i64> [[T2]]) @@ -206,7 +206,7 @@ define i32 @n5_extrause(i64 %x, i32 %nbits) { ; CHECK-NEXT: [[T1:%.*]] = shl nsw i64 -1, [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor i64 [[T1]], -1 ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] -; CHECK-NEXT: [[T4:%.*]] = and i64 [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T4:%.*]] = and i64 [[X:%.*]], [[T2]] ; CHECK-NEXT: call void @use32(i32 [[NBITS]]) ; CHECK-NEXT: call void @use64(i64 [[T0]]) ; CHECK-NEXT: call void @use64(i64 [[T1]]) diff --git a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll index 4b955a894fcfe..e3c0981389116 100644 --- a/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll +++ b/llvm/test/Transforms/InstCombine/redundant-left-shift-input-masking-variant-b.ll @@ -19,7 +19,7 @@ define i32 @t0_basic(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -44,7 +44,7 @@ define i32 @t1_bigger_shift(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t1_bigger_shift( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] ;
CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 33, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -70,7 +70,7 @@ define i32 @t2_bigger_mask(i32 %x, i32 %nbits) { ; CHECK-NEXT: [[T0:%.*]] = add i32 [[NBITS:%.*]], 1 ; CHECK-NEXT: [[T1:%.*]] = shl nsw i32 -1, [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor i32 [[T1]], -1 -; CHECK-NEXT: [[T3:%.*]] = and i32 [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T3:%.*]] = and i32 [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T4:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -102,7 +102,7 @@ define <3 x i32> @t3_vec_splat(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-LABEL: @t3_vec_splat( ; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> <i32 -1, i32 -1, i32 -1>, [[NBITS:%.*]] ; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], <i32 -1, i32 -1, i32 -1> -; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> <i32 32, i32 32, i32 32>, [[NBITS]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) @@ -131,7 +131,7 @@ define <3 x i32> @t4_vec_nonsplat(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-NEXT: [[T0:%.*]] = add <3 x i32> [[NBITS:%.*]], ; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> , [[T0]] ; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], -; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> , [[NBITS]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T0]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) @@ -159,7 +159,7 @@ define <3 x i32> @t5_vec_poison(<3 x i32> %x, <3 x i32> %nbits) { ; CHECK-LABEL: @t5_vec_poison( ; CHECK-NEXT: [[T1:%.*]] = shl nsw <3 x i32> , [[NBITS:%.*]] ; CHECK-NEXT: [[T2:%.*]] = xor <3 x i32> [[T1]], -; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[T2]], [[X:%.*]] +; CHECK-NEXT: [[T3:%.*]] = and <3 x i32> [[X:%.*]], [[T2]] ; CHECK-NEXT: [[T4:%.*]] = sub <3 x i32> , [[NBITS]] ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[NBITS]]) ; CHECK-NEXT: call void @use3xi32(<3 x i32> [[T1]]) @@ -285,7 +285,7 @@ define i32 @t9_nuw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t9_nuw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -310,7 +310,7 @@ define i32 @t10_nsw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t10_nsw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -335,7 +335,7 @@ define i32 @t11_nuw_nsw(i32 %x, i32 %nbits) { ; CHECK-LABEL: @t11_nuw_nsw( ; CHECK-NEXT: [[T0:%.*]] = shl nsw i32 -1, [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ;
CHECK-NEXT: call void @use32(i32 [[T1]]) @@ -362,7 +362,7 @@ define i32 @n12_not_minus_one(i32 %x, i32 %nbits) { ; CHECK-LABEL: @n12_not_minus_one( ; CHECK-NEXT: [[T0:%.*]] = shl i32 -2, [[NBITS:%.*]] ; CHECK-NEXT: [[T1:%.*]] = xor i32 [[T0]], -1 -; CHECK-NEXT: [[T2:%.*]] = and i32 [[T1]], [[X:%.*]] +; CHECK-NEXT: [[T2:%.*]] = and i32 [[X:%.*]], [[T1]] ; CHECK-NEXT: [[T3:%.*]] = sub i32 32, [[NBITS]] ; CHECK-NEXT: call void @use32(i32 [[T0]]) ; CHECK-NEXT: call void @use32(i32 [[T1]]) diff --git a/llvm/test/Transforms/InstCombine/rem.ll b/llvm/test/Transforms/InstCombine/rem.ll index de484fe6df857..05ff214f91b8c 100644 --- a/llvm/test/Transforms/InstCombine/rem.ll +++ b/llvm/test/Transforms/InstCombine/rem.ll @@ -239,7 +239,7 @@ define <2 x i1> @test3a_vec(<2 x i32> %A) { define i32 @test4(i32 %X, i1 %C) { ; CHECK-LABEL: @test4( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[C:%.*]], i32 0, i32 7 -; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[R]] ; %V = select i1 %C, i32 1, i32 8 @@ -252,7 +252,7 @@ define i32 @test5(i32 %X, i8 %B) { ; CHECK-NEXT: [[SHIFT_UPGRD_1:%.*]] = zext nneg i8 [[B:%.*]] to i32 ; CHECK-NEXT: [[AMT:%.*]] = shl nuw i32 32, [[SHIFT_UPGRD_1]] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[AMT]], -1 -; CHECK-NEXT: [[V:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[V:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[V]] ; %shift.upgrd.1 = zext i8 %B to i32 @@ -340,7 +340,7 @@ define i64 @test14(i64 %x, i32 %y) { ; CHECK-NEXT: [[SHL:%.*]] = shl nuw i32 1, [[Y:%.*]] ; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[SHL]] to i64 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i64 [[ZEXT]], -1 -; CHECK-NEXT: [[UREM:%.*]] = and i64 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[UREM:%.*]] = and i64 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i64 [[UREM]] ; %shl = shl i32 1, %y @@ -353,7 +353,7 @@ define i64 @test15(i32 %x, i32 %y) { ; CHECK-LABEL: @test15( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[Y:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: [[UREM:%.*]] = zext nneg i32 [[TMP2]] to i64 ; CHECK-NEXT: ret i64 [[UREM]] ; @@ -369,7 +369,7 @@ define i32 @test16(i32 %x, i32 %y) { ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[Y:%.*]], 11 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHR]], 4 ; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i32 [[AND]], 3 -; CHECK-NEXT: [[REM:%.*]] = and i32 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[REM:%.*]] = and i32 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[REM]] ; %shr = lshr i32 %y, 11 @@ -394,7 +394,7 @@ define i32 @test18(i16 %x, i32 %y) { ; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[X:%.*]], 4 ; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i16 [[TMP1]], 0 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[DOTNOT]], i32 63, i32 31 -; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[TMP3:%.*]] = and i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[TMP3]] ; %1 = and i16 %x, 4 @@ -411,7 +411,7 @@ define i32 @test19(i32 %x, i32 %y) { ; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], [[A]] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1 -; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i32 [[E]] ; %A = shl i32 1, %x @@ -429,7 +429,7 @@ define i32 @test19_commutative0(i32 %x, i32 %y) { ; CHECK-NEXT: [[C:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[D:%.*]] = add i32 [[C]], 
[[A]] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1 -; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i32 [[E]] ; %A = shl i32 1, %x @@ -447,7 +447,7 @@ define i32 @test19_commutative1(i32 %x, i32 %y) { ; CHECK-NEXT: [[C:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[D:%.*]] = add i32 [[A]], [[C]] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1 -; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i32 [[E]] ; %A = shl i32 1, %x @@ -465,7 +465,7 @@ define i32 @test19_commutative2(i32 %x, i32 %y) { ; CHECK-NEXT: [[C:%.*]] = and i32 [[B]], [[A]] ; CHECK-NEXT: [[D:%.*]] = add i32 [[A]], [[C]] ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[D]], -1 -; CHECK-NEXT: [[E:%.*]] = and i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[E:%.*]] = and i32 [[Y]], [[TMP1]] ; CHECK-NEXT: ret i32 [[E]] ; %A = shl i32 1, %x @@ -726,7 +726,7 @@ define i1 @test26(i32 %A, i32 %B) { ; CHECK-LABEL: @test26( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i32 -1, [[B:%.*]] ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[NOTMASK]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[A:%.*]], [[TMP1]] ; CHECK-NEXT: [[E:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: ret i1 [[E]] ; diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll index 107ef291bf439..8103d366d444d 100644 --- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll +++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-is-non-zero-and-no-underflow.ll @@ -49,7 +49,7 @@ define i1 @t1(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -70,7 +70,7 @@ define i1 @t1_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -92,7 +92,7 @@ define i1 @t2(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %offset, 0 @@ -113,7 +113,7 @@ define i1 @t2_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %offset, 0 @@ -137,7 +137,7 @@ define i1 @t3_oneuse0(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, 
[[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -161,7 +161,7 @@ define i1 @t3_oneuse0_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -184,7 +184,7 @@ define i1 @t4_oneuse1(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -208,7 +208,7 @@ define i1 @t4_oneuse1_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i8 [[ADJUSTED]], [[BASE]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -281,7 +281,7 @@ define i1 @t6_commutativity0(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -302,7 +302,7 @@ define i1 @t6_commutativity0_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -322,7 +322,7 @@ define i1 @t7_commutativity1(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -343,7 +343,7 @@ define i1 @t7_commutativity1_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -363,7 +363,7 @@ define i1 @t7_commutativity3(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -384,7 +384,7 @@ define i1 @t7_commutativity3_logical(i8 
%base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -406,7 +406,7 @@ define i1 @t8(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -427,7 +427,7 @@ define i1 @t8_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -449,7 +449,7 @@ define i1 @t9(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 @@ -470,7 +470,7 @@ define i1 @t9_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp = icmp slt i8 %base, 0 diff --git a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll index 0be4457ad3fc0..f967fcac367bb 100644 --- a/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll +++ b/llvm/test/Transforms/InstCombine/result-of-add-of-negative-or-zero-is-non-zero-and-no-underflow.ll @@ -11,7 +11,7 @@ define i1 @t0(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -27,7 +27,7 @@ define i1 @t0_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -46,7 +46,7 @@ define i1 @t1_oneuse0(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 
[[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -65,7 +65,7 @@ define i1 @t1_oneuse0_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i8 [[ADJUSTED]], 0 ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -83,7 +83,7 @@ define i1 @t2_oneuse1(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -102,7 +102,7 @@ define i1 @t2_oneuse1_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i8 [[ADJUSTED]], [[BASE]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -160,7 +160,7 @@ define i1 @t4_commutativity0(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -176,7 +176,7 @@ define i1 @t4_commutativity0_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -191,7 +191,7 @@ define i1 @t5_commutativity1(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -207,7 +207,7 @@ define i1 @t5_commutativity1_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -222,7 +222,7 @@ define i1 @t6_commutativity3(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -238,7 +238,7 @@ define i1 @t6_commutativity3_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = 
add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -255,7 +255,7 @@ define i1 @t7(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -271,7 +271,7 @@ define i1 @t7_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[OFFSET]] -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -288,7 +288,7 @@ define i1 @t8(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset @@ -304,7 +304,7 @@ define i1 @t8_logical(i8 %base, i8 %offset) { ; CHECK-NEXT: [[ADJUSTED:%.*]] = add i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) ; CHECK-NEXT: [[TMP1:%.*]] = sub i8 0, [[BASE]] -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[OFFSET]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[OFFSET]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = add i8 %base, %offset diff --git a/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll b/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll index a8be8180b9118..30a5072c7edc8 100644 --- a/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll +++ b/llvm/test/Transforms/InstCombine/result-of-usub-is-non-zero-and-no-overflow.ll @@ -509,11 +509,11 @@ define i1 @t9_commutative(i8 %base, i8 %offset) { ; CHECK-LABEL: @t9_commutative( ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ugt i8 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[UNDERFLOW]]) ; CHECK-NEXT: [[NULL:%.*]] = icmp eq i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = sub i8 %base, %offset @@ -530,11 +530,11 @@ define i1 @t9_commutative_logical(i8 %base, i8 %offset) { ; CHECK-LABEL: @t9_commutative_logical( ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i8 [[BASE:%.*]], [[OFFSET:%.*]] ; CHECK-NEXT: call void @use8(i8 [[ADJUSTED]]) -; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ugt i8 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[UNDERFLOW:%.*]] = icmp ult i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[UNDERFLOW]]) ; CHECK-NEXT: [[NULL:%.*]] = icmp eq i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NULL]]) -; 
CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %adjusted = sub i8 %base, %offset @@ -554,11 +554,11 @@ define i1 @t10(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -578,11 +578,11 @@ define i1 @t10_logical(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -601,11 +601,11 @@ define i1 @t11_commutative(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -625,11 +625,11 @@ define i1 @t11_commutative_logical(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ule i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp uge i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp ne i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -649,11 +649,11 @@ define 
i1 @t12(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -673,11 +673,11 @@ define i1 @t12_logical(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -696,11 +696,11 @@ define i1 @t13(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 @@ -720,11 +720,11 @@ define i1 @t13_logical(i64 %base, ptr nonnull %offsetptr) { ; CHECK-NEXT: [[OFFSET:%.*]] = ptrtoint ptr [[OFFSETPTR:%.*]] to i64 ; CHECK-NEXT: [[ADJUSTED:%.*]] = sub i64 [[BASE:%.*]], [[OFFSET]] ; CHECK-NEXT: call void @use64(i64 [[ADJUSTED]]) -; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ugt i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NO_UNDERFLOW:%.*]] = icmp ult i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NO_UNDERFLOW]]) -; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[NOT_NULL:%.*]] = icmp eq i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: call void @use1(i1 [[NOT_NULL]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i64 [[OFFSET]], [[BASE]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i64 [[BASE]], [[OFFSET]] ; CHECK-NEXT: ret i1 [[R]] ; %offset = ptrtoint ptr %offsetptr to i64 diff --git a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll index d23f8d48e0c71..a88fd3cc21f1b 100644 --- a/llvm/test/Transforms/InstCombine/saturating-add-sub.ll +++ 
b/llvm/test/Transforms/InstCombine/saturating-add-sub.ll @@ -1809,7 +1809,7 @@ define i32 @not_uadd_sat2(i32 %x, i32 %y) { define i32 @uadd_sat_not(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1822,7 +1822,7 @@ define i32 @uadd_sat_not(i32 %x, i32 %y) { define i32 @uadd_sat_not_nonstrict(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_nonstrict( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1852,7 +1852,7 @@ define i32 @uadd_sat_not_commute_add(i32 %xp, i32 %yp) { define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_ugt( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1865,7 +1865,7 @@ define i32 @uadd_sat_not_ugt(i32 %x, i32 %y) { define i32 @uadd_sat_not_uge(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_uge( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1893,7 +1893,7 @@ define <2 x i32> @uadd_sat_not_ugt_commute_add(<2 x i32> %x, <2 x i32> %yp) { define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_commute_select( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1906,7 +1906,7 @@ define i32 @uadd_sat_not_commute_select(i32 %x, i32 %y) { define i32 @uadd_sat_not_commute_select_nonstrict(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_commute_select_nonstrict( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1951,7 +1951,7 @@ define <2 x i32> @uadd_sat_not_commute_select_ugt(<2 x i32> %xp, <2 x i32> %yp) define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_commute_select_ugt_commute_add( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -1964,7 +1964,7 @@ define i32 @uadd_sat_not_commute_select_ugt_commute_add(i32 %x, i32 %y) { define i32 @uadd_sat_not_commute_select_uge_commute_add(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_not_commute_select_uge_commute_add( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 
[[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[R:%.*]] = call i32 @llvm.uadd.sat.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: ret i32 [[R]] ; %notx = xor i32 %x, -1 @@ -2138,7 +2138,7 @@ define i32 @unsigned_sat_variable_using_wrong_min(i32 %x) { ; CHECK-LABEL: @unsigned_sat_variable_using_wrong_min( ; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32() ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1 -; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[NOTY]], i32 [[X:%.*]]) +; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 [[NOTY]]) ; CHECK-NEXT: [[R:%.*]] = add i32 [[Y]], [[S]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -2156,8 +2156,8 @@ define i32 @unsigned_sat_variable_using_wrong_value(i32 %x, i32 %z) { ; CHECK-LABEL: @unsigned_sat_variable_using_wrong_value( ; CHECK-NEXT: [[Y:%.*]] = call i32 @get_i32() ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y]], -1 -; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[NOTY]], i32 [[X:%.*]]) -; CHECK-NEXT: [[R:%.*]] = add i32 [[S]], [[Z:%.*]] +; CHECK-NEXT: [[S:%.*]] = call i32 @llvm.umin.i32(i32 [[X:%.*]], i32 [[NOTY]]) +; CHECK-NEXT: [[R:%.*]] = add i32 [[Z:%.*]], [[S]] ; CHECK-NEXT: ret i32 [[R]] ; %y = call i32 @get_i32() ; thwart complexity-based canonicalization @@ -2268,7 +2268,7 @@ define i32 @uadd_sat_via_add_swapped_cmp(i32 %x, i32 %y) { define i32 @uadd_sat_via_add_swapped_cmp_nonstrict(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_nonstrict( ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[C_NOT:%.*]] = icmp ugt i32 [[A]], [[Y]] +; CHECK-NEXT: [[C_NOT:%.*]] = icmp ult i32 [[Y]], [[A]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 -1 ; CHECK-NEXT: ret i32 [[R]] ; @@ -2292,7 +2292,7 @@ define i32 @uadd_sat_via_add_swapped_cmp_nonstric(i32 %x, i32 %y) { define i32 @uadd_sat_via_add_swapped_cmp_select_nonstrict(i32 %x, i32 %y) { ; CHECK-LABEL: @uadd_sat_via_add_swapped_cmp_select_nonstrict( ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[A]], [[Y]] +; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[Y]], [[A]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 -1 ; CHECK-NEXT: ret i32 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll b/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll index 424470aa929e1..29c0ac415ce7c 100644 --- a/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/scalarization-inseltpoison.ll @@ -184,8 +184,8 @@ define float @extract_element_load(<4 x float> %x, ptr %ptr) { ; ; CHECK-LABEL: @extract_element_load( ; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16 -; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2 -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2 +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[LOAD]], i64 2 ; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret float [[R]] ; @@ -200,7 +200,7 @@ define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr ; CHECK-LABEL: @extract_element_multi_Use_load( ; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16 ; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16 -; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[X:%.*]], [[LOAD]] ; CHECK-NEXT: [[R:%.*]] = extractelement <4 x 
float> [[ADD]], i64 2 ; CHECK-NEXT: ret float [[R]] ; @@ -227,7 +227,7 @@ define float @extelt_binop_insertelt(<4 x float> %A, <4 x float> %B, float %f) { ; ; CHECK-LABEL: @extelt_binop_insertelt( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 -; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[TMP1]], [[F:%.*]] +; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[F:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[E]] ; %C = insertelement <4 x float> %A, float %f, i32 0 @@ -243,7 +243,7 @@ define i32 @extelt_binop_binop_insertelt(<4 x i32> %A, <4 x i32> %B, i32 %f) { ; ; CHECK-LABEL: @extelt_binop_binop_insertelt( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[B:%.*]], i64 0 -; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[F:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[F:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[B]], i64 0 ; CHECK-NEXT: [[E:%.*]] = mul nsw i32 [[TMP2]], [[TMP3]] ; CHECK-NEXT: ret i32 [[E]] @@ -348,7 +348,7 @@ define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> % ; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use( ; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]] ; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8 -; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ARG0:%.*]], [[ADD]] ; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0 ; CHECK-NEXT: ret i1 [[EXT]] ; diff --git a/llvm/test/Transforms/InstCombine/scalarization.ll b/llvm/test/Transforms/InstCombine/scalarization.ll index 2f539ece88320..591437b72c1fc 100644 --- a/llvm/test/Transforms/InstCombine/scalarization.ll +++ b/llvm/test/Transforms/InstCombine/scalarization.ll @@ -212,8 +212,8 @@ define float @extract_element_load(<4 x float> %x, ptr %ptr) { ; ; CHECK-LABEL: @extract_element_load( ; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR:%.*]], align 16 -; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[LOAD]], i64 2 -; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2 +; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[X:%.*]], i64 2 +; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x float> [[LOAD]], i64 2 ; CHECK-NEXT: [[R:%.*]] = fadd float [[TMP1]], [[TMP2]] ; CHECK-NEXT: ret float [[R]] ; @@ -228,7 +228,7 @@ define float @extract_element_multi_Use_load(<4 x float> %x, ptr %ptr0, ptr %ptr ; CHECK-LABEL: @extract_element_multi_Use_load( ; CHECK-NEXT: [[LOAD:%.*]] = load <4 x float>, ptr [[PTR0:%.*]], align 16 ; CHECK-NEXT: store <4 x float> [[LOAD]], ptr [[PTR1:%.*]], align 16 -; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[LOAD]], [[X:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = fadd <4 x float> [[X:%.*]], [[LOAD]] ; CHECK-NEXT: [[R:%.*]] = extractelement <4 x float> [[ADD]], i64 2 ; CHECK-NEXT: ret float [[R]] ; @@ -255,7 +255,7 @@ define float @extelt_binop_insertelt(<4 x float> %A, <4 x float> %B, float %f) { ; ; CHECK-LABEL: @extelt_binop_insertelt( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x float> [[B:%.*]], i64 0 -; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[TMP1]], [[F:%.*]] +; CHECK-NEXT: [[E:%.*]] = fmul nnan float [[F:%.*]], [[TMP1]] ; CHECK-NEXT: ret float [[E]] ; %C = insertelement <4 x float> %A, float %f, i32 0 @@ -269,7 +269,7 @@ define i32 @extelt_binop_binop_insertelt(<4 x i32> %A, <4 x i32> %B, i32 %f) { ; ; CHECK-LABEL: @extelt_binop_binop_insertelt( ; CHECK-NEXT: [[TMP1:%.*]] = extractelement <4 x i32> [[B:%.*]], i64 0 -; CHECK-NEXT: 
[[TMP2:%.*]] = add i32 [[TMP1]], [[F:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[F:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[B]], i64 0 ; CHECK-NEXT: [[E:%.*]] = mul nsw i32 [[TMP2]], [[TMP3]] ; CHECK-NEXT: ret i32 [[E]] @@ -385,7 +385,7 @@ define i1 @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use(<2 x float> % ; CHECK-LABEL: @extractelt_vector_fcmp_not_cheap_to_scalarize_multi_use( ; CHECK-NEXT: [[ADD:%.*]] = fadd <2 x float> [[ARG1:%.*]], [[ARG2:%.*]] ; CHECK-NEXT: store volatile <2 x float> [[ADD]], ptr undef, align 8 -; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ADD]], [[ARG0:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = fcmp oeq <2 x float> [[ARG0:%.*]], [[ADD]] ; CHECK-NEXT: [[EXT:%.*]] = extractelement <2 x i1> [[CMP]], i64 0 ; CHECK-NEXT: ret i1 [[EXT]] ; diff --git a/llvm/test/Transforms/InstCombine/select-and-or.ll b/llvm/test/Transforms/InstCombine/select-and-or.ll index c4c279361d2a6..68bd28cf234b4 100644 --- a/llvm/test/Transforms/InstCombine/select-and-or.ll +++ b/llvm/test/Transforms/InstCombine/select-and-or.ll @@ -509,7 +509,7 @@ define i1 @and_or2_commuted(i1 %a, i1 %b, i1 %c) { define i1 @and_or1_multiuse(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @and_or1_multiuse( ; CHECK-NEXT: [[NOTA:%.*]] = xor i1 [[A:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTA]], [[C:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[C:%.*]], [[NOTA]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] @@ -524,7 +524,7 @@ define i1 @and_or1_multiuse(i1 %a, i1 %b, i1 %c) { define i1 @and_or2_multiuse(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @and_or2_multiuse( ; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTC]], [[B:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[NOTC]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]] ; CHECK-NEXT: ret i1 [[R]] @@ -595,7 +595,7 @@ define <2 x i1> @and_or2_vec_commuted(<2 x i1> %a, <2 x i1> %b) { define i1 @and_or1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { ; CHECK-LABEL: @and_or1_wrong_operand( ; CHECK-NEXT: [[NOTA:%.*]] = xor i1 [[A:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTA]], [[C:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[C:%.*]], [[NOTA]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -608,7 +608,7 @@ define i1 @and_or1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { define i1 @and_or2_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { ; CHECK-LABEL: @and_or2_wrong_operand( ; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTC]], [[B:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[NOTC]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -658,7 +658,7 @@ define i1 @and_or3_not_free_to_invert(i1 %a, i1 %b, i1 %c) { define i1 @and_or3_multiuse(i1 %a, i1 %b, i32 %x, i32 %y) { ; CHECK-LABEL: @and_or3_multiuse( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[COND:%.*]] = and i1 [[C]], [[B:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[C]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]] ; CHECK-NEXT: ret i1 [[R]] @@ -699,7 +699,7 @@ define <2 x i1> @and_or3_vec_commuted(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2 define i1 @and_or3_wrong_operand(i1 %a, i1 %b, i32 %x, i32 %y, i1 %d) 
{ ; CHECK-LABEL: @and_or3_wrong_operand( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[COND:%.*]] = and i1 [[C]], [[B:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[B:%.*]], [[C]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -760,7 +760,7 @@ define i1 @or_and2_commuted(i1 %a, i1 %b, i1 %c) { define i1 @or_and1_multiuse(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @or_and1_multiuse( ; CHECK-NEXT: [[NOTB:%.*]] = xor i1 [[B:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTB]], [[C:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[C:%.*]], [[NOTB]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[B]] ; CHECK-NEXT: ret i1 [[R]] @@ -775,7 +775,7 @@ define i1 @or_and1_multiuse(i1 %a, i1 %b, i1 %c) { define i1 @or_and2_multiuse(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @or_and2_multiuse( ; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTC]], [[A:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[NOTC]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] @@ -846,7 +846,7 @@ define <2 x i1> @or_and2_vec_commuted(<2 x i1> %a, <2 x i1> %b) { define i1 @or_and1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { ; CHECK-LABEL: @or_and1_wrong_operand( ; CHECK-NEXT: [[NOTB:%.*]] = xor i1 [[B:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = and i1 [[NOTB]], [[C:%.*]] +; CHECK-NEXT: [[COND:%.*]] = and i1 [[C:%.*]], [[NOTB]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A:%.*]], i1 [[D:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -859,7 +859,7 @@ define i1 @or_and1_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { define i1 @or_and2_wrong_operand(i1 %a, i1 %b, i1 %c, i1 %d) { ; CHECK-LABEL: @or_and2_wrong_operand( ; CHECK-NEXT: [[NOTC:%.*]] = xor i1 [[C:%.*]], true -; CHECK-NEXT: [[COND:%.*]] = or i1 [[NOTC]], [[A:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[NOTC]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -922,7 +922,7 @@ define i1 @or_and3_not_free_to_invert(i1 %a, i1 %b, i1 %c) { define i1 @or_and3_multiuse(i1 %a, i1 %b, i32 %x, i32 %y) { ; CHECK-LABEL: @or_and3_multiuse( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[COND:%.*]] = or i1 [[C]], [[A:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[C]] ; CHECK-NEXT: call void @use(i1 [[COND]]) ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[A]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] @@ -963,7 +963,7 @@ define <2 x i1> @or_and3_vec_commuted(<2 x i1> %a, <2 x i1> %b, <2 x i32> %x, <2 define i1 @or_and3_wrong_operand(i1 %a, i1 %b, i32 %x, i32 %y, i1 %d) { ; CHECK-LABEL: @or_and3_wrong_operand( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[COND:%.*]] = or i1 [[C]], [[A:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[A:%.*]], [[C]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND]], i1 [[D:%.*]], i1 [[B:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; @@ -1223,7 +1223,7 @@ define i8 @test_or_eq_different_operands(i8 %a, i8 %b, i8 %c) { define i8 @test_or_eq_a_b_multi_use(i1 %other_cond, i8 %a, i8 %b) { ; CHECK-LABEL: @test_or_eq_a_b_multi_use( ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[COND:%.*]] = or i1 [[CMP]], [[OTHER_COND:%.*]] +; CHECK-NEXT: [[COND:%.*]] = or i1 [[OTHER_COND:%.*]], [[CMP]] ; CHECK-NEXT: call void @use(i1 [[CMP]]) ; CHECK-NEXT: call void @use(i1 
[[COND]]) ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[OTHER_COND]], i8 [[A]], i8 [[B]] diff --git a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll index fb56764598e2d..647287ef5ebad 100644 --- a/llvm/test/Transforms/InstCombine/select-binop-cmp.ll +++ b/llvm/test/Transforms/InstCombine/select-binop-cmp.ll @@ -1210,7 +1210,7 @@ define i32 @select_replace_nested(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @select_replace_nested( ; CHECK-NEXT: [[C:%.*]] = icmp eq i32 [[X:%.*]], 0 ; CHECK-NEXT: [[ADD:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 0 -; CHECK-NEXT: [[S:%.*]] = add i32 [[ADD]], [[Y:%.*]] +; CHECK-NEXT: [[S:%.*]] = add i32 [[Y:%.*]], [[ADD]] ; CHECK-NEXT: ret i32 [[S]] ; %c = icmp eq i32 %x, 0 diff --git a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll index 77ff16a8b2e3d..e5ad312bb85c1 100644 --- a/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll +++ b/llvm/test/Transforms/InstCombine/select-binop-foldable-floating-point.ll @@ -15,7 +15,7 @@ define float @select_maybe_nan_fadd(i1 %cond, float %A, float %B) { define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) { ; CHECK-LABEL: @select_fpclass_fadd( ; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 -; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; %C = fadd float %A, %B @@ -26,7 +26,7 @@ define float @select_fpclass_fadd(i1 %cond, float nofpclass(nan) %A, float %B) { define float @select_nnan_fadd(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 -; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; %C = fadd float %A, %B @@ -37,7 +37,7 @@ define float @select_nnan_fadd(i1 %cond, float %A, float %B) { define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd_swapped( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]] -; CHECK-NEXT: [[D:%.*]] = fadd float [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; %C = fadd float %A, %B @@ -48,7 +48,7 @@ define float @select_nnan_fadd_swapped(i1 %cond, float %A, float %B) { define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd_fast_math( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float [[B:%.*]], float -0.000000e+00 -; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd fast float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; %C = fadd fast float %A, %B @@ -59,7 +59,7 @@ define float @select_nnan_fadd_fast_math(i1 %cond, float %A, float %B) { define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float %B) { ; CHECK-LABEL: @select_nnan_fadd_swapped_fast_math( ; CHECK-NEXT: [[C:%.*]] = select nnan i1 [[COND:%.*]], float -0.000000e+00, float [[B:%.*]] -; CHECK-NEXT: [[D:%.*]] = fadd fast float [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd fast float [[A:%.*]], [[C]] ; CHECK-NEXT: ret float [[D]] ; %C = fadd fast float %A, %B @@ -70,7 +70,7 @@ define float @select_nnan_fadd_swapped_fast_math(i1 %cond, float %A, float 
%B) { define <4 x float> @select_nnan_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, <4 x float> %B) { ; CHECK-LABEL: @select_nnan_nsz_fadd_v4f32( ; CHECK-NEXT: [[C:%.*]] = select nnan nsz <4 x i1> [[COND:%.*]], <4 x float> [[B:%.*]], <4 x float> zeroinitializer -; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <4 x float> [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <4 x float> [[A:%.*]], [[C]] ; CHECK-NEXT: ret <4 x float> [[D]] ; %C = fadd nsz nnan <4 x float> %A, %B @@ -81,7 +81,7 @@ define <4 x float> @select_nnan_nsz_fadd_v4f32(<4 x i1> %cond, <4 x float> %A, < define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) { ; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32( ; CHECK-NEXT: [[C:%.*]] = select nnan nsz <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> [[B:%.*]], <vscale x 4 x float> zeroinitializer -; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <vscale x 4 x float> [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd nnan nsz <vscale x 4 x float> [[A:%.*]], [[C]] ; CHECK-NEXT: ret <vscale x 4 x float> [[D]] ; %C = fadd nnan nsz <vscale x 4 x float> %A, %B @@ -92,7 +92,7 @@ define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32(<vscale x 4 x i1> %con define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32_swapops(<vscale x 4 x i1> %cond, <vscale x 4 x float> %A, <vscale x 4 x float> %B) { ; CHECK-LABEL: @select_nnan_nsz_fadd_nxv4f32_swapops( ; CHECK-NEXT: [[C:%.*]] = select fast <vscale x 4 x i1> [[COND:%.*]], <vscale x 4 x float> zeroinitializer, <vscale x 4 x float> [[B:%.*]] -; CHECK-NEXT: [[D:%.*]] = fadd fast <vscale x 4 x float> [[C]], [[A:%.*]] +; CHECK-NEXT: [[D:%.*]] = fadd fast <vscale x 4 x float> [[A:%.*]], [[C]] ; CHECK-NEXT: ret <vscale x 4 x float> [[D]] ; %C = fadd fast <vscale x 4 x float> %A, %B @@ -103,7 +103,7 @@ define <vscale x 4 x float> @select_nnan_nsz_fadd_nxv4f32_swapops( @icmp_ne_common_op11(<3 x i1> %c, <3 x i17> %x, <3 x i17> %y, <3 x i17> %z) { ; CHECK-LABEL: @icmp_ne_common_op11( ; CHECK-NEXT: [[R_V:%.*]] = select <3 x i1> [[C:%.*]], <3 x i17> [[Y:%.*]], <3 x i17> [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i17> [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ne <3 x i17> [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret <3 x i1> [[R]] ; %cmp1 = icmp ne <3 x i17> %y, %x @@ -62,7 +62,7 @@ define <3 x i1> @icmp_ne_common_op11(<3 x i1> %c, <3 x i17> %x, <3 x i17> %y, <3 define i1 @icmp_eq_common_op00(i1 %c, i5 %x, i5 %y, i5 %z) { ; CHECK-LABEL: @icmp_eq_common_op00( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i5 [[Y:%.*]], i5 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp eq i5 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i5 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp eq i5 %x, %y @@ -74,7 +74,7 @@ define i1 @icmp_eq_common_op00(i1 %c, i5 %x, i5 %y, i5 %z) { define <5 x i1> @icmp_eq_common_op01(<5 x i1> %c, <5 x i7> %x, <5 x i7> %y, <5 x i7> %z) { ; CHECK-LABEL: @icmp_eq_common_op01( ; CHECK-NEXT: [[R_V:%.*]] = select <5 x i1> [[C:%.*]], <5 x i7> [[Y:%.*]], <5 x i7> [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp eq <5 x i7> [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq <5 x i7> [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret <5 x i1> [[R]] ; %cmp1 = icmp eq <5 x i7> %x, %y @@ -86,7 +86,7 @@ define <5 x i1> @icmp_eq_common_op01(<5 x i1> %c, <5 x i7> %x, <5 x i7> %y, <5 x define i1 @icmp_eq_common_op10(i1 %c, i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @icmp_eq_common_op10( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i32 [[Y:%.*]], i32 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i32 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp eq i32 %y, %x @@ -98,7 +98,7 @@ define i1 @icmp_eq_common_op10(i1 %c, i32 %x, i32 %y, i32 %z) { define i1 @icmp_eq_common_op11(i1 %c, i64 %x, i64 %y, i64 %z) { ; CHECK-LABEL: @icmp_eq_common_op11( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i64 [[Y:%.*]], i64 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp eq i64 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp
eq i64 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp eq i64 %y, %x @@ -112,7 +112,7 @@ define i1 @icmp_common_one_use_1(i1 %c, i8 %x, i8 %y, i8 %z) { ; CHECK-NEXT: [[CMP1:%.*]] = icmp eq i8 [[Y:%.*]], [[X:%.*]] ; CHECK-NEXT: call void @use(i1 [[CMP1]]) ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i8 [[Y]], i8 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[R_V]], [[X]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp eq i8 %y, %x @@ -125,7 +125,7 @@ define i1 @icmp_common_one_use_1(i1 %c, i8 %x, i8 %y, i8 %z) { define i1 @icmp_slt_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_slt_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp slt i6 %x, %y @@ -137,7 +137,7 @@ define i1 @icmp_slt_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_sgt_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp sgt i6 %x, %y @@ -149,7 +149,7 @@ define i1 @icmp_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_sle_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_sle_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp sle i6 %y, %x @@ -161,7 +161,7 @@ define i1 @icmp_sle_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_sge_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp sge i6 %y, %x @@ -173,7 +173,7 @@ define i1 @icmp_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_slt_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_slt_sgt_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp sgt i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp slt i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp slt i6 %x, %y @@ -185,7 +185,7 @@ define i1 @icmp_slt_sgt_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_sle_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_sle_sge_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp sle i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sge i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp sle i6 %y, %x @@ -197,7 +197,7 @@ define i1 @icmp_sle_sge_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_ult_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_ult_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ugt i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp ult i6 %x, %y @@ -209,7 +209,7 @@ define i1 @icmp_ult_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_ule_common(i1 %c, i6 %x, 
i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_ule_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp ule i6 %y, %x @@ -221,7 +221,7 @@ define i1 @icmp_ule_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_ugt_common(i1 %c, i8 %x, i8 %y, i8 %z) { ; CHECK-LABEL: @icmp_ugt_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp ugt i8 %y, %x @@ -233,7 +233,7 @@ define i1 @icmp_ugt_common(i1 %c, i8 %x, i8 %y, i8 %z) { define i1 @icmp_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_uge_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp uge i6 %y, %x @@ -245,7 +245,7 @@ define i1 @icmp_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_ult_ugt_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_ult_ugt_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ugt i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp ult i6 %x, %y @@ -257,7 +257,7 @@ define i1 @icmp_ult_ugt_common(i1 %c, i6 %x, i6 %y, i6 %z) { define i1 @icmp_ule_uge_common(i1 %c, i6 %x, i6 %y, i6 %z) { ; CHECK-LABEL: @icmp_ule_uge_common( ; CHECK-NEXT: [[R_V:%.*]] = select i1 [[C:%.*]], i6 [[Y:%.*]], i6 [[Z:%.*]] -; CHECK-NEXT: [[R:%.*]] = icmp ule i6 [[R_V]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp uge i6 [[X:%.*]], [[R_V]] ; CHECK-NEXT: ret i1 [[R]] ; %cmp1 = icmp ule i6 %y, %x @@ -348,7 +348,7 @@ define i1 @icmp_no_common(i1 %c, i8 %x, i8 %y, i8 %z) { define i1 @test_select_inverse_eq(i64 %x, i1 %y) { ; CHECK-LABEL: @test_select_inverse_eq( ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i64 [[X:%.*]], 0 -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[Y:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp ne i64 %x, 0 @@ -360,7 +360,7 @@ define i1 @test_select_inverse_eq(i64 %x, i1 %y) { define i1 @test_select_inverse_signed(i64 %x, i1 %y) { ; CHECK-LABEL: @test_select_inverse_signed( ; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i64 [[X:%.*]], 0 -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[Y:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp sgt i64 %x, -1 @@ -372,7 +372,7 @@ define i1 @test_select_inverse_signed(i64 %x, i1 %y) { define i1 @test_select_inverse_unsigned(i64 %x, i1 %y) { ; CHECK-LABEL: @test_select_inverse_unsigned( ; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i64 [[X:%.*]], 10 -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[Y:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp ult i64 %x, 11 @@ -384,7 +384,7 @@ define i1 @test_select_inverse_unsigned(i64 %x, i1 %y) { define i1 @test_select_inverse_eq_ptr(ptr %x, i1 %y) { ; CHECK-LABEL: @test_select_inverse_eq_ptr( ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne ptr [[X:%.*]], null -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[Y:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = 
icmp eq ptr %x, null @@ -409,7 +409,7 @@ define i1 @test_select_inverse_fail(i64 %x, i1 %y) { define <2 x i1> @test_select_inverse_vec(<2 x i64> %x, <2 x i1> %y) { ; CHECK-LABEL: @test_select_inverse_vec( ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq <2 x i64> [[X:%.*]], zeroinitializer -; CHECK-NEXT: [[SEL:%.*]] = xor <2 x i1> [[CMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor <2 x i1> [[Y:%.*]], [[CMP2]] ; CHECK-NEXT: ret <2 x i1> [[SEL]] ; %cmp1 = icmp ne <2 x i64> %x, zeroinitializer @@ -434,7 +434,7 @@ define <2 x i1> @test_select_inverse_vec_fail(<2 x i64> %x, i1 %y) { define i1 @test_select_inverse_nonconst1(i64 %x, i64 %y, i1 %cond) { ; CHECK-LABEL: @test_select_inverse_nonconst1( ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i64 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[COND:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[COND:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp ne i64 %x, %y @@ -446,7 +446,7 @@ define i1 @test_select_inverse_nonconst1(i64 %x, i64 %y, i1 %cond) { define i1 @test_select_inverse_nonconst2(i64 %x, i64 %y, i1 %cond) { ; CHECK-LABEL: @test_select_inverse_nonconst2( ; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i64 [[Y:%.*]], [[X:%.*]] -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[COND:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[COND:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp ne i64 %x, %y @@ -458,7 +458,7 @@ define i1 @test_select_inverse_nonconst2(i64 %x, i64 %y, i1 %cond) { define i1 @test_select_inverse_nonconst3(i64 %x, i64 %y, i1 %cond) { ; CHECK-LABEL: @test_select_inverse_nonconst3( ; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i64 [[X:%.*]], [[Y:%.*]] -; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[CMP2]], [[COND:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = xor i1 [[COND:%.*]], [[CMP2]] ; CHECK-NEXT: ret i1 [[SEL]] ; %cmp1 = icmp ult i64 %x, %y diff --git a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll index 59d33ee3b39df..cc8f5d53fdddd 100644 --- a/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll +++ b/llvm/test/Transforms/InstCombine/select-ctlz-to-cttz.ll @@ -154,10 +154,10 @@ define i32 @select_clz_to_ctz_wrong_sub(i32 %a) { define i64 @select_clz_to_ctz_i64_wrong_xor(i64 %a) { ; CHECK-LABEL: @select_clz_to_ctz_i64_wrong_xor( ; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[A:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[SUB]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[SUB]] ; CHECK-NEXT: [[LZ:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[AND]], i1 true) -; CHECK-NEXT: [[SUB11:%.*]] = or disjoint i64 [[LZ]], 64 -; CHECK-NEXT: ret i64 [[SUB11]] +; CHECK-NEXT: [[SUB1:%.*]] = or disjoint i64 [[LZ]], 64 +; CHECK-NEXT: ret i64 [[SUB1]] ; %sub = sub i64 0, %a %and = and i64 %sub, %a @@ -187,7 +187,7 @@ define i64 @select_clz_to_ctz_i64_wrong_icmp_cst(i64 %a) { define i64 @select_clz_to_ctz_i64_wrong_icmp_pred(i64 %a) { ; CHECK-LABEL: @select_clz_to_ctz_i64_wrong_icmp_pred( ; CHECK-NEXT: [[SUB:%.*]] = sub i64 0, [[A:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i64 [[SUB]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and i64 [[A]], [[SUB]] ; CHECK-NEXT: [[LZ:%.*]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 [[AND]], i1 true) ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i64 [[A]], 0 ; CHECK-NEXT: [[SUB1:%.*]] = xor i64 [[LZ]], 63 @@ -206,7 +206,7 @@ define i64 @select_clz_to_ctz_i64_wrong_icmp_pred(i64 %a) { define <2 x i32> @select_clz_to_ctz_vec_with_undef(<2 x i32> %a) { ; CHECK-LABEL: @select_clz_to_ctz_vec_with_undef( ; CHECK-NEXT: [[SUB:%.*]] = sub <2 x i32> 
zeroinitializer, [[A:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[SUB]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[A]], [[SUB]] ; CHECK-NEXT: [[LZ:%.*]] = tail call range(i32 0, 33) <2 x i32> @llvm.ctlz.v2i32(<2 x i32> [[AND]], i1 true) ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq <2 x i32> [[A]], zeroinitializer ; CHECK-NEXT: [[SUB1:%.*]] = xor <2 x i32> [[LZ]], <i32 31, i32 undef> @@ -225,7 +225,7 @@ define <2 x i32> @select_clz_to_ctz_vec_with_undef(<2 x i32> %a) { define i32 @select_clz_to_ctz_wrong_constant_for_zero(i32 %a) { ; CHECK-LABEL: @select_clz_to_ctz_wrong_constant_for_zero( ; CHECK-NEXT: [[SUB:%.*]] = sub i32 0, [[A:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i32 [[SUB]], [[A]] +; CHECK-NEXT: [[AND:%.*]] = and i32 [[A]], [[SUB]] ; CHECK-NEXT: [[LZ:%.*]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[AND]], i1 false) ; CHECK-NEXT: [[TOBOOL:%.*]] = icmp eq i32 [[A]], 0 ; CHECK-NEXT: [[SUB1:%.*]] = xor i32 [[LZ]], 31 diff --git a/llvm/test/Transforms/InstCombine/select-divrem.ll b/llvm/test/Transforms/InstCombine/select-divrem.ll index e0c460c37451d..e11afd7b543b2 100644 --- a/llvm/test/Transforms/InstCombine/select-divrem.ll +++ b/llvm/test/Transforms/InstCombine/select-divrem.ll @@ -311,7 +311,7 @@ define i8 @rem_euclid_non_const_pow2(i8 %0, i8 %1) { ; CHECK-LABEL: @rem_euclid_non_const_pow2( ; CHECK-NEXT: [[NOTMASK:%.*]] = shl nsw i8 -1, [[TMP0:%.*]] ; CHECK-NEXT: [[TMP3:%.*]] = xor i8 [[NOTMASK]], -1 -; CHECK-NEXT: [[SEL:%.*]] = and i8 [[TMP3]], [[TMP1:%.*]] +; CHECK-NEXT: [[SEL:%.*]] = and i8 [[TMP1:%.*]], [[TMP3]] ; CHECK-NEXT: ret i8 [[SEL]] ; %pow2 = shl i8 1, %0 diff --git a/llvm/test/Transforms/InstCombine/select-factorize.ll b/llvm/test/Transforms/InstCombine/select-factorize.ll index 386c8e522759e..ab9d9f6b24754 100644 --- a/llvm/test/Transforms/InstCombine/select-factorize.ll +++ b/llvm/test/Transforms/InstCombine/select-factorize.ll @@ -230,7 +230,7 @@ define i1 @and_logic_and_logic_or_5(i1 %c, i1 %a, i1 %b) { define i1 @and_logic_and_logic_or_6(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @and_logic_and_logic_or_6( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = and i1 %c, %a @@ -254,7 +254,7 @@ define i1 @and_logic_and_logic_or_7(i1 %c, i1 %a, i1 %b) { define i1 @and_logic_and_logic_or_8(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @and_logic_and_logic_or_8( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = and i1 %a, %c @@ -319,7 +319,7 @@ define i1 @and_logic_and_logic_or_not_one_use(i1 %c, i1 %a, i1 %b) { define i1 @and_and_logic_or_1(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @and_and_logic_or_1( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[A:%.*]], i1 true, i1 [[B:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = and i1 %c, %a @@ -331,7 +331,7 @@ define i1 @and_and_logic_or_1(i1 %c, i1 %a, i1 %b) { define i1 @and_and_logic_or_2(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @and_and_logic_or_2( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 true, i1 [[A:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = and i1 %a, %c @@ -343,7 +343,7 @@ define i1
@and_and_logic_or_2(i1 %c, i1 %a, i1 %b) { define <3 x i1> @and_and_logic_or_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) { ; CHECK-LABEL: @and_and_logic_or_vector( ; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> , <3 x i1> [[B:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret <3 x i1> [[OR]] ; %ac = and <3 x i1> %c, %a @@ -355,7 +355,7 @@ define <3 x i1> @and_and_logic_or_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) define <3 x i1> @and_and_logic_or_vector_poison(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) { ; CHECK-LABEL: @and_and_logic_or_vector_poison( ; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> , <3 x i1> [[B:%.*]] -; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = and <3 x i1> [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret <3 x i1> [[OR]] ; %ac = and <3 x i1> %c, %a @@ -584,7 +584,7 @@ define i1 @or_logic_or_logic_and_3(i1 %c, i1 %a, i1 %b) { define i1 @or_logic_or_logic_and_4(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @or_logic_or_logic_and_4( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false -; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = or i1 %c, %a @@ -632,7 +632,7 @@ define i1 @or_logic_or_logic_and_7(i1 %c, i1 %a, i1 %b) { define i1 @or_logic_or_logic_and_8(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @or_logic_or_logic_and_8( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false -; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = or i1 %a, %c @@ -697,7 +697,7 @@ define i1 @or_logic_or_logic_and_not_one_use(i1 %c, i1 %a, i1 %b) { define i1 @or_or_logic_and_1(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @or_or_logic_and_1( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false -; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = or i1 %c, %a @@ -709,7 +709,7 @@ define i1 @or_or_logic_and_1(i1 %c, i1 %a, i1 %b) { define i1 @or_or_logic_and_2(i1 %c, i1 %a, i1 %b) { ; CHECK-LABEL: @or_or_logic_and_2( ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[B:%.*]], i1 [[A:%.*]], i1 false -; CHECK-NEXT: [[OR:%.*]] = or i1 [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[OR]] ; %ac = or i1 %c, %a @@ -721,7 +721,7 @@ define i1 @or_or_logic_and_2(i1 %c, i1 %a, i1 %b) { define <3 x i1> @or_or_logic_and_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) { ; CHECK-LABEL: @or_or_logic_and_vector( ; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> [[B:%.*]], <3 x i1> zeroinitializer -; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret <3 x i1> [[OR]] ; %ac = or <3 x i1> %c, %a @@ -733,7 +733,7 @@ define <3 x i1> @or_or_logic_and_vector(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) { define <3 x i1> @or_or_logic_and_vector_poison(<3 x i1> %c, <3 x i1> %a, <3 x i1> %b) { ; CHECK-LABEL: @or_or_logic_and_vector_poison( ; CHECK-NEXT: [[TMP1:%.*]] = select <3 x i1> [[A:%.*]], <3 x i1> [[B:%.*]], <3 x i1> zeroinitializer -; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[TMP1]], [[C:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or <3 x i1> [[C:%.*]], [[TMP1]] ; CHECK-NEXT: ret <3 x i1> [[OR]] ; %ac = or <3 x i1> %c, %a diff --git 
a/llvm/test/Transforms/InstCombine/select-masked_gather.ll b/llvm/test/Transforms/InstCombine/select-masked_gather.ll index 70d798ecd5085..a232bdbca0df4 100644 --- a/llvm/test/Transforms/InstCombine/select-masked_gather.ll +++ b/llvm/test/Transforms/InstCombine/select-masked_gather.ll @@ -95,7 +95,7 @@ define @masked_gather_and_zero_inactive_7( define <vscale x 2 x float> @masked_gather_and_zero_inactive_8(<vscale x 2 x ptr> %ptr, <vscale x 2 x i1> %inv_mask, <vscale x 2 x i1> %cond) { ; CHECK-LABEL: @masked_gather_and_zero_inactive_8( ; CHECK-NEXT: [[MASK:%.*]] = xor <vscale x 2 x i1> [[INV_MASK:%.*]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer) -; CHECK-NEXT: [[PG:%.*]] = and <vscale x 2 x i1> [[MASK]], [[COND:%.*]] +; CHECK-NEXT: [[PG:%.*]] = and <vscale x 2 x i1> [[COND:%.*]], [[MASK]] ; CHECK-NEXT: [[GATHER:%.*]] = call <vscale x 2 x float> @llvm.masked.gather.nxv2f32.nxv2p0(<vscale x 2 x ptr> [[PTR:%.*]], i32 4, <vscale x 2 x i1> [[PG]], <vscale x 2 x float> zeroinitializer) ; CHECK-NEXT: ret <vscale x 2 x float> [[GATHER]] ; diff --git a/llvm/test/Transforms/InstCombine/select-masked_load.ll b/llvm/test/Transforms/InstCombine/select-masked_load.ll index 0e82def113e96..51525e5ee8346 100644 --- a/llvm/test/Transforms/InstCombine/select-masked_load.ll +++ b/llvm/test/Transforms/InstCombine/select-masked_load.ll @@ -92,7 +92,7 @@ define <4 x i32> @masked_load_and_zero_inactive_7(ptr %ptr, <4 x i1> %mask1, <4 define <4 x float> @masked_load_and_zero_inactive_8(ptr %ptr, <4 x i1> %inv_mask, <4 x i1> %cond) { ; CHECK-LABEL: @masked_load_and_zero_inactive_8( ; CHECK-NEXT: [[MASK:%.*]] = xor <4 x i1> [[INV_MASK:%.*]], <i1 true, i1 true, i1 true, i1 true> -; CHECK-NEXT: [[PG:%.*]] = and <4 x i1> [[MASK]], [[COND:%.*]] +; CHECK-NEXT: [[PG:%.*]] = and <4 x i1> [[COND:%.*]], [[MASK]] ; CHECK-NEXT: [[LOAD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[PTR:%.*]], i32 4, <4 x i1> [[PG]], <4 x float> zeroinitializer) ; CHECK-NEXT: ret <4 x float> [[LOAD]] ; diff --git a/llvm/test/Transforms/InstCombine/select-of-bittest.ll b/llvm/test/Transforms/InstCombine/select-of-bittest.ll index 50d3c87f199c3..0c7624018cb02 100644 --- a/llvm/test/Transforms/InstCombine/select-of-bittest.ll +++ b/llvm/test/Transforms/InstCombine/select-of-bittest.ll @@ -158,7 +158,7 @@ define <3 x i32> @and_and_vec_poison(<3 x i32> %arg) { define i32 @f_var0(i32 %arg, i32 %arg1) { ; CHECK-LABEL: @f_var0( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[T5:%.*]] = zext i1 [[TMP3]] to i32 ; CHECK-NEXT: ret i32 [[T5]] @@ -175,7 +175,7 @@ define i32 @f_var0(i32 %arg, i32 %arg1) { define i32 @f_var0_commutative_and(i32 %arg, i32 %arg1) { ; CHECK-LABEL: @f_var0_commutative_and( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 2 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[T5:%.*]] = zext i1 [[TMP3]] to i32 ; CHECK-NEXT: ret i32 [[T5]] @@ -191,7 +191,7 @@ define i32 @f_var0_commutative_and(i32 %arg, i32 %arg1) { define <2 x i32> @f_var0_splatvec(<2 x i32> %arg, <2 x i32> %arg1) { ; CHECK-LABEL: @f_var0_splatvec( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], <i32 2, i32 2> -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[T5:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[T5]] @@ -207,7 +207,7 @@ define <2 x i32>
@f_var0_splatvec(<2 x i32> %arg, <2 x i32> %arg1) { define <2 x i32> @f_var0_vec(<2 x i32> %arg, <2 x i32> %arg1) { ; CHECK-LABEL: @f_var0_vec( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[T5:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[T5]] @@ -223,7 +223,7 @@ define <2 x i32> @f_var0_vec(<2 x i32> %arg, <2 x i32> %arg1) { define <3 x i32> @f_var0_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { ; CHECK-LABEL: @f_var0_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[T5:%.*]] = zext <3 x i1> [[TMP3]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T5]] @@ -240,7 +240,7 @@ define <3 x i32> @f_var0_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { define i32 @f_var1(i32 %arg, i32 %arg1) { ; CHECK-LABEL: @f_var1( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[T4:%.*]] = zext i1 [[TMP3]] to i32 ; CHECK-NEXT: ret i32 [[T4]] @@ -256,7 +256,7 @@ define i32 @f_var1(i32 %arg, i32 %arg1) { define i32 @f_var1_commutative_and(i32 %arg, i32 %arg1) { ; CHECK-LABEL: @f_var1_commutative_and( ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[ARG1:%.*]], 1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0 ; CHECK-NEXT: [[T4:%.*]] = zext i1 [[TMP3]] to i32 ; CHECK-NEXT: ret i32 [[T4]] @@ -271,7 +271,7 @@ define i32 @f_var1_commutative_and(i32 %arg, i32 %arg1) { define <2 x i32> @f_var1_vec(<2 x i32> %arg, <2 x i32> %arg1) { ; CHECK-LABEL: @f_var1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[ARG1:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <2 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[T4:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i32> ; CHECK-NEXT: ret <2 x i32> [[T4]] @@ -286,7 +286,7 @@ define <2 x i32> @f_var1_vec(<2 x i32> %arg, <2 x i32> %arg1) { define <3 x i32> @f_var1_vec_poison(<3 x i32> %arg, <3 x i32> %arg1) { ; CHECK-LABEL: @f_var1_vec_poison( ; CHECK-NEXT: [[TMP1:%.*]] = or <3 x i32> [[ARG1:%.*]], -; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[TMP1]], [[ARG:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <3 x i32> [[ARG:%.*]], [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <3 x i32> [[TMP2]], zeroinitializer ; CHECK-NEXT: [[T4:%.*]] = zext <3 x i1> [[TMP3]] to <3 x i32> ; CHECK-NEXT: ret <3 x i32> [[T4]] diff --git a/llvm/test/Transforms/InstCombine/select-safe-transforms.ll b/llvm/test/Transforms/InstCombine/select-safe-transforms.ll index 70e6271233321..e0306972e48e2 100644 --- a/llvm/test/Transforms/InstCombine/select-safe-transforms.ll +++ b/llvm/test/Transforms/InstCombine/select-safe-transforms.ll @@ -194,7 +194,7 @@ define i1 @andn_or_cmp_2_logical(i16 %a, i16 %b, i1 %y) { define i1 @andn_or_cmp_2_partial_logical(i16 %a, i16 %b, i1 %y) { ; CHECK-LABEL: @andn_or_cmp_2_partial_logical( ; 
CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[AND:%.*]] = and i1 [[X_INV]], [[Y:%.*]] +; CHECK-NEXT: [[AND:%.*]] = and i1 [[Y:%.*]], [[X_INV]] ; CHECK-NEXT: ret i1 [[AND]] ; %x = icmp sge i16 %a, %b @@ -735,7 +735,7 @@ define i1 @orn_and_cmp_2_logical(i16 %a, i16 %b, i1 %y) { define i1 @orn_and_cmp_2_partial_logical(i16 %a, i16 %b, i1 %y) { ; CHECK-LABEL: @orn_and_cmp_2_partial_logical( ; CHECK-NEXT: [[X_INV:%.*]] = icmp slt i16 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[OR:%.*]] = or i1 [[X_INV]], [[Y:%.*]] +; CHECK-NEXT: [[OR:%.*]] = or i1 [[Y:%.*]], [[X_INV]] ; CHECK-NEXT: ret i1 [[OR]] ; %x = icmp sge i16 %a, %b diff --git a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll index 416a6d71055b6..1647233595b37 100644 --- a/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll +++ b/llvm/test/Transforms/InstCombine/select-with-bitwise-ops.ll @@ -10,7 +10,7 @@ define i32 @select_icmp_eq_and_1_0_or_2(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2( ; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 1 @@ -24,7 +24,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec( ; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -38,7 +38,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison1(<2 x i32> %x, <2 x i32 ; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison1( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -52,7 +52,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison2(<2 x i32> %x, <2 x i32 ; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison2( ; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -66,7 +66,7 @@ define <2 x i32> @select_icmp_eq_and_1_0_or_2_vec_poison3(<2 x i32> %x, <2 x i32 ; CHECK-LABEL: @select_icmp_eq_and_1_0_or_2_vec_poison3( ; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -80,7 +80,7 @@ define i32 @select_icmp_eq_and_1_0_xor_2(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_1_0_xor_2( ; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; 
%and = and i32 %x, 1 @@ -109,7 +109,7 @@ define i32 @select_icmp_eq_and_32_0_or_8(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_32_0_or_8( ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 8 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 32 @@ -123,7 +123,7 @@ define <2 x i32> @select_icmp_eq_and_32_0_or_8_vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @select_icmp_eq_and_32_0_or_8_vec( ; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -137,7 +137,7 @@ define i32 @select_icmp_eq_and_32_0_xor_8(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_32_0_xor_8( ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 2 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 8 -; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 32 @@ -166,7 +166,7 @@ define i32 @select_icmp_ne_0_and_4096_or_4096(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_ne_0_and_4096_or_4096( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 4096 @@ -180,7 +180,7 @@ define <2 x i32> @select_icmp_ne_0_and_4096_or_4096_vec(<2 x i32> %x, <2 x i32> ; CHECK-LABEL: @select_icmp_ne_0_and_4096_or_4096_vec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -222,7 +222,7 @@ define i32 @select_icmp_ne_0_and_4096_and_not_4096(i32 %x, i32 %y) { define i32 @select_icmp_eq_and_4096_0_or_4096(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_4096_0_or_4096( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[AND]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 4096 @@ -235,7 +235,7 @@ define i32 @select_icmp_eq_and_4096_0_or_4096(i32 %x, i32 %y) { define <2 x i32> @select_icmp_eq_and_4096_0_or_4096_vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @select_icmp_eq_and_4096_0_or_4096_vec( ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[AND]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[AND]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -248,7 +248,7 @@ define <2 x i32> @select_icmp_eq_and_4096_0_or_4096_vec(<2 x i32> %x, <2 x i32> define i32 @select_icmp_eq_and_4096_0_xor_4096(i32 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_and_4096_0_xor_4096( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096 -; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[AND]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 4096 @@ -277,7 +277,7 @@ define i32 
@select_icmp_eq_0_and_1_or_1(i64 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_0_and_1_or_1( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 1 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i64 %x, 1 @@ -291,7 +291,7 @@ define <2 x i32> @select_icmp_eq_0_and_1_or_1_vec(<2 x i64> %x, <2 x i32> %y) { ; CHECK-LABEL: @select_icmp_eq_0_and_1_or_1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i64> [[X:%.*]] to <2 x i32> ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i64> %x, @@ -305,7 +305,7 @@ define i32 @select_icmp_eq_0_and_1_xor_1(i64 %x, i32 %y) { ; CHECK-LABEL: @select_icmp_eq_0_and_1_xor_1( ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], 1 -; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i64 %x, 1 @@ -335,7 +335,7 @@ define i32 @select_icmp_ne_0_and_4096_or_32(i32 %x, i32 %y) { ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 7 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 32 ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 32 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 4096 @@ -380,7 +380,7 @@ define i32 @select_icmp_ne_0_and_32_or_4096(i32 %x, i32 %y) { ; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 7 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 4096 ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 4096 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 32 @@ -395,7 +395,7 @@ define <2 x i32> @select_icmp_ne_0_and_32_or_4096_vec(<2 x i32> %x, <2 x i32> %y ; CHECK-NEXT: [[AND:%.*]] = shl <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], ; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -570,7 +570,7 @@ define i64 @select_icmp_x_and_8_eq_0_y_xor_8(i32 %x, i64 %y) { ; CHECK-LABEL: @select_icmp_x_and_8_eq_0_y_xor_8( ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8 ; CHECK-NEXT: [[TMP1:%.*]] = zext nneg i32 [[AND]] to i64 -; CHECK-NEXT: [[Y_XOR:%.*]] = xor i64 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[Y_XOR:%.*]] = xor i64 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i64 [[Y_XOR]] ; %and = and i32 %x, 8 @@ -585,7 +585,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_xor_8(i32 %x, i64 %y) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 8 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64 -; CHECK-NEXT: [[XOR_Y:%.*]] = xor i64 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[XOR_Y:%.*]] = xor i64 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i64 [[XOR_Y]] ; %and = and i32 %x, 8 @@ -600,7 +600,7 @@ define i64 @select_icmp_x_and_8_ne_0_y_or_8(i32 %x, i64 %y) { ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 8 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 8 ; CHECK-NEXT: [[TMP2:%.*]] = zext 
nneg i32 [[TMP1]] to i64 -; CHECK-NEXT: [[OR_Y:%.*]] = or i64 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[OR_Y:%.*]] = or i64 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i64 [[OR_Y]] ; %and = and i32 %x, 8 @@ -615,7 +615,7 @@ define <2 x i64> @select_icmp_x_and_8_ne_0_y_or_8_vec(<2 x i32> %x, <2 x i64> %y ; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[AND]], ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64> -; CHECK-NEXT: [[OR_Y:%.*]] = or <2 x i64> [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[OR_Y:%.*]] = or <2 x i64> [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret <2 x i64> [[OR_Y]] ; %and = and <2 x i32> %x, @@ -680,7 +680,7 @@ define i32 @test68(i32 %x, i32 %y) { ; CHECK-LABEL: @test68( ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 128 @@ -694,7 +694,7 @@ define <2 x i32> @test68vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-LABEL: @test68vec( ; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -708,7 +708,7 @@ define i32 @test68_xor(i32 %x, i32 %y) { ; CHECK-LABEL: @test68_xor( ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 128 @@ -738,7 +738,7 @@ define i32 @test69(i32 %x, i32 %y) { ; CHECK-NEXT: [[AND:%.*]] = lshr i32 [[X:%.*]], 6 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i32 [[SELECT]] ; %and = and i32 %x, 128 @@ -753,7 +753,7 @@ define <2 x i32> @test69vec(<2 x i32> %x, <2 x i32> %y) { ; CHECK-NEXT: [[AND:%.*]] = lshr <2 x i32> [[X:%.*]], ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[AND]], ; CHECK-NEXT: [[TMP2:%.*]] = xor <2 x i32> [[TMP1]], -; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or <2 x i32> [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret <2 x i32> [[SELECT]] ; %and = and <2 x i32> %x, @@ -797,7 +797,7 @@ define i8 @test70(i8 %x, i8 %y) { ; CHECK-LABEL: @test70( ; CHECK-NEXT: [[TMP1:%.*]] = lshr i8 [[X:%.*]], 6 ; CHECK-NEXT: [[TMP2:%.*]] = and i8 [[TMP1]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = or i8 [[TMP2]], [[Y:%.*]] +; CHECK-NEXT: [[SELECT:%.*]] = or i8 [[Y:%.*]], [[TMP2]] ; CHECK-NEXT: ret i8 [[SELECT]] ; %cmp = icmp slt i8 %x, 0 @@ -826,7 +826,7 @@ define i32 @shift_no_xor_multiuse_or(i32 %x, i32 %y) { ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[AND]], 2 -; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y]] +; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[TMP1]] ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]] ; CHECK-NEXT: ret i32 [[RES]] ; @@ -843,7 +843,7 @@ define i32 @shift_no_xor_multiuse_xor(i32 %x, i32 %y) { ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 2 ; CHECK-NEXT: [[AND:%.*]] = shl i32 [[X:%.*]], 1 ; CHECK-NEXT: 
[[TMP1:%.*]] = and i32 [[AND]], 2
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[TMP1]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[XOR]]
 ; CHECK-NEXT: ret i32 [[RES]]
 ;
@@ -876,7 +876,7 @@ define i32 @no_shift_no_xor_multiuse_or(i32 %x, i32 %y) {
 ; CHECK-LABEL: @no_shift_no_xor_multiuse_or(
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[AND]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
 ; CHECK-NEXT: ret i32 [[RES]]
 ;
@@ -892,7 +892,7 @@ define i32 @no_shift_no_xor_multiuse_xor(i32 %x, i32 %y) {
 ; CHECK-LABEL: @no_shift_no_xor_multiuse_xor(
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[AND]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[XOR]]
 ; CHECK-NEXT: ret i32 [[RES]]
 ;
@@ -926,7 +926,7 @@ define i32 @no_shift_xor_multiuse_or(i32 %x, i32 %y) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[TMP1]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[OR]]
 ; CHECK-NEXT: ret i32 [[RES]]
 ;
@@ -1028,7 +1028,7 @@ define i32 @shift_no_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i32 [[AND]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: ret i32 [[RES]]
@@ -1047,7 +1047,7 @@ define i32 @shift_no_xor_multiuse_cmp_with_xor(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 1
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i32 [[AND]], 1
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: ret i32 [[RES]]
@@ -1084,7 +1084,7 @@ define i32 @no_shift_no_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-LABEL: @no_shift_no_xor_multiuse_cmp(
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[AND]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: ret i32 [[RES]]
@@ -1102,7 +1102,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_with_xor(i32 %x, i32 %y, i32 %z, i32 %w
 ; CHECK-LABEL: @no_shift_no_xor_multiuse_cmp_with_xor(
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y:%.*]], [[AND]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: ret i32 [[RES]]
@@ -1140,7 +1140,7 @@ define i32 @no_shift_xor_multiuse_cmp(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[AND]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP_NOT]], i32 [[W:%.*]], i32 [[Z:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: ret i32 [[RES]]
@@ -1317,7 +1317,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_or(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT: [[OR:%.*]] = or i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = or i32 [[Y]], [[AND]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: [[RES2:%.*]] = mul i32 [[RES]], [[OR]]
@@ -1338,7 +1338,7 @@ define i32 @no_shift_no_xor_multiuse_cmp_xor(i32 %x, i32 %y, i32 %z, i32 %w) {
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 4096
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[AND]], 0
 ; CHECK-NEXT: [[XOR:%.*]] = xor i32 [[Y:%.*]], 4096
-; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[AND]], [[Y]]
+; CHECK-NEXT: [[SELECT:%.*]] = xor i32 [[Y]], [[AND]]
 ; CHECK-NEXT: [[SELECT2:%.*]] = select i1 [[CMP]], i32 [[Z:%.*]], i32 [[W:%.*]]
 ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[SELECT]], [[SELECT2]]
 ; CHECK-NEXT: [[RES2:%.*]] = mul i32 [[RES]], [[XOR]]
@@ -1641,7 +1641,7 @@ define i64 @xor_i8_to_i64_shl_save_and_ne(i8 %x, i64 %y) {
 ; CHECK-LABEL: @xor_i8_to_i64_shl_save_and_ne(
 ; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X:%.*]] to i64
 ; CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[TMP1]], 63
-; CHECK-NEXT: [[R:%.*]] = xor i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i64 [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret i64 [[R]]
 ;
 %xx = and i8 %x, 1
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 1369be305ec13..7d62b41942440 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -221,7 +221,7 @@ define i32 @test11(i32 %a) {
 define i32 @test12(i1 %cond, i32 %a) {
 ; CHECK-LABEL: @test12(
 ; CHECK-NEXT: [[B:%.*]] = zext i1 [[COND:%.*]] to i32
-; CHECK-NEXT: [[C:%.*]] = or i32 [[B]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i32 [[A:%.*]], [[B]]
 ; CHECK-NEXT: ret i32 [[C]]
 ;
 %b = or i32 %a, 1
@@ -232,7 +232,7 @@ define i32 @test12(i1 %cond, i32 %a) {
 define <2 x i32> @test12vec(<2 x i1> %cond, <2 x i32> %a) {
 ; CHECK-LABEL: @test12vec(
 ; CHECK-NEXT: [[B:%.*]] = zext <2 x i1> [[COND:%.*]] to <2 x i32>
-; CHECK-NEXT: [[C:%.*]] = or <2 x i32> [[B]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i32> [[A:%.*]], [[B]]
 ; CHECK-NEXT: ret <2 x i32> [[C]]
 ;
 %b = or <2 x i32> %a,
@@ -686,7 +686,7 @@ define i1 @test40(i1 %cond) {
 define i32 @test41(i1 %cond, i32 %x, i32 %y) {
 ; CHECK-LABEL: @test41(
-; CHECK-NEXT: [[R:%.*]] = and i32 [[X:%.*]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: ret i32 [[R]]
 ;
 %z = and i32 %x, %y
@@ -699,7 +699,7 @@ define i32 @test42(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test42(
 ; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[X:%.*]], 0
 ; CHECK-NEXT: [[B:%.*]] = sext i1 [[COND]] to i32
-; CHECK-NEXT: [[C:%.*]] = add i32 [[B]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = add i32 [[Y:%.*]], [[B]]
 ; CHECK-NEXT: ret i32 [[C]]
 ;
 %b = add i32 %y, -1
@@ -712,7 +712,7 @@ define <2 x i32> @test42vec(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @test42vec(
 ; CHECK-NEXT: [[COND:%.*]] = icmp eq <2 x i32> [[X:%.*]], zeroinitializer
 ; CHECK-NEXT: [[B:%.*]] = sext <2 x i1> [[COND]] to <2 x i32>
-; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[B]], [[Y:%.*]]
+; CHECK-NEXT: [[C:%.*]] = add <2 x i32> [[Y:%.*]], [[B]]
 ; CHECK-NEXT: ret <2 x i32> [[C]]
 ;
 %b = add <2 x i32> %y,
@@ -1569,7 +1569,7 @@ define i8 @test88(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; select(C, Z, binop(W, select(C, X, Y))) -> select(C, binop(X, W), Z)
 define i8 @test89(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test89(
-; CHECK-NEXT: [[B:%.*]] = and i8 [[X:%.*]], [[W:%.*]]
+; CHECK-NEXT: [[B:%.*]] = and i8 [[W:%.*]], [[X:%.*]]
 ; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], i8 [[B]], i8 [[Z:%.*]]
 ; CHECK-NEXT: ret i8 [[C]]
 ;
@@ -1582,7 +1582,7 @@ define i8 @test89(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; select(C, Z, binop(W, select(C, X, Y))) -> select(C, Z, binop(W, Y))
 define i8 @test90(i1 %cond, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @test90(
-; CHECK-NEXT: [[B:%.*]] = or i8 [[Y:%.*]], [[W:%.*]]
+; CHECK-NEXT: [[B:%.*]] = or i8 [[W:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[C:%.*]] = select i1 [[COND:%.*]], i8 [[Z:%.*]], i8 [[B]]
 ; CHECK-NEXT: ret i8 [[C]]
 ;
@@ -2889,7 +2889,7 @@ define i8 @select_replacement_sub_noundef(i8 %x, i8 noundef %y, i8 %z) {
 define i8 @select_replacement_sub_noundef_but_may_be_poison(i8 %x, i8 noundef %yy, i8 %z) {
 ; CHECK-LABEL: @select_replacement_sub_noundef_but_may_be_poison(
 ; CHECK-NEXT: [[Y:%.*]] = shl nuw i8 [[YY:%.*]], 1
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[Y]], [[X:%.*]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i8 [[X:%.*]], [[Y]]
 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 0, i8 [[Z:%.*]]
 ; CHECK-NEXT: ret i8 [[SEL]]
 ;
@@ -2975,7 +2975,7 @@ define i8 @select_replacement_loop3(i32 noundef %x) {
 ; CHECK-NEXT: [[TRUNC:%.*]] = trunc i32 [[X:%.*]] to i8
 ; CHECK-NEXT: [[REV:%.*]] = call i8 @llvm.bitreverse.i8(i8 [[TRUNC]])
 ; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[REV]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[EXT]], [[X]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X]], [[EXT]]
 ; CHECK-NEXT: [[SEL:%.*]] = select i1 [[CMP]], i8 [[TRUNC]], i8 0
 ; CHECK-NEXT: ret i8 [[SEL]]
 ;
@@ -3016,7 +3016,7 @@ define ptr @select_replacement_gep_inbounds(ptr %base, i64 %offset) {
 define i8 @replace_false_op_eq_shl_or_disjoint(i8 %x) {
 ; CHECK-LABEL: @replace_false_op_eq_shl_or_disjoint(
 ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], 3
-; CHECK-NEXT: [[OR:%.*]] = or i8 [[SHL]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i8 [[X]], [[SHL]]
 ; CHECK-NEXT: ret i8 [[OR]]
 ;
 %eq0 = icmp eq i8 %x, -1
@@ -3057,7 +3057,7 @@ define <2 x i1> @partial_false_undef_condval(<2 x i1> %x) {
 define i32 @mul_select_eq_zero(i32 %x, i32 %y) {
 ; CHECK-LABEL: @mul_select_eq_zero(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: ret i32 [[M]]
 ;
 %c = icmp eq i32 %x, 0
@@ -3083,7 +3083,7 @@ define i32 @mul_select_eq_zero_commute(i32 %x, i32 %y) {
 define i32 @mul_select_eq_zero_copy_flags(i32 %x, i32 %y) {
 ; CHECK-LABEL: @mul_select_eq_zero_copy_flags(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul nuw nsw i32 [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: ret i32 [[M]]
 ;
 %c = icmp eq i32 %x, 0
@@ -3098,7 +3098,7 @@ define i32 @mul_select_ne_zero(i32 %x, i32 %y) {
 ; CHECK-LABEL: @mul_select_ne_zero(
 ; CHECK-NEXT: [[C:%.*]] = icmp ne i32 [[X:%.*]], 0
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X]], [[Y_FR]]
 ; CHECK-NEXT: call void @use(i1 [[C]])
 ; CHECK-NEXT: ret i32 [[M]]
 ;
@@ -3115,7 +3115,7 @@ define i32 @mul_select_ne_zero(i32 %x, i32 %y) {
 define i32 @mul_select_eq_zero_sel_undef(i32 %x, i32 %y) {
 ; CHECK-LABEL: @mul_select_eq_zero_sel_undef(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: ret i32 [[M]]
 ;
 %c = icmp eq i32 %x, 0
@@ -3129,7 +3129,7 @@ define i32 @mul_select_eq_zero_sel_undef(i32 %x, i32 %y) {
 define i32 @mul_select_eq_zero_multiple_users(i32 %x, i32 %y) {
 ; CHECK-LABEL: @mul_select_eq_zero_multiple_users(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze i32 [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul i32 [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul i32 [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: call void @use_i32(i32 [[M]])
 ; CHECK-NEXT: call void @use_i32(i32 [[M]])
 ; CHECK-NEXT: call void @use_i32(i32 [[M]])
@@ -3163,7 +3163,7 @@ define i32 @mul_select_eq_zero_unrelated_condition(i32 %x, i32 %y, i32 %z) {
 define <4 x i32> @mul_select_eq_zero_vector(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @mul_select_eq_zero_vector(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze <4 x i32> [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul <4 x i32> [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul <4 x i32> [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: ret <4 x i32> [[M]]
 ;
 %c = icmp eq <4 x i32> %x, zeroinitializer
@@ -3194,7 +3194,7 @@ define <2 x i32> @mul_select_eq_poison_vector(<2 x i32> %x, <2 x i32> %y) {
 define <2 x i32> @mul_select_eq_zero_sel_poison_vector(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @mul_select_eq_zero_sel_poison_vector(
 ; CHECK-NEXT: [[Y_FR:%.*]] = freeze <2 x i32> [[Y:%.*]]
-; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[Y_FR]], [[X:%.*]]
+; CHECK-NEXT: [[M:%.*]] = mul <2 x i32> [[X:%.*]], [[Y_FR]]
 ; CHECK-NEXT: ret <2 x i32> [[M]]
 ;
 %c = icmp eq <2 x i32> %x, zeroinitializer
@@ -4028,7 +4028,7 @@ define i32 @src_or_eq_C_and_andnotxorC(i32 %x, i32 %y, i32 %c) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
 ; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[TMP0]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT]], [[C:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C:%.*]], [[NOT]]
 ; CHECK-NEXT: ret i32 [[AND1]]
 ;
 entry:
@@ -4064,7 +4064,7 @@ define i32 @src_or_eq_C_xor_andnotandC(i32 %x, i32 %y, i32 %c) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: [[NOT:%.*]] = xor i32 [[AND]], -1
-; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NOT]], [[C:%.*]]
+; CHECK-NEXT: [[AND1:%.*]] = and i32 [[C:%.*]], [[NOT]]
 ; CHECK-NEXT: ret i32 [[AND1]]
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/select_meta.ll b/llvm/test/Transforms/InstCombine/select_meta.ll
index 3898fd9fa1f57..d8f945b8d1b32 100644
--- a/llvm/test/Transforms/InstCombine/select_meta.ll
+++ b/llvm/test/Transforms/InstCombine/select_meta.ll
@@ -6,7 +6,7 @@ define i32 @foo(i32) local_unnamed_addr #0 {
 ; CHECK-LABEL: @foo(
 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP0:%.*]], 2
 ; CHECK-NEXT: [[DOTV:%.*]] = select i1 [[TMP2]], i32 20, i32 -20, !prof [[PROF0:![0-9]+]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[DOTV]], [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i32 [[TMP0]], [[DOTV]]
 ; CHECK-NEXT: ret i32 [[TMP3]]
 ;
 %2 = icmp sgt i32 %0, 2
@@ -51,7 +51,7 @@ define i32 @foo2(i32, i32) local_unnamed_addr #0 {
 ; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 [[TMP0:%.*]], 2
 ; CHECK-NEXT: [[TMP4:%.*]] = sub i32 0, [[TMP1:%.*]]
 ; CHECK-NEXT: [[DOTP:%.*]] = select i1 [[TMP3]], i32 [[TMP1]], i32 [[TMP4]], !prof [[PROF0]]
-; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[DOTP]], [[TMP0]]
+; CHECK-NEXT: [[TMP5:%.*]] = add i32 [[TMP0]], [[DOTP]]
 ; CHECK-NEXT: ret i32 [[TMP5]]
 ;
 %3 = icmp sgt i32 %0, 2
@@ -317,7 +317,7 @@ define <2 x i32> @not_cond_vec_poison(<2 x i1> %c, <2 x i32> %tv, <2 x i32> %fv)
 define i64 @select_add(i1 %cond, i64 %x, i64 %y) {
 ; CHECK-LABEL: @select_add(
 ; CHECK-NEXT: [[OP:%.*]] = select i1 [[COND:%.*]], i64 [[Y:%.*]], i64 0, !prof [[PROF0]], !unpredictable [[META2:![0-9]+]]
-; CHECK-NEXT: [[RET:%.*]] = add i64 [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = add i64 [[X:%.*]], [[OP]]
 ; CHECK-NEXT: ret i64 [[RET]]
 ;
 %op = add i64 %x, %y
@@ -328,7 +328,7 @@ define i64 @select_add(i1 %cond, i64 %x, i64 %y) {
 define <2 x i32> @select_or(<2 x i1> %cond, <2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @select_or(
 ; CHECK-NEXT: [[OP:%.*]] = select <2 x i1> [[COND:%.*]], <2 x i32> [[Y:%.*]], <2 x i32> zeroinitializer, !prof [[PROF0]], !unpredictable [[META2]]
-; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[X:%.*]], [[OP]]
 ; CHECK-NEXT: ret <2 x i32> [[RET]]
 ;
 %op = or <2 x i32> %x, %y
@@ -361,7 +361,7 @@ define i128 @select_ashr(i1 %cond, i128 %x, i128 %y) {
 define double @select_fmul(i1 %cond, double %x, double %y) {
 ; CHECK-LABEL: @select_fmul(
 ; CHECK-NEXT: [[OP:%.*]] = select nnan i1 [[COND:%.*]], double [[Y:%.*]], double 1.000000e+00, !prof [[PROF0]], !unpredictable [[META2]]
-; CHECK-NEXT: [[RET:%.*]] = fmul double [[OP]], [[X:%.*]]
+; CHECK-NEXT: [[RET:%.*]] = fmul double [[X:%.*]], [[OP]]
 ; CHECK-NEXT: ret double [[RET]]
 ;
 %op = fmul double %x, %y
diff --git a/llvm/test/Transforms/InstCombine/set.ll b/llvm/test/Transforms/InstCombine/set.ll
index 50329ddf7caac..f44ac83f7f591 100644
--- a/llvm/test/Transforms/InstCombine/set.ll
+++ b/llvm/test/Transforms/InstCombine/set.ll
@@ -135,7 +135,7 @@ define i1 @test12(i1 %A) {
 define i1 @test13(i1 %A, i1 %B) {
 ; CHECK-LABEL: @test13(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i1 [[B:%.*]], true
-; CHECK-NEXT: [[C:%.*]] = or i1 [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or i1 [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i1 [[C]]
 ;
 %C = icmp uge i1 %A, %B
@@ -145,7 +145,7 @@ define i1 @test13(i1 %A, i1 %B) {
 define <2 x i1> @test13vec(<2 x i1> %A, <2 x i1> %B) {
 ; CHECK-LABEL: @test13vec(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i1> [[B:%.*]],
-; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[C:%.*]] = or <2 x i1> [[A:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i1> [[C]]
 ;
 %C = icmp uge <2 x i1> %A, %B
diff --git a/llvm/test/Transforms/InstCombine/shift-add.ll b/llvm/test/Transforms/InstCombine/shift-add.ll
index 7f948848844c5..016f877a9efb5 100644
--- a/llvm/test/Transforms/InstCombine/shift-add.ll
+++ b/llvm/test/Transforms/InstCombine/shift-add.ll
@@ -505,7 +505,7 @@ define i2 @ashr_2_add_zext_basic(i1 %a, i1 %b) {
 define i32 @lshr_16_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @lshr_16_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
 ; CHECK-NEXT: ret i32 [[LSHR]]
 ;
@@ -566,7 +566,7 @@ define i32 @lshr_16_add_not_known_16_leading_zeroes(i32 %a, i32 %b) {
 define i64 @lshr_32_add_zext_basic(i32 %a, i32 %b) {
 ; CHECK-LABEL: @lshr_32_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i32 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
 ; CHECK-NEXT: ret i64 [[LSHR]]
 ;
@@ -623,7 +623,7 @@ define i64 @lshr_33_i32_add_zext_basic(i32 %a, i32 %b) {
 define i64 @lshr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @lshr_16_to_64_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
 ; CHECK-NEXT: ret i64 [[LSHR]]
 ;
@@ -668,7 +668,7 @@ define i64 @lshr_32_add_not_known_32_leading_zeroes(i64 %a, i64 %b) {
 define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @ashr_16_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i32
 ; CHECK-NEXT: ret i32 [[LSHR]]
 ;
@@ -682,7 +682,7 @@ define i32 @ashr_16_add_zext_basic(i16 %a, i16 %b) {
 define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
 ; CHECK-LABEL: @ashr_32_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i32 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i32 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
 ; CHECK-NEXT: ret i64 [[LSHR]]
 ;
@@ -696,7 +696,7 @@ define i64 @ashr_32_add_zext_basic(i32 %a, i32 %b) {
 define i64 @ashr_16_to_64_add_zext_basic(i16 %a, i16 %b) {
 ; CHECK-LABEL: @ashr_16_to_64_add_zext_basic(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], -1
-; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ult i16 [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[ADD_NARROWED_OVERFLOW:%.*]] = icmp ugt i16 [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[LSHR:%.*]] = zext i1 [[ADD_NARROWED_OVERFLOW]] to i64
 ; CHECK-NEXT: ret i64 [[LSHR]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
index a0a3c8edfb4b5..c4260f4cb2bf8 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-lshr.ll
@@ -294,7 +294,7 @@ define i1 @t10_almost_highest_bit(i32 %x, i64 %y, i32 %len) {
 define i1 @t11_no_shift(i32 %x, i64 %y, i32 %len) {
 ; CHECK-LABEL: @t11_no_shift(
 ; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X:%.*]] to i64
-; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i64 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[T5]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
index 3a85f19d8a037..6e9552e2af4cc 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest-with-truncation-shl.ll
@@ -17,7 +17,7 @@ define i1 @t0_const_after_fold_lshr_shl_ne(i32 %x, i64 %y, i32 %len) {
 ; CHECK-LABEL: @t0_const_after_fold_lshr_shl_ne(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT: ret i1 [[T5]]
 ;
@@ -40,7 +40,7 @@ define <2 x i1> @t1_vec_splat(<2 x i32> %x, <2 x i64> %y, <2 x i32> %len) {
 ; CHECK-LABEL: @t1_vec_splat(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]],
 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg <2 x i32> [[TMP1]] to <2 x i64>
-; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = and <2 x i64> [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[T5:%.*]] = icmp ne <2 x i64> [[TMP3]], zeroinitializer
 ; CHECK-NEXT: ret <2 x i1> [[T5]]
 ;
@@ -212,7 +212,7 @@ define i1 @t6_oneuse3(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT: call void @use64(i64 [[T3]])
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT: ret i1 [[T5]]
 ;
@@ -244,7 +244,7 @@ define i1 @t7_oneuse4(i32 %x, i64 %y, i32 %len) {
 ; CHECK-NEXT: call void @use32(i32 [[T3_TRUNC]])
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
 ; CHECK-NEXT: [[TMP2:%.*]] = zext nneg i32 [[TMP1]] to i64
-; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[TMP2]], [[Y]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i64 [[Y]], [[TMP2]]
 ; CHECK-NEXT: [[T5:%.*]] = icmp ne i64 [[TMP3]], 0
 ; CHECK-NEXT: ret i1 [[T5]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll b/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
index a8f4644f1ae42..ebb53e36a3f21 100644
--- a/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
+++ b/llvm/test/Transforms/InstCombine/shift-direction-in-bit-test.ll
@@ -239,7 +239,7 @@ define i1 @t13_shift_of_const1(i32 %x, i32 %y, i32 %z) {
 define i1 @t14_and_with_const0(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @t14_and_with_const0(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[T2]]
 ;
@@ -251,7 +251,7 @@ define i1 @t14_and_with_const0(i32 %x, i32 %y, i32 %z) {
 define i1 @t15_and_with_const1(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @t15_and_with_const1(
 ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i32 1, [[Y:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[T2:%.*]] = icmp eq i32 [[TMP2]], 0
 ; CHECK-NEXT: ret i1 [[T2]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift-logic.ll b/llvm/test/Transforms/InstCombine/shift-logic.ll
index 3d4547e0bb9ca..593a22bec6490 100644
--- a/llvm/test/Transforms/InstCombine/shift-logic.ll
+++ b/llvm/test/Transforms/InstCombine/shift-logic.ll
@@ -189,7 +189,7 @@ define i32 @ashr_xor(i32 %x, i32 %py) {
 define i32 @shr_mismatch_xor(i32 %x, i32 %y) {
 ; CHECK-LABEL: @shr_mismatch_xor(
 ; CHECK-NEXT: [[SH0:%.*]] = ashr i32 [[X:%.*]], 5
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[Y:%.*]], [[SH0]]
 ; CHECK-NEXT: [[SH1:%.*]] = lshr i32 [[R]], 7
 ; CHECK-NEXT: ret i32 [[SH1]]
 ;
@@ -202,7 +202,7 @@ define i32 @shr_mismatch_xor(i32 %x, i32 %y) {
 define i32 @ashr_overshift_xor(i32 %x, i32 %y) {
 ; CHECK-LABEL: @ashr_overshift_xor(
 ; CHECK-NEXT: [[SH0:%.*]] = ashr i32 [[X:%.*]], 15
-; CHECK-NEXT: [[R:%.*]] = xor i32 [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor i32 [[Y:%.*]], [[SH0]]
 ; CHECK-NEXT: [[SH1:%.*]] = ashr i32 [[R]], 17
 ; CHECK-NEXT: ret i32 [[SH1]]
 ;
@@ -215,7 +215,7 @@ define i32 @ashr_overshift_xor(i32 %x, i32 %y) {
 define <2 x i32> @ashr_poison_poison_xor(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @ashr_poison_poison_xor(
 ; CHECK-NEXT: [[SH0:%.*]] = ashr <2 x i32> [[X:%.*]],
-; CHECK-NEXT: [[R:%.*]] = xor <2 x i32> [[SH0]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = xor <2 x i32> [[Y:%.*]], [[SH0]]
 ; CHECK-NEXT: [[SH1:%.*]] = ashr <2 x i32> [[R]],
 ; CHECK-NEXT: ret <2 x i32> [[SH1]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index f0bfd0171b265..558f4ffbfcabe 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -1692,7 +1692,7 @@ define i177 @lshr_out_of_range(i177 %Y, ptr %A2, ptr %ptr) {
 ; CHECK-LABEL: @lshr_out_of_range(
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i177 [[Y:%.*]], -1
 ; CHECK-NEXT: [[B4:%.*]] = sext i1 [[TMP1]] to i177
-; CHECK-NEXT: [[C8:%.*]] = icmp ult i177 [[B4]], [[Y]]
+; CHECK-NEXT: [[C8:%.*]] = icmp ugt i177 [[Y]], [[B4]]
 ; CHECK-NEXT: [[TMP2:%.*]] = sext i1 [[C8]] to i64
 ; CHECK-NEXT: [[G18:%.*]] = getelementptr ptr, ptr [[A2:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT: store ptr [[G18]], ptr [[PTR:%.*]], align 8
@@ -1810,7 +1810,7 @@ define void @ossfuzz_38078(i32 %arg, i32 %arg1, ptr %ptr, ptr %ptr2, ptr %ptr3,
 ; CHECK-NEXT: bb:
 ; CHECK-NEXT: [[G1:%.*]] = getelementptr i8, ptr [[PTR:%.*]], i64 -4
 ; CHECK-NEXT: [[I2:%.*]] = sub i32 0, [[ARG1:%.*]]
-; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[I2]], [[ARG:%.*]]
+; CHECK-NEXT: [[I5:%.*]] = icmp eq i32 [[ARG:%.*]], [[I2]]
 ; CHECK-NEXT: call void @llvm.assume(i1 [[I5]])
 ; CHECK-NEXT: store volatile i32 2147483647, ptr [[G1]], align 4
 ; CHECK-NEXT: br label [[BB:%.*]]
@@ -2047,7 +2047,7 @@ define i32 @ashr_sdiv_extra_use(i32 %x) {
 define i32 @shl1_cttz(i32 %x) {
 ; CHECK-LABEL: @shl1_cttz(
 ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and i32 [[X]], [[NEG]]
 ; CHECK-NEXT: ret i32 [[SHL]]
 ;
 %tz = call i32 @llvm.cttz.i32(i32 %x, i1 true)
@@ -2058,7 +2058,7 @@ define i32 @shl1_cttz(i32 %x) {
 define <2 x i8> @shl1_cttz_vec(<2 x i8> %x) {
 ; CHECK-LABEL: @shl1_cttz_vec(
 ; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[X]], [[NEG]]
 ; CHECK-NEXT: ret <2 x i8> [[SHL]]
 ;
 %tz = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
@@ -2069,7 +2069,7 @@ define <2 x i8> @shl1_cttz_vec(<2 x i8> %x) {
 define <2 x i8> @shl1_cttz_vec_poison(<2 x i8> %x) {
 ; CHECK-LABEL: @shl1_cttz_vec_poison(
 ; CHECK-NEXT: [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[X:%.*]]
-; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[NEG]], [[X]]
+; CHECK-NEXT: [[SHL:%.*]] = and <2 x i8> [[X]], [[NEG]]
 ; CHECK-NEXT: ret <2 x i8> [[SHL]]
 ;
 %tz = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
diff --git a/llvm/test/Transforms/InstCombine/shl-bo.ll b/llvm/test/Transforms/InstCombine/shl-bo.ll
index ab6e8c28cf9fc..356c4a288f9e3 100644
--- a/llvm/test/Transforms/InstCombine/shl-bo.ll
+++ b/llvm/test/Transforms/InstCombine/shl-bo.ll
@@ -7,7 +7,7 @@ define i8 @lshr_add(i8 %a, i8 %y) {
 ; CHECK-LABEL: @lshr_add(
 ; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
 ; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 5
-; CHECK-NEXT: [[R2:%.*]] = add i8 [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = add i8 [[Y:%.*]], [[B1]]
 ; CHECK-NEXT: [[L:%.*]] = and i8 [[R2]], -32
 ; CHECK-NEXT: ret i8 [[L]]
 ;
@@ -22,7 +22,7 @@ define <2 x i8> @lshr_add_commute_splat(<2 x i8> %a, <2 x i8> %y) {
 ; CHECK-LABEL: @lshr_add_commute_splat(
 ; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]],
 ; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]],
-; CHECK-NEXT: [[R2:%.*]] = add <2 x i8> [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = add <2 x i8> [[Y:%.*]], [[B1]]
 ; CHECK-NEXT: [[L:%.*]] = and <2 x i8> [[R2]],
 ; CHECK-NEXT: ret <2 x i8> [[L]]
 ;
@@ -67,7 +67,7 @@ define i8 @lshr_and(i8 %a, i8 %y) {
 ; CHECK-LABEL: @lshr_and(
 ; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
 ; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 6
-; CHECK-NEXT: [[R2:%.*]] = and i8 [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = and i8 [[Y:%.*]], [[B1]]
 ; CHECK-NEXT: ret i8 [[R2]]
 ;
 %x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -81,7 +81,7 @@ define <2 x i8> @lshr_and_commute_splat(<2 x i8> %a, <2 x i8> %y) {
 ; CHECK-LABEL: @lshr_and_commute_splat(
 ; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]],
 ; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]],
-; CHECK-NEXT: [[R2:%.*]] = and <2 x i8> [[B1]], [[Y:%.*]]
+; CHECK-NEXT: [[R2:%.*]] = and <2 x i8> [[Y:%.*]], [[B1]]
 ; CHECK-NEXT: ret <2 x i8> [[R2]]
 ;
 %x = srem <2 x i8> %a, ; thwart complexity-based canonicalization
@@ -96,7 +96,7 @@ define i8 @lshr_or(i8 %a, i8 %y) {
 ; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
 ; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 4
 ; CHECK-NEXT: [[Y_MASKED:%.*]] = and i8 [[Y:%.*]], -16
-; CHECK-NEXT: [[L:%.*]] = or i8 [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = or i8 [[Y_MASKED]], [[B1]]
 ; CHECK-NEXT: ret i8 [[L]]
 ;
 %x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -111,7 +111,7 @@ define <2 x i8> @lshr_or_commute_splat(<2 x i8> %a, <2 x i8> %y) {
 ; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]],
 ; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]],
 ; CHECK-NEXT: [[Y_MASKED:%.*]] = and <2 x i8> [[Y:%.*]],
-; CHECK-NEXT: [[L:%.*]] = or <2 x i8> [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = or <2 x i8> [[Y_MASKED]], [[B1]]
 ; CHECK-NEXT: ret <2 x i8> [[L]]
 ;
 %x = srem <2 x i8> %a, ; thwart complexity-based canonicalization
@@ -126,7 +126,7 @@ define i8 @lshr_xor(i8 %a, i8 %y) {
 ; CHECK-NEXT: [[X:%.*]] = srem i8 [[A:%.*]], 42
 ; CHECK-NEXT: [[B1:%.*]] = shl i8 [[X]], 3
 ; CHECK-NEXT: [[Y_MASKED:%.*]] = and i8 [[Y:%.*]], -8
-; CHECK-NEXT: [[L:%.*]] = xor i8 [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = xor i8 [[Y_MASKED]], [[B1]]
 ; CHECK-NEXT: ret i8 [[L]]
 ;
 %x = srem i8 %a, 42 ; thwart complexity-based canonicalization
@@ -141,7 +141,7 @@ define <2 x i8> @lshr_xor_commute_splat(<2 x i8> %a, <2 x i8> %y) {
 ; CHECK-NEXT: [[X:%.*]] = srem <2 x i8> [[A:%.*]],
 ; CHECK-NEXT: [[B1:%.*]] = shl <2 x i8> [[X]],
 ; CHECK-NEXT: [[Y_MASKED:%.*]] = and <2 x i8> [[Y:%.*]],
-; CHECK-NEXT: [[L:%.*]] = xor <2 x i8> [[B1]], [[Y_MASKED]]
+; CHECK-NEXT: [[L:%.*]] = xor <2 x i8> [[Y_MASKED]], [[B1]]
 ; CHECK-NEXT: ret <2 x i8> [[L]]
 ;
 %x = srem <2 x i8> %a, ; thwart complexity-based canonicalization
@@ -380,7 +380,7 @@ define i8 @lshr_and_add_use1(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
 ; CHECK-NEXT: call void @use(i8 [[R]])
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
 ;
@@ -397,7 +397,7 @@ define i8 @lshr_and_add_use2(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
 ; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
 ;
@@ -413,7 +413,7 @@ define i8 @lshr_and_add_use3(i8 %x, i8 %y) {
 ; CHECK-LABEL: @lshr_and_add_use3(
 ; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: call void @use(i8 [[B]])
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
@@ -432,7 +432,7 @@ define i8 @lshr_and_add_use4(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use(i8 [[R]])
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
 ; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
 ;
@@ -450,7 +450,7 @@ define i8 @lshr_and_add_use5(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[R:%.*]] = lshr i8 [[Y:%.*]], 3
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
 ; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: call void @use(i8 [[B]])
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
@@ -470,7 +470,7 @@ define i8 @lshr_and_add_use6(i8 %x, i8 %y) {
 ; CHECK-NEXT: call void @use(i8 [[R]])
 ; CHECK-NEXT: [[M:%.*]] = and i8 [[R]], 12
 ; CHECK-NEXT: call void @use(i8 [[M]])
-; CHECK-NEXT: [[B:%.*]] = add i8 [[M]], [[X:%.*]]
+; CHECK-NEXT: [[B:%.*]] = add i8 [[X:%.*]], [[M]]
 ; CHECK-NEXT: [[L:%.*]] = shl i8 [[B]], 3
 ; CHECK-NEXT: ret i8 [[L]]
 ;
@@ -541,7 +541,7 @@ define <2 x i32> @lshr_add_and_shl_v2i32_undef(<2 x i32> %x, <2 x i32> %y) {
 ; CHECK-LABEL: @lshr_add_and_shl_v2i32_undef(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]],
 ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]],
-; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[TMP4:%.*]] = shl <2 x i32> [[TMP3]],
 ; CHECK-NEXT: ret <2 x i32> [[TMP4]]
 ;
@@ -556,7 +556,7 @@ define <2 x i32> @lshr_add_and_shl_v2i32_nonuniform(<2 x i32> %x, <2 x i32> %y)
 ; CHECK-LABEL: @lshr_add_and_shl_v2i32_nonuniform(
 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[X:%.*]],
 ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i32> [[TMP1]],
-; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[TMP2]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add <2 x i32> [[Y:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[TMP4:%.*]] = shl <2 x i32> [[TMP3]],
 ; CHECK-NEXT: ret <2 x i32> [[TMP4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/shuffle-binop.ll b/llvm/test/Transforms/InstCombine/shuffle-binop.ll
index 8460f8b2c6cd3..8ab7f315dbf54 100644
--- a/llvm/test/Transforms/InstCombine/shuffle-binop.ll
+++ b/llvm/test/Transforms/InstCombine/shuffle-binop.ll
@@ -82,7 +82,7 @@ define <4 x i8> @splat_binop_splat_x_splat_y(<4 x i8> %x, <4 x i8> %y) {
 ; CHECK-NEXT: call void @use(<4 x i8> [[XSPLAT]])
 ; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <4 x i8> [[Y:%.*]], <4 x i8> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT: call void @use(<4 x i8> [[YSPLAT]])
-; CHECK-NEXT: [[TMP1:%.*]] = mul nuw <4 x i8> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul nuw <4 x i8> [[X]], [[Y]]
 ; CHECK-NEXT: [[BSPLAT:%.*]] = shufflevector <4 x i8> [[TMP1]], <4 x i8> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT: ret <4 x i8> [[BSPLAT]]
 ;
@@ -101,7 +101,7 @@ define <4 x float> @splat_binop_splat_x_splat_y_fmath_flags(<4 x float> %x, <4 x
 ; CHECK-NEXT: call void @use(<4 x float> [[XSPLAT]])
 ; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT: call void @use(<4 x float> [[YSPLAT]])
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <4 x float> [[Y]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <4 x float> [[X]], [[Y]]
 ; CHECK-NEXT: [[BSPLAT:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> zeroinitializer
 ; CHECK-NEXT: ret <4 x float> [[BSPLAT]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
index 7e762627e5ec0..513fb69ab7463 100644
--- a/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
+++ b/llvm/test/Transforms/InstCombine/signed-truncation-check.ll
@@ -612,7 +612,7 @@ define zeroext i1 @oneuse_trunc_sext(i32 %arg) {
 ; CHECK-NEXT: call void @use8(i8 [[T3]])
 ; CHECK-NEXT: [[T4:%.*]] = sext i8 [[T3]] to i32
 ; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
+; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[ARG]], [[T4]]
 ; CHECK-NEXT: call void @use1(i1 [[T5]])
 ; CHECK-NEXT: [[T6:%.*]] = and i1 [[T2]], [[T5]]
 ; CHECK-NEXT: ret i1 [[T6]]
@@ -641,7 +641,7 @@ define zeroext i1 @oneuse_trunc_sext_logical(i32 %arg) {
 ; CHECK-NEXT: call void @use8(i8 [[T3]])
 ; CHECK-NEXT: [[T4:%.*]] = sext i8 [[T3]] to i32
 ; CHECK-NEXT: call void @use32(i32 [[T4]])
-; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[T4]], [[ARG]]
+; CHECK-NEXT: [[T5:%.*]] = icmp eq i32 [[ARG]], [[T4]]
 ; CHECK-NEXT: call void @use1(i1 [[T5]])
 ; CHECK-NEXT: [[T6:%.*]] = select i1 [[T2]], i1 [[T5]], i1 false
 ; CHECK-NEXT: ret i1 [[T6]]
diff --git a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
index e4fb7764ba9e5..403f3bacf34d8 100644
--- a/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
+++ b/llvm/test/Transforms/InstCombine/simplify-demanded-fpclass.ll
@@ -16,7 +16,7 @@ define float @ninf_user_select_inf(i1 %cond, float %x, float %y) {
 ; CHECK-LABEL: define float @ninf_user_select_inf
 ; CHECK-SAME: (i1 [[COND:%.*]], float [[X:%.*]], float [[Y:%.*]]) {
 ; CHECK-NEXT: [[SELECT:%.*]] = select i1 [[COND]], float [[X]], float 0x7FF0000000000000
-; CHECK-NEXT: [[NINF_USER:%.*]] = fmul ninf float [[SELECT]], [[Y]]
+; CHECK-NEXT: [[NINF_USER:%.*]] = fmul ninf float [[Y]], [[SELECT]]
 ; CHECK-NEXT: ret float [[NINF_USER]]
 ;
 %select = select i1 %cond, float %x, float 0x7FF0000000000000
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-and.ll b/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
index 9db6440a49ee7..1f3b46cdc386d 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-and.ll
@@ -40,7 +40,7 @@ define i1 @n1(i1 %i1, i32 %v2, i32 %v3) {
 define i1 @n2(i32 %v0, i32 %v1, i1 %i2) {
 ; CHECK-LABEL: @n2(
 ; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I3:%.*]] = and i1 [[I1]], [[I2:%.*]]
+; CHECK-NEXT: [[I3:%.*]] = and i1 [[I2:%.*]], [[I1]]
 ; CHECK-NEXT: [[I4:%.*]] = xor i1 [[I3]], true
 ; CHECK-NEXT: ret i1 [[I4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/sink-not-into-or.ll b/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
index 0b758112f699e..8e6c983b71fe3 100644
--- a/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
+++ b/llvm/test/Transforms/InstCombine/sink-not-into-or.ll
@@ -40,7 +40,7 @@ define i1 @n1(i1 %i1, i32 %v2, i32 %v3) {
 define i1 @n2(i32 %v0, i32 %v1, i1 %i2) {
 ; CHECK-LABEL: @n2(
 ; CHECK-NEXT: [[I1:%.*]] = icmp eq i32 [[V0:%.*]], [[V1:%.*]]
-; CHECK-NEXT: [[I3:%.*]] = or i1 [[I1]], [[I2:%.*]]
+; CHECK-NEXT: [[I3:%.*]] = or i1 [[I2:%.*]], [[I1]]
 ; CHECK-NEXT: [[I4:%.*]] = xor i1 [[I3]], true
 ; CHECK-NEXT: ret i1 [[I4]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/smax-icmp.ll b/llvm/test/Transforms/InstCombine/smax-icmp.ll
index 022ec6ad4f346..4c9cbed9d9ebf 100644
--- a/llvm/test/Transforms/InstCombine/smax-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smax-icmp.ll
@@ -95,7 +95,7 @@ define i1 @sle_smax2(i32 %x, i32 %y) {
 define i1 @sle_smax3(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sle_smax3(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -110,7 +110,7 @@ define i1 @sle_smax3(i32 %a, i32 %y) {
 define i1 @sle_smax4(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sle_smax4(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -207,7 +207,7 @@ define i1 @sgt_smax2(i32 %x, i32 %y) {
 define i1 @sgt_smax3(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sgt_smax3(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -222,7 +222,7 @@ define i1 @sgt_smax3(i32 %a, i32 %y) {
 define i1 @sgt_smax4(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sgt_smax4(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/smin-icmp.ll b/llvm/test/Transforms/InstCombine/smin-icmp.ll
index c97f29f5eff8d..d1283d8afc0a7 100644
--- a/llvm/test/Transforms/InstCombine/smin-icmp.ll
+++ b/llvm/test/Transforms/InstCombine/smin-icmp.ll
@@ -94,7 +94,7 @@ define i1 @sge_smin2(i32 %x, i32 %y) {
 define i1 @sge_smin3(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sge_smin3(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -109,7 +109,7 @@ define i1 @sge_smin3(i32 %a, i32 %y) {
 define i1 @sge_smin4(i32 %a, i32 %y) {
 ; CHECK-LABEL: @sge_smin4(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp sge i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -206,7 +206,7 @@ define i1 @slt_smin2(i32 %x, i32 %y) {
 define i1 @slt_smin3(i32 %a, i32 %y) {
 ; CHECK-LABEL: @slt_smin3(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
@@ -221,7 +221,7 @@ define i1 @slt_smin3(i32 %a, i32 %y) {
 define i1 @slt_smin4(i32 %a, i32 %y) {
 ; CHECK-LABEL: @slt_smin4(
 ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3
-; CHECK-NEXT: [[CMP2:%.*]] = icmp sgt i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[CMP2:%.*]] = icmp slt i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: ret i1 [[CMP2]]
 ;
 %x = add i32 %a, 3 ; thwart complexity-based canonicalization
diff --git a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
index 0379f82f4a783..e21ca605fc5af 100644
--- a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
@@ -242,7 +242,7 @@ define i32 @sub_ashr_or_i32_extra_use_ashr(i32 %x, i32 %y, ptr %p) {
 ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[Y:%.*]], [[X:%.*]]
 ; CHECK-NEXT: [[SHR:%.*]] = sext i1 [[TMP1]] to i32
 ; CHECK-NEXT: store i32 [[SHR]], ptr [[P:%.*]], align 4
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[SHR]]
 ; CHECK-NEXT: ret i32 [[OR]]
 ;
 %sub = sub nsw i32 %y, %x
@@ -268,7 +268,7 @@ define i32 @sub_ashr_or_i32_no_nsw_nuw(i32 %x, i32 %y) {
 define i32 @neg_or_extra_use_ashr_i32(i32 %x, ptr %p) {
 ; CHECK-LABEL: @neg_or_extra_use_ashr_i32(
 ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
 ; CHECK-NEXT: [[SHR:%.*]] = ashr i32 [[OR]], 31
 ; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT: ret i32 [[SHR]]
diff --git a/llvm/test/Transforms/InstCombine/sub-gep.ll b/llvm/test/Transforms/InstCombine/sub-gep.ll
index 5130883409b28..b773d106b2c98 100644
--- a/llvm/test/Transforms/InstCombine/sub-gep.ll
+++ b/llvm/test/Transforms/InstCombine/sub-gep.ll
@@ -422,7 +422,7 @@ define i64 @nullptrtoint_scalable_x(i64 %x) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4
-; CHECK-NEXT: [[PTR_IDX:%.*]] = mul nsw i64 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[PTR_IDX:%.*]] = mul nsw i64 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i64 [[PTR_IDX]]
 ;
 entry:
diff --git a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
index 5ecf4b8da0c49..33c02d77c45b9 100644
--- a/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-lshr-or-to-icmp-select.ll
@@ -81,7 +81,7 @@ define i32 @neg_extra_use_or_lshr_i32(i32 %x, ptr %p) {
 define i32 @neg_or_extra_use_lshr_i32(i32 %x, ptr %p) {
 ; CHECK-LABEL: @neg_or_extra_use_lshr_i32(
 ; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[X:%.*]]
-; CHECK-NEXT: [[OR:%.*]] = or i32 [[NEG]], [[X]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[X]], [[NEG]]
 ; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[OR]], 31
 ; CHECK-NEXT: store i32 [[OR]], ptr [[P:%.*]], align 4
 ; CHECK-NEXT: ret i32 [[SHR]]
diff --git a/llvm/test/Transforms/InstCombine/sub-minmax.ll b/llvm/test/Transforms/InstCombine/sub-minmax.ll
index c9ce165c38988..c5af57449bf71 100644
--- a/llvm/test/Transforms/InstCombine/sub-minmax.ll
+++ b/llvm/test/Transforms/InstCombine/sub-minmax.ll
@@ -770,7 +770,7 @@ define i8 @sub_add_umin(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: define {{[^@]+}}@sub_add_umin
 ; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[S]]
 ;
 %a = add i8 %x, %y
@@ -783,7 +783,7 @@ define i8 @sub_add_umin_commute_umin(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_umin
 ; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[S]]
 ;
 %a = add i8 %x, %y
@@ -796,7 +796,7 @@ define i8 @sub_add_umin_commute_add(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_add
 ; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[S]]
 ;
 %a = add i8 %y, %x
@@ -809,7 +809,7 @@ define i8 @sub_add_umin_commute_add_umin(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_commute_add_umin
 ; CHECK-SAME: (i8 [[X:%.*]], i8 [[Y:%.*]], i8 [[Z:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.usub.sat.i8(i8 [[Y]], i8 [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add i8 [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add i8 [[X]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[S]]
 ;
 %a = add i8 %y, %x
@@ -822,7 +822,7 @@ define <2 x i8> @sub_add_umin_vec(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z) {
 ; CHECK-LABEL: define {{[^@]+}}@sub_add_umin_vec
 ; CHECK-SAME: (<2 x i8> [[X:%.*]], <2 x i8> [[Y:%.*]], <2 x i8> [[Z:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> [[Y]], <2 x i8> [[Z]])
-; CHECK-NEXT: [[S:%.*]] = add <2 x i8> [[TMP1]], [[X]]
+; CHECK-NEXT: [[S:%.*]] = add <2 x i8> [[X]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i8> [[S]]
 ;
 %a = add <2 x i8> %x, %y
diff --git a/llvm/test/Transforms/InstCombine/sub-not.ll b/llvm/test/Transforms/InstCombine/sub-not.ll
index 89ccf5aa3c8f4..5053319162f0d 100644
--- a/llvm/test/Transforms/InstCombine/sub-not.ll
+++ b/llvm/test/Transforms/InstCombine/sub-not.ll
@@ -6,7 +6,7 @@ declare void @use(i8)
 define i8 @sub_not(i8 %x, i8 %y) {
 ; CHECK-LABEL: @sub_not(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
 %s = sub i8 %x, %y
@@ -30,7 +30,7 @@ define i8 @sub_not_extra_use(i8 %x, i8 %y) {
 define <2 x i8> @sub_not_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @sub_not_vec(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[X:%.*]],
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i8> [[R]]
 ;
 %s = sub <2 x i8> %x, %y
@@ -41,7 +41,7 @@ define <2 x i8> @sub_not_vec(<2 x i8> %x, <2 x i8> %y) {
 define i8 @dec_sub(i8 %x, i8 %y) {
 ; CHECK-LABEL: @dec_sub(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
 %s = sub i8 %x, %y
@@ -65,7 +65,7 @@ define i8 @dec_sub_extra_use(i8 %x, i8 %y) {
 define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @dec_sub_vec(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]],
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i8> [[R]]
 ;
 %s = sub <2 x i8> %x, %y
@@ -76,7 +76,7 @@ define <2 x i8> @dec_sub_vec(<2 x i8> %x, <2 x i8> %y) {
 define i8 @sub_inc(i8 %x, i8 %y) {
 ; CHECK-LABEL: @sub_inc(
 ; CHECK-NEXT: [[S_NEG:%.*]] = xor i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[S_NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[Y:%.*]], [[S_NEG]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
 %s = add i8 %x, 1
@@ -100,7 +100,7 @@ define i8 @sub_inc_extra_use(i8 %x, i8 %y) {
 define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @sub_inc_vec(
 ; CHECK-NEXT: [[S_NEG:%.*]] = xor <2 x i8> [[X:%.*]],
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[S_NEG]], [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[Y:%.*]], [[S_NEG]]
 ; CHECK-NEXT: ret <2 x i8> [[R]]
 ;
 %s = add <2 x i8> %x,
@@ -111,7 +111,7 @@ define <2 x i8> @sub_inc_vec(<2 x i8> %x, <2 x i8> %y) {
 define i8 @sub_dec(i8 %x, i8 %y) {
 ; CHECK-LABEL: @sub_dec(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[R]]
 ;
 %s = add i8 %x, -1
@@ -135,7 +135,7 @@ define i8 @sub_dec_extra_use(i8 %x, i8 %y) {
 define <2 x i8> @sub_dec_vec(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @sub_dec_vec(
 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]],
-; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = add <2 x i8> [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i8> [[R]]
 ;
 %s = add <2 x i8> %x,
diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
index 76a172302999a..60607041ad2f9 100644
--- a/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/sub-of-negatible-inseltpoison.ll
@@ -262,7 +262,7 @@ define i8 @t12(i8 %x, i8 %y, i8 %z) {
 ; CHECK-NEXT: [[T1:%.*]] = sub i8 0, [[Z:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %y
@@ -296,7 +296,7 @@ define i8 @n14(i8 %x, i8 %y, i8 %z) {
 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
 ; CHECK-NEXT: [[T2:%.*]] = sub i8 0, [[TMP1]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %y
@@ -399,7 +399,7 @@ define i8 @n16(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @n16(
 ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = mul i8 [[T0]], [[Z:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = mul i8 [[Z:%.*]], [[T0]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i8 [[T2]]
@@ -535,7 +535,7 @@ define i8 @t20(i8 %x, i16 %y) {
 ; CHECK-LABEL: @t20(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = shl i16 42, [[Y:%.*]]
 ; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i16 [[T0_NEG]] to i8
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
 ; CHECK-NEXT: ret i8 [[T2]]
 ;
 %t0 = shl i16 -42, %y
@@ -742,7 +742,7 @@ define i8 @negate_lshr_wrongshift(i8 %x, i8 %y) {
 define i8 @negate_sext(i8 %x, i1 %y) {
 ; CHECK-LABEL: @negate_sext(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
 ; CHECK-NEXT: ret i8 [[T1]]
 ;
 %t0 = sext i1 %y to i8
@@ -752,7 +752,7 @@ define i8 @negate_sext(i8 %x, i1 %y) {
 define i8 @negate_zext(i8 %x, i1 %y) {
 ; CHECK-LABEL: @negate_zext(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = sext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
 ; CHECK-NEXT: ret i8 [[T1]]
 ;
 %t0 = zext i1 %y to i8
@@ -1009,7 +1009,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) {
 ; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set(
 ; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1
 ; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
 ; CHECK-NEXT: ret i8 [[T2]]
 ;
 %t0 = shl i8 %y, 1
@@ -1312,7 +1312,7 @@ define i8 @negate_nabs(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %x
diff --git a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
index b2e14ceaca1b0..b19eae4d8f9a4 100644
--- a/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
+++ b/llvm/test/Transforms/InstCombine/sub-of-negatible.ll
@@ -286,7 +286,7 @@ define i8 @t12(i8 %x, i8 %y, i8 %z) {
 ; CHECK-NEXT: [[T1:%.*]] = sub i8 0, [[Z:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %y
@@ -320,7 +320,7 @@ define i8 @n14(i8 %x, i8 %y, i8 %z) {
 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[Y]], [[Z]]
 ; CHECK-NEXT: [[T2:%.*]] = sub i8 0, [[TMP1]]
 ; CHECK-NEXT: call void @use8(i8 [[T2]])
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %y
@@ -423,7 +423,7 @@ define i8 @n16(i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @n16(
 ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[Y:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
-; CHECK-NEXT: [[T1:%.*]] = mul i8 [[T0]], [[Z:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = mul i8 [[Z:%.*]], [[T0]]
 ; CHECK-NEXT: call void @use8(i8 [[T1]])
 ; CHECK-NEXT: [[T2:%.*]] = sub i8 [[X:%.*]], [[T1]]
 ; CHECK-NEXT: ret i8 [[T2]]
@@ -559,7 +559,7 @@ define i8 @t20(i8 %x, i16 %y) {
 ; CHECK-LABEL: @t20(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = shl i16 42, [[Y:%.*]]
 ; CHECK-NEXT: [[T1_NEG:%.*]] = trunc i16 [[T0_NEG]] to i8
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
 ; CHECK-NEXT: ret i8 [[T2]]
 ;
 %t0 = shl i16 -42, %y
@@ -766,7 +766,7 @@ define i8 @negate_lshr_wrongshift(i8 %x, i8 %y) {
 define i8 @negate_sext(i8 %x, i1 %y) {
 ; CHECK-LABEL: @negate_sext(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = zext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
 ; CHECK-NEXT: ret i8 [[T1]]
 ;
 %t0 = sext i1 %y to i8
@@ -776,7 +776,7 @@ define i8 @negate_sext(i8 %x, i1 %y) {
 define i8 @negate_zext(i8 %x, i1 %y) {
 ; CHECK-LABEL: @negate_zext(
 ; CHECK-NEXT: [[T0_NEG:%.*]] = sext i1 [[Y:%.*]] to i8
-; CHECK-NEXT: [[T1:%.*]] = add i8 [[T0_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T1:%.*]] = add i8 [[X:%.*]], [[T0_NEG]]
 ; CHECK-NEXT: ret i8 [[T1]]
 ;
 %t0 = zext i1 %y to i8
@@ -1033,7 +1033,7 @@ define i8 @negation_of_increment_via_or_with_no_common_bits_set(i8 %x, i8 %y) {
 ; CHECK-LABEL: @negation_of_increment_via_or_with_no_common_bits_set(
 ; CHECK-NEXT: [[T0:%.*]] = shl i8 [[Y:%.*]], 1
 ; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[T0]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
 ; CHECK-NEXT: ret i8 [[T2]]
 ;
 %t0 = shl i8 %y, 1
@@ -1071,7 +1071,7 @@ define i8 @negation_of_increment_via_or_common_bits_set(i8 %x, i8 %y) {
 define i8 @negation_of_increment_via_or_disjoint(i8 %x, i8 %y) {
 ; CHECK-LABEL: @negation_of_increment_via_or_disjoint(
 ; CHECK-NEXT: [[T1_NEG:%.*]] = xor i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[T2:%.*]] = add i8 [[T1_NEG]], [[X:%.*]]
+; CHECK-NEXT: [[T2:%.*]] = add i8 [[X:%.*]], [[T1_NEG]]
 ; CHECK-NEXT: ret i8 [[T2]]
 ;
 %t1 = or disjoint i8 %y, 1
@@ -1347,7 +1347,7 @@ define i8 @negate_nabs(i8 %x, i8 %y) {
 ; CHECK-NEXT: [[T0:%.*]] = sub i8 0, [[X:%.*]]
 ; CHECK-NEXT: call void @use8(i8 [[T0]])
 ; CHECK-NEXT: [[TMP1:%.*]] = call i8 @llvm.abs.i8(i8 [[X]], i1 false)
-; CHECK-NEXT: [[T3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[T3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i8 [[T3]]
 ;
 %t0 = sub i8 0, %x
diff --git a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
index 461c9b0fb1e0c..acbc29db871e8 100644
--- a/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
+++ b/llvm/test/Transforms/InstCombine/sub-xor-cmp.ll
@@ -58,7 +58,7 @@ define i64 @sext_non_bool_xor_sub(i64 %a, i8 %b) {
 ; CHECK-LABEL: define i64 @sext_non_bool_xor_sub(
 ; CHECK-SAME: i64 [[A:%.*]], i8 [[B:%.*]]) {
 ; CHECK-NEXT: [[C:%.*]] = sext i8 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
 ; CHECK-NEXT: [[R:%.*]] = sub i64 [[D]], [[C]]
 ; CHECK-NEXT: ret i64 [[R]]
 ;
@@ -72,7 +72,7 @@ define i64 @sext_non_bool_xor_sub_1(i64 %a, i8 %b) {
 ; CHECK-LABEL: define i64 @sext_non_bool_xor_sub_1(
 ; CHECK-SAME: i64 [[A:%.*]], i8 [[B:%.*]]) {
 ; CHECK-NEXT: [[C:%.*]] = sext i8 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
 ; CHECK-NEXT: [[R:%.*]] = sub i64 [[D]], [[C]]
 ; CHECK-NEXT: ret i64 [[R]]
 ;
@@ -135,9 +135,9 @@ define i64 @xor_multi_uses(i64 %a, i1 %b, i64 %x) {
 ; CHECK-LABEL: define i64 @xor_multi_uses(
 ; CHECK-SAME: i64 [[A:%.*]], i1 [[B:%.*]], i64 [[X:%.*]]) {
 ; CHECK-NEXT: [[C:%.*]] = sext i1 [[B]] to i64
-; CHECK-NEXT: [[D:%.*]] = xor i64 [[C]], [[A]]
+; CHECK-NEXT: [[D:%.*]] = xor i64 [[A]], [[C]]
 ; CHECK-NEXT: [[E:%.*]] = sub i64 [[D]], [[C]]
-; CHECK-NEXT: [[F:%.*]] = mul i64 [[D]], [[X]]
+; CHECK-NEXT: [[F:%.*]] = mul i64 [[X]], [[D]]
 ; CHECK-NEXT: [[R:%.*]] = add i64 [[F]], [[E]]
 ; CHECK-NEXT: ret i64 [[R]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/sub.ll b/llvm/test/Transforms/InstCombine/sub.ll
index cb308ab66b093..ec88984c49cca 100644
--- a/llvm/test/Transforms/InstCombine/sub.ll
+++ b/llvm/test/Transforms/InstCombine/sub.ll
@@ -230,7 +230,7 @@ define i32 @test5(i32 %A, i32 %B, i32 %C) {
 define i32 @test6(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test6(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and i32 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i32 [[D]]
 ;
 %C = and i32 %A, %B
@@ -241,7 +241,7 @@ define i32 @test6(i32 %A, i32 %B) {
 define i32 @test6commuted(i32 %A, i32 %B) {
 ; CHECK-LABEL: @test6commuted(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1
-; CHECK-NEXT: [[D:%.*]] = and i32 [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and i32 [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret i32 [[D]]
 ;
 %C = and i32 %B, %A
@@ -686,7 +686,7 @@ define <2 x i32> @test27commutedvecmixed(<2 x i32> %x, <2 x i32> %y) {
 define i32 @test28(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @test28(
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Z:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
 %neg = sub i32 0, %z
@@ -698,7 +698,7 @@ define i32 @test28(i32 %x, i32 %y, i32 %z) {
 define i32 @test28commuted(i32 %x, i32 %y, i32 %z) {
 ; CHECK-LABEL: @test28commuted(
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[Z:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[SUB:%.*]] = add i32 [[TMP1]], [[X:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = add i32 [[X:%.*]], [[TMP1]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
 %neg = sub i32 0, %z
@@ -893,7 +893,7 @@ define i32 @test45commuted(i32 %x, i32 %y) {
 define i32 @test46(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test46(
 ; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[Y:%.*]], [[X_NOT]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
 %or = or i32 %x, %y
@@ -904,7 +904,7 @@ define i32 @test46(i32 %x, i32 %y) {
 define i32 @test46commuted(i32 %x, i32 %y) {
 ; CHECK-LABEL: @test46commuted(
 ; CHECK-NEXT: [[X_NOT:%.*]] = xor i32 [[X:%.*]], -1
-; CHECK-NEXT: [[SUB:%.*]] = and i32 [[X_NOT]], [[Y:%.*]]
+; CHECK-NEXT: [[SUB:%.*]] = and i32 [[Y:%.*]], [[X_NOT]]
 ; CHECK-NEXT: ret i32 [[SUB]]
 ;
 %or = or i32 %y, %x
@@ -1368,7 +1368,7 @@ define i32 @test71(i32 %A, i32 %B) {
 define <2 x i32> @test72(<2 x i32> %A, <2 x i32> %B) {
 ; CHECK-LABEL: @test72(
 ; CHECK-NEXT: [[B_NOT:%.*]] = xor <2 x i32> [[B:%.*]],
-; CHECK-NEXT: [[D:%.*]] = and <2 x i32> [[B_NOT]], [[A:%.*]]
+; CHECK-NEXT: [[D:%.*]] = and <2 x i32> [[A:%.*]], [[B_NOT]]
 ; CHECK-NEXT: ret <2 x i32> [[D]]
 ;
 %C = or <2 x i32> %A, %B
@@ -1460,7 +1460,7 @@ define i8 @sub_add_sub_reassoc(i8 %w, i8 %x, i8 %y, i8 %z) {
 define <2 x i8> @sub_add_sub_reassoc_commute(<2 x i8> %w, <2 x i8> %x, <2 x i8> %y, <2 x i8> %z) {
 ; CHECK-LABEL: @sub_add_sub_reassoc_commute(
 ; CHECK-NEXT: [[D:%.*]] = sdiv <2 x i8> [[Y:%.*]],
-; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[D]], [[W:%.*]]
+; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[W:%.*]], [[D]]
 ; CHECK-NEXT: [[TMP2:%.*]] = add <2 x i8> [[X:%.*]], [[Z:%.*]]
 ; CHECK-NEXT: [[S2:%.*]] = sub <2 x i8> [[TMP1]], [[TMP2]]
 ; CHECK-NEXT: ret <2 x i8> [[S2]]
@@ -1478,7 +1478,7 @@ define i8 @sub_add_sub_reassoc_twice(i8 %v, i8 %w, i8 %x, i8 %y, i8 %z) {
 ; CHECK-LABEL: @sub_add_sub_reassoc_twice(
 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[W:%.*]], [[Z:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = add i8 [[X:%.*]], [[V:%.*]]
-; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[TMP1]], [[Y:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = add i8 [[Y:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[S3:%.*]] = sub i8 [[TMP2]], [[TMP3]]
 ; CHECK-NEXT: ret i8 [[S3]]
 ;
@@ -2044,7 +2044,7 @@ define i16 @urem_zext_noundef(i8 noundef %x, i8 %y) {
 define i8 @mul_sub_common_factor_commute1(i8 %x, i8 %y) {
 ; CHECK-LABEL: @mul_sub_common_factor_commute1(
 ; CHECK-NEXT: [[X1:%.*]] = add i8 [[Y:%.*]], -1
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[X1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[X1]]
 ; CHECK-NEXT: ret i8 [[A]]
 ;
 %m = mul nsw i8 %x, %y
@@ -2070,7 +2070,7 @@ define <2 x i8> @mul_sub_common_factor_commute2(<2 x i8> %x, <2 x i8> %y) {
 define i8 @mul_sub_common_factor_commute3(i8 %x, i8 %y) {
 ; CHECK-LABEL: @mul_sub_common_factor_commute3(
 ; CHECK-NEXT: [[M1:%.*]] = sub i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[M1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[M1]]
 ; CHECK-NEXT: ret i8 [[A]]
 ;
 %m = mul nuw i8 %x, %y
@@ -2081,7 +2081,7 @@ define i8 @mul_sub_common_factor_commute3(i8 %x, i8 %y) {
 define i8 @mul_sub_common_factor_commute4(i8 %x, i8 %y) {
 ; CHECK-LABEL: @mul_sub_common_factor_commute4(
 ; CHECK-NEXT: [[M1:%.*]] = sub i8 1, [[Y:%.*]]
-; CHECK-NEXT: [[A:%.*]] = mul i8 [[M1]], [[X:%.*]]
+; CHECK-NEXT: [[A:%.*]] = mul i8 [[X:%.*]], [[M1]]
 ; CHECK-NEXT: ret i8 [[A]]
 ;
 %m = mul nsw i8 %y, %x
@@ -2734,7 +2734,7 @@ if.else:
 define i1 @sub_infer_nuw_from_domcond_fold3(i16 %xx, i32 range(i32 0, 12) %y) {
 ; CHECK-LABEL: @sub_infer_nuw_from_domcond_fold3(
 ; CHECK-NEXT: [[X:%.*]] = zext i16 [[XX:%.*]] to i32
-; CHECK-NEXT: [[COND:%.*]] = icmp ult i32 [[X]], [[Y:%.*]]
+; CHECK-NEXT: [[COND:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]]
 ; CHECK-NEXT: br i1 [[COND]], label [[IF_THEN:%.*]], label [[IF_ELSE:%.*]]
 ; CHECK: if.then:
 ; CHECK-NEXT: ret i1 false
diff --git a/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll b/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
index e3103906911af..4593730b8809f 100644
--- a/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
+++ b/llvm/test/Transforms/InstCombine/trunc-binop-ext.ll
@@ -5,7 +5,7 @@ define i16 @narrow_sext_and(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_sext_and(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = sext i16 %x16 to i32
@@ -18,7 +18,7 @@ define i16 @narrow_zext_and(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_zext_and(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = zext i16 %x16 to i32
@@ -31,7 +31,7 @@ define i16 @narrow_sext_or(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_sext_or(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = sext i16 %x16 to i32
@@ -44,7 +44,7 @@ define i16 @narrow_zext_or(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_zext_or(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = or i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = zext i16 %x16 to i32
@@ -57,7 +57,7 @@ define i16 @narrow_sext_xor(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_sext_xor(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = xor i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = sext i16 %x16 to i32
@@ -70,7 +70,7 @@ define i16 @narrow_zext_xor(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_zext_xor(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = xor i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = xor i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = zext i16 %x16 to i32
@@ -83,7 +83,7 @@ define i16 @narrow_sext_add(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_sext_add(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = add i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = sext i16 %x16 to i32
@@ -96,7 +96,7 @@ define i16 @narrow_zext_add(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_zext_add(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = add i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = add i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = zext i16 %x16 to i32
@@ -135,7 +135,7 @@ define i16 @narrow_sext_mul(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_sext_mul(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = mul i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = sext i16 %x16 to i32
@@ -148,7 +148,7 @@ define i16 @narrow_zext_mul(i16 %x16, i32 %y32) {
 ; CHECK-LABEL: define i16 @narrow_zext_mul(
 ; CHECK-SAME: i16 [[X16:%.*]], i32 [[Y32:%.*]]) {
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 [[Y32]] to i16
-; CHECK-NEXT: [[R:%.*]] = mul i16 [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = mul i16 [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret i16 [[R]]
 ;
 %x32 = zext i16 %x16 to i32
@@ -165,7 +165,7 @@ define <2 x i16> @narrow_sext_and_commute(<2 x i16> %x16, <2 x i32> %y32) {
 ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
 ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]],
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i16> [[R]]
 ;
 %y32op0 = sdiv <2 x i32> %y32,
@@ -180,7 +180,7 @@ define <2 x i16> @narrow_zext_and_commute(<2 x i16> %x16, <2 x i32> %y32) {
 ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
 ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]],
 ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16>
-; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[TMP1]], [[X16]]
+; CHECK-NEXT: [[R:%.*]] = and <2 x i16> [[X16]], [[TMP1]]
 ; CHECK-NEXT: ret <2 x i16> [[R]]
 ;
 %y32op0 = sdiv <2 x i32> %y32,
@@ -195,7 +195,7 @@ define <2 x i16> @narrow_sext_or_commute(<2 x i16> %x16, <2 x i32> %y32) {
 ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) {
 ;
CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -210,7 +210,7 @@ define <2 x i16> @narrow_zext_or_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = or <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -225,7 +225,7 @@ define <2 x i16> @narrow_sext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -240,7 +240,7 @@ define <2 x i16> @narrow_zext_xor_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = xor <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -255,7 +255,7 @@ define <2 x i16> @narrow_sext_add_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -270,7 +270,7 @@ define <2 x i16> @narrow_zext_add_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = add <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -315,7 +315,7 @@ define <2 x i16> @narrow_sext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv <2 x i32> %y32, @@ -330,7 +330,7 @@ define <2 x i16> @narrow_zext_mul_commute(<2 x i16> %x16, <2 x i32> %y32) { ; CHECK-SAME: <2 x i16> [[X16:%.*]], <2 x i32> [[Y32:%.*]]) { ; CHECK-NEXT: [[Y32OP0:%.*]] = sdiv <2 x i32> [[Y32]], ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i32> [[Y32OP0]] to <2 x i16> -; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[TMP1]], [[X16]] +; CHECK-NEXT: [[R:%.*]] = mul <2 x i16> [[X16]], [[TMP1]] ; CHECK-NEXT: ret <2 x i16> [[R]] ; %y32op0 = sdiv 
<2 x i32> %y32, diff --git a/llvm/test/Transforms/InstCombine/uaddo.ll b/llvm/test/Transforms/InstCombine/uaddo.ll index c638c0adef055..9b56dce8b4585 100644 --- a/llvm/test/Transforms/InstCombine/uaddo.ll +++ b/llvm/test/Transforms/InstCombine/uaddo.ll @@ -5,7 +5,7 @@ define i32 @uaddo_commute1(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute1( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -20,7 +20,7 @@ define <2 x i32> @uaddo_commute2(<2 x i32> %x, <2 x i32> %y, <2 x i32> %z) { ; CHECK-LABEL: @uaddo_commute2( ; CHECK-NEXT: [[NOTY:%.*]] = xor <2 x i32> [[Y:%.*]], <i32 -1, i32 -1> ; CHECK-NEXT: [[A:%.*]] = add <2 x i32> [[Y]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp ult <2 x i32> [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt <2 x i32> [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[C]], <2 x i32> [[Z:%.*]], <2 x i32> [[A]] ; CHECK-NEXT: ret <2 x i32> [[R]] ; @@ -35,7 +35,7 @@ define i32 @uaddo_commute3(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute3( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -50,7 +50,7 @@ define i32 @uaddo_commute4(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute4( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -65,7 +65,7 @@ define i32 @uaddo_commute5(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute5( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -80,7 +80,7 @@ define i32 @uaddo_commute6(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute6( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -95,7 +95,7 @@ define i32 @uaddo_commute7(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute7( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -110,7 +110,7 @@ define i32 @uaddo_commute8(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_commute8( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[Y]], [[X:%.*]] -; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[X]], [[NOTY]] ;
CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[A]], i32 [[Z:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -125,7 +125,7 @@ define i32 @uaddo_wrong_pred1(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_wrong_pred1( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C:%.*]] = icmp ugt i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C:%.*]] = icmp ult i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C]], i32 [[Z:%.*]], i32 [[A]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -140,7 +140,7 @@ define i32 @uaddo_wrong_pred2(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @uaddo_wrong_pred2( ; CHECK-NEXT: [[NOTY:%.*]] = xor i32 [[Y:%.*]], -1 ; CHECK-NEXT: [[A:%.*]] = add i32 [[X:%.*]], [[Y]] -; CHECK-NEXT: [[C_NOT:%.*]] = icmp ugt i32 [[NOTY]], [[X]] +; CHECK-NEXT: [[C_NOT:%.*]] = icmp ult i32 [[X]], [[NOTY]] ; CHECK-NEXT: [[R:%.*]] = select i1 [[C_NOT]], i32 [[A]], i32 [[Z:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/umax-icmp.ll b/llvm/test/Transforms/InstCombine/umax-icmp.ll index 9946f3c390f0f..b4eea30bfc6af 100644 --- a/llvm/test/Transforms/InstCombine/umax-icmp.ll +++ b/llvm/test/Transforms/InstCombine/umax-icmp.ll @@ -95,7 +95,7 @@ define i1 @ule_umax2(i32 %x, i32 %y) { define i1 @ule_umax3(i32 %a, i32 %y) { ; CHECK-LABEL: @ule_umax3( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -110,7 +110,7 @@ define i1 @ule_umax3(i32 %a, i32 %y) { define i1 @ule_umax4(i32 %a, i32 %y) { ; CHECK-LABEL: @ule_umax4( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -207,7 +207,7 @@ define i1 @ugt_umax2(i32 %x, i32 %y) { define i1 @ugt_umax3(i32 %a, i32 %y) { ; CHECK-LABEL: @ugt_umax3( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -222,7 +222,7 @@ define i1 @ugt_umax3(i32 %a, i32 %y) { define i1 @ugt_umax4(i32 %a, i32 %y) { ; CHECK-LABEL: @ugt_umax4( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization diff --git a/llvm/test/Transforms/InstCombine/umin-icmp.ll b/llvm/test/Transforms/InstCombine/umin-icmp.ll index da901c6c5e484..cb23b2f00d292 100644 --- a/llvm/test/Transforms/InstCombine/umin-icmp.ll +++ b/llvm/test/Transforms/InstCombine/umin-icmp.ll @@ -95,7 +95,7 @@ define i1 @uge_umin2(i32 %x, i32 %y) { define i1 @uge_umin3(i32 %a, i32 %y) { ; CHECK-LABEL: @uge_umin3( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -110,7 +110,7 @@ define i1 @uge_umin3(i32 %a, i32 %y) { define i1 @uge_umin4(i32 %a, i32 %y) { ; CHECK-LABEL: @uge_umin4( ; CHECK-NEXT: [[X:%.*]] = add i32 
[[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ule i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp uge i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -207,7 +207,7 @@ define i1 @ult_umin2(i32 %x, i32 %y) { define i1 @ult_umin3(i32 %a, i32 %y) { ; CHECK-LABEL: @ult_umin3( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization @@ -222,7 +222,7 @@ define i1 @ult_umin3(i32 %a, i32 %y) { define i1 @ult_umin4(i32 %a, i32 %y) { ; CHECK-LABEL: @ult_umin4( ; CHECK-NEXT: [[X:%.*]] = add i32 [[A:%.*]], 3 -; CHECK-NEXT: [[CMP2:%.*]] = icmp ugt i32 [[X]], [[Y:%.*]] +; CHECK-NEXT: [[CMP2:%.*]] = icmp ult i32 [[Y:%.*]], [[X]] ; CHECK-NEXT: ret i1 [[CMP2]] ; %x = add i32 %a, 3 ; thwart complexity-based canonicalization diff --git a/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll b/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll index 8ab1f130f1cda..ec015e8ad2aaa 100644 --- a/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll +++ b/llvm/test/Transforms/InstCombine/unordered-compare-and-ordered.ll @@ -360,7 +360,7 @@ define i1 @fcmp_ord_and_fneg_ueq(half %x, half %y) { ; CHECK-LABEL: @fcmp_ord_and_fneg_ueq( ; CHECK-NEXT: [[FNEG_X:%.*]] = fneg half [[X:%.*]] ; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000 -; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_X]], [[Y:%.*]] +; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_X]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -389,7 +389,7 @@ define i1 @fcmp_ord_fneg_and_fneg_ueq(half %x, half %y) { ; CHECK-LABEL: @fcmp_ord_fneg_and_fneg_ueq( ; CHECK-NEXT: [[FNEG_X:%.*]] = fneg half [[X:%.*]] ; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000 -; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_X]], [[Y:%.*]] +; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_X]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -405,7 +405,7 @@ define i1 @fcmp_ord_and_fneg_fabs_ueq(half %x, half %y) { ; CHECK-NEXT: [[FABS_X:%.*]] = call half @llvm.fabs.f16(half [[X:%.*]]) ; CHECK-NEXT: [[FNEG_FABS_X:%.*]] = fneg half [[FABS_X]] ; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000 -; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[FNEG_FABS_X]], [[Y:%.*]] +; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[FNEG_FABS_X]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -451,7 +451,7 @@ define i1 @fcmp_ord_and_copysign_ueq_commute(half %x, half %y, half %z) { ; CHECK-LABEL: @fcmp_ord_and_copysign_ueq_commute( ; CHECK-NEXT: [[COPYSIGN_X_Y:%.*]] = call half @llvm.copysign.f16(half [[X:%.*]], half [[Z:%.*]]) ; CHECK-NEXT: [[ORD:%.*]] = fcmp ord half [[X]], 0xH0000 -; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[COPYSIGN_X_Y]], [[Y:%.*]] +; CHECK-NEXT: [[UEQ:%.*]] = fcmp ueq half [[Y:%.*]], [[COPYSIGN_X_Y]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[ORD]], [[UEQ]] ; CHECK-NEXT: ret i1 [[AND]] ; diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll index c5be9a7b769ce..5a0d283ff8bb6 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll +++ 
b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-add.ll @@ -108,7 +108,7 @@ define i1 @t5_commutative(i8 %x) { define i1 @t6_no_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @t6_no_extrause( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll index 1b41f609705ef..17b32670ae9d7 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check-via-xor.ll @@ -15,7 +15,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -28,7 +28,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t1_vec( ; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> ; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[T0]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t0 = xor <2 x i8> %y, <i8 -1, i8 -1> @@ -61,7 +61,7 @@ define i1 @t2_commutative(i8 %y) { define i1 @t3_no_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @t3_no_extrause( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -75,7 +75,7 @@ define i1 @n4_wrong_pred0(i8 %x, i8 %y) { ; CHECK-LABEL: @n4_wrong_pred0( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -88,7 +88,7 @@ define i1 @n5_wrong_pred1(i8 %x, i8 %y) { ; CHECK-LABEL: @n5_wrong_pred1( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -101,7 +101,7 @@ define i1 @n6_wrong_pred2(i8 %x, i8 %y) { ; CHECK-LABEL: @n6_wrong_pred2( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -114,7 +114,7 @@ define i1 @n7_wrong_pred3(i8 %x, i8 %y) { ; CHECK-LABEL: @n7_wrong_pred3( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -127,7 +127,7 @@ define i1 @n8_wrong_pred4(i8 %x, i8 %y) { ; CHECK-LABEL: @n8_wrong_pred4( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT:
ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -140,7 +140,7 @@ define i1 @n9_wrong_pred5(i8 %x, i8 %y) { ; CHECK-LABEL: @n9_wrong_pred5( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -153,7 +153,7 @@ define i1 @n10_wrong_pred6(i8 %x, i8 %y) { ; CHECK-LABEL: @n10_wrong_pred6( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -166,7 +166,7 @@ define i1 @n11_wrong_pred7(i8 %x, i8 %y) { ; CHECK-LABEL: @n11_wrong_pred7( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll index e7120a7d01cfa..677ef47456c01 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-lack-of-overflow-check.ll @@ -11,7 +11,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -22,7 +22,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> -; CHECK-NEXT: [[R:%.*]] = icmp uge <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t0 = add <2 x i8> %x, %y @@ -35,7 +35,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { define i1 @t2_symmetry(i8 %x, i8 %y) { ; CHECK-LABEL: @t2_symmetry( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -49,7 +49,7 @@ define i1 @t3_commutative(i8 %x) { ; CHECK-LABEL: @t3_commutative( ; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %y = call i8 @gen8() @@ -61,7 +61,7 @@ define i1 @t3_commutative(i8 %x) { define i1 @t4_commutative(i8 %x, i8 %y) { ; CHECK-LABEL: @t4_commutative( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -73,7 +73,7 @@ define i1 @t5_commutative(i8 %x) { ; CHECK-LABEL: @t5_commutative( ; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %y = call i8 @gen8() diff --git 
a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll index 23b89b7c1e65f..bfdcb8343f2d9 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-add.ll @@ -75,7 +75,7 @@ define i1 @t4_commutative(i8 %x, i8 %y) { ; CHECK-LABEL: @t4_commutative( ; CHECK-NEXT: [[T0:%.*]] = add i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[Y]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -104,7 +104,7 @@ define i1 @t5_commutative(i8 %x) { define i1 @t6_no_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @t6_no_extrause( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll index 646bd635807a7..457a0e594b630 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check-via-xor.ll @@ -15,7 +15,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -28,7 +28,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t1_vec( ; CHECK-NEXT: [[T0:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> ; CHECK-NEXT: call void @use2x8(<2 x i8> [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[T0]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t0 = xor <2 x i8> %y, <i8 -1, i8 -1> @@ -61,7 +61,7 @@ define i1 @t2_commutative(i8 %y) { define i1 @t3_no_extrause(i8 %x, i8 %y) { ; CHECK-LABEL: @t3_no_extrause( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -75,7 +75,7 @@ define i1 @n4_wrong_pred0(i8 %x, i8 %y) { ; CHECK-LABEL: @n4_wrong_pred0( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -88,7 +88,7 @@ define i1 @n5_wrong_pred1(i8 %x, i8 %y) { ; CHECK-LABEL: @n5_wrong_pred1( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -101,7 +101,7 @@ define i1 @n6_wrong_pred2(i8 %x, i8 %y) { ; CHECK-LABEL: @n6_wrong_pred2( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -114,7 +114,7 @@ define i1 @n7_wrong_pred3(i8 %x, i8 %y) { ; CHECK-LABEL: 
@n7_wrong_pred3( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ne i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -127,7 +127,7 @@ define i1 @n8_wrong_pred4(i8 %x, i8 %y) { ; CHECK-LABEL: @n8_wrong_pred4( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -140,7 +140,7 @@ define i1 @n9_wrong_pred5(i8 %x, i8 %y) { ; CHECK-LABEL: @n9_wrong_pred5( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -153,7 +153,7 @@ define i1 @n10_wrong_pred6(i8 %x, i8 %y) { ; CHECK-LABEL: @n10_wrong_pred6( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp slt i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 @@ -166,7 +166,7 @@ define i1 @n11_wrong_pred7(i8 %x, i8 %y) { ; CHECK-LABEL: @n11_wrong_pred7( ; CHECK-NEXT: [[T0:%.*]] = xor i8 [[Y:%.*]], -1 ; CHECK-NEXT: call void @use8(i8 [[T0]]) -; CHECK-NEXT: [[R:%.*]] = icmp sge i8 [[T0]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp sle i8 [[X:%.*]], [[T0]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = xor i8 %y, -1 diff --git a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll index 3533c6a54a22a..94966a1eba328 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-add-overflow-check.ll @@ -11,7 +11,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { ; CHECK-LABEL: @t0_basic( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -22,7 +22,7 @@ define i1 @t0_basic(i8 %x, i8 %y) { define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @t1_vec( ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i8> [[Y:%.*]], <i8 -1, i8 -1> -; CHECK-NEXT: [[R:%.*]] = icmp ult <2 x i8> [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt <2 x i8> [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret <2 x i1> [[R]] ; %t0 = add <2 x i8> %x, %y @@ -35,7 +35,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { define i1 @t2_symmetry(i8 %x, i8 %y) { ; CHECK-LABEL: @t2_symmetry( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[X:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[Y:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -49,7 +49,7 @@ define i1 @t3_commutative(i8 %x) { ; CHECK-LABEL: @t3_commutative( ; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %y = call i8 @gen8() @@ -61,7 +61,7 @@ define i1 @t3_commutative(i8 %x) { define i1 @t4_commutative(i8 %x, i8 %y) { ; CHECK-LABEL: @t4_commutative( ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y:%.*]], -1 -; CHECK-NEXT: [[R:%.*]] = 
icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = add i8 %x, %y @@ -73,7 +73,7 @@ define i1 @t5_commutative(i8 %x) { ; CHECK-LABEL: @t5_commutative( ; CHECK-NEXT: [[Y:%.*]] = call i8 @gen8() ; CHECK-NEXT: [[TMP1:%.*]] = xor i8 [[Y]], -1 -; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[TMP1]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[X:%.*]], [[TMP1]] ; CHECK-NEXT: ret i1 [[R]] ; %y = call i8 @gen8() diff --git a/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll index 500d61ac1b111..e844b321830a1 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-sub-lack-of-overflow-check.ll @@ -30,7 +30,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { define i1 @t2_commutative(i8 %x, i8 %y) { ; CHECK-LABEL: @t2_commutative( -; CHECK-NEXT: [[R:%.*]] = icmp ule i8 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp uge i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = sub i8 %x, %y diff --git a/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll b/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll index 5b273026dafe7..5f37b1d962345 100644 --- a/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll +++ b/llvm/test/Transforms/InstCombine/unsigned-sub-overflow-check.ll @@ -30,7 +30,7 @@ define <2 x i1> @t1_vec(<2 x i8> %x, <2 x i8> %y) { define i1 @t2_commutative(i8 %x, i8 %y) { ; CHECK-LABEL: @t2_commutative( -; CHECK-NEXT: [[R:%.*]] = icmp ugt i8 [[Y:%.*]], [[X:%.*]] +; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[X:%.*]], [[Y:%.*]] ; CHECK-NEXT: ret i1 [[R]] ; %t0 = sub i8 %x, %y diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll index a6d083276cbb5..1fd7903307cef 100644 --- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll +++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll @@ -1163,7 +1163,7 @@ define i4 @common_binop_demand_via_extelt_op0_mismatch_elt1(<2 x i4> %x, <2 x i4 define <2 x i8> @common_binop_demand_via_splat_mask_poison(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison( ; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> -; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]] +; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]] ; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[VV]], <2 x i8> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]] ; CHECK-NEXT: ret <2 x i8> [[RES]] @@ -1179,7 +1179,7 @@ define <2 x i8> @common_binop_demand_via_splat_mask_poison(<2 x i8> %x, <2 x i8> define <2 x i8> @common_binop_demand_via_splat_mask_poison_2(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison_2( ; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> -; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]] +; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]] ; CHECK-NEXT: [[M:%.*]] = add <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[M]], <2 x i8> [[Y]], <2 x i32> ; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]] @@ -1196,7 +1196,7 @@ define <2 x i8> @common_binop_demand_via_splat_mask_poison_2(<2 x i8> %x, <2 x i define <2 x i8> 
@common_binop_demand_via_splat_mask_poison_3(<2 x i8> %x, <2 x i8> %y) { ; CHECK-LABEL: @common_binop_demand_via_splat_mask_poison_3( ; CHECK-NEXT: [[YSPLAT:%.*]] = shufflevector <2 x i8> [[Y:%.*]], <2 x i8> poison, <2 x i32> -; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[YSPLAT]], [[X:%.*]] +; CHECK-NEXT: [[VV:%.*]] = add <2 x i8> [[X:%.*]], [[YSPLAT]] ; CHECK-NEXT: [[M:%.*]] = add <2 x i8> [[X]], [[Y]] ; CHECK-NEXT: [[MSPLAT:%.*]] = shufflevector <2 x i8> [[M]], <2 x i8> poison, <2 x i32> zeroinitializer ; CHECK-NEXT: [[RES:%.*]] = add <2 x i8> [[VV]], [[MSPLAT]] diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll index a9cdc8bd20247..0f233fbb4729e 100644 --- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll +++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll @@ -1611,7 +1611,7 @@ define <2 x float> @splat_assoc_fmul(<2 x float> %x, <2 x float> %y) { define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> [[R]] @@ -1625,7 +1625,7 @@ define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { define <3 x i8> @splat_assoc_mul_undef_elt1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul_undef_elt1( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> [[R]] @@ -1641,7 +1641,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> % ; CHECK-LABEL: @splat_assoc_mul_undef_elt2( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1654,7 +1654,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> % define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index1( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> [[R]] @@ -1670,7 +1670,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index2(<3 x i8> %x, <3 x i8> ; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index2( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: 
[[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1687,7 +1687,7 @@ define <3 x i8> @splat_assoc_or(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_or( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: [[R:%.*]] = or <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1750,7 +1750,7 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) { define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) { ; CHECK-LABEL: @splat_assoc_xor( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <5 x i32> zeroinitializer -; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[Y:%.*]], [[SPLATX]] ; CHECK-NEXT: [[R:%.*]] = xor <5 x i32> [[TMP1]], ; CHECK-NEXT: ret <5 x i32> [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll index 8c91efb473fae..75a84e51279b8 100644 --- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll +++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll @@ -1616,7 +1616,7 @@ define <2 x float> @splat_assoc_fmul(<2 x float> %x, <2 x float> %y) { define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> [[R]] @@ -1630,7 +1630,7 @@ define <3 x i8> @splat_assoc_mul(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { define <3 x i8> @splat_assoc_mul_undef_elt1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul_undef_elt1( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> [[R]] @@ -1646,7 +1646,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> % ; CHECK-LABEL: @splat_assoc_mul_undef_elt2( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1659,7 +1659,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt2(<3 x i8> %x, <3 x i8> %y, <3 x i8> % define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index1(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index1( -; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[Z:%.*]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = mul <3 x i8> [[X:%.*]], [[Z:%.*]] ; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <3 x i8> [[TMP1]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[R:%.*]] = mul <3 x i8> [[TMP2]], [[Y:%.*]] ; CHECK-NEXT: ret <3 x i8> 
[[R]] @@ -1675,7 +1675,7 @@ define <3 x i8> @splat_assoc_mul_undef_elt_at_splat_index2(<3 x i8> %x, <3 x i8> ; CHECK-LABEL: @splat_assoc_mul_undef_elt_at_splat_index2( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = mul nsw <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: [[R:%.*]] = mul nuw nsw <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1692,7 +1692,7 @@ define <3 x i8> @splat_assoc_or(<3 x i8> %x, <3 x i8> %y, <3 x i8> %z) { ; CHECK-LABEL: @splat_assoc_or( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <3 x i8> [[X:%.*]], <3 x i8> poison, <3 x i32> ; CHECK-NEXT: [[SPLATZ:%.*]] = shufflevector <3 x i8> [[Z:%.*]], <3 x i8> poison, <3 x i32> -; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[SPLATZ]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = or <3 x i8> [[Y:%.*]], [[SPLATZ]] ; CHECK-NEXT: [[R:%.*]] = or <3 x i8> [[A]], [[SPLATX]] ; CHECK-NEXT: ret <3 x i8> [[R]] ; @@ -1755,7 +1755,7 @@ define <3 x i32> @splat_assoc_and(<4 x i32> %x, <3 x i32> %y) { define <5 x i32> @splat_assoc_xor(<4 x i32> %x, <5 x i32> %y) { ; CHECK-LABEL: @splat_assoc_xor( ; CHECK-NEXT: [[SPLATX:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <5 x i32> zeroinitializer -; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[SPLATX]], [[Y:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = xor <5 x i32> [[Y:%.*]], [[SPLATX]] ; CHECK-NEXT: [[R:%.*]] = xor <5 x i32> [[TMP1]], ; CHECK-NEXT: ret <5 x i32> [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/vector-reverse.ll b/llvm/test/Transforms/InstCombine/vector-reverse.ll index a1a6ee949a138..c9c68d2241b34 100644 --- a/llvm/test/Transforms/InstCombine/vector-reverse.ll +++ b/llvm/test/Transforms/InstCombine/vector-reverse.ll @@ -250,7 +250,7 @@ define <vscale x 4 x i1> @icmp_reverse_splat_RHS(<vscale x 4 x i32> %a, i32 %b) ; CHECK-LABEL: @icmp_reverse_splat_RHS( ; CHECK-NEXT: [[B_INSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0 ; CHECK-NEXT: [[B_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[B_INSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer -; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <vscale x 4 x i32> [[B_SPLAT]], [[A:%.*]] +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt <vscale x 4 x i32> [[A:%.*]], [[B_SPLAT]] ; CHECK-NEXT: [[CMP:%.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> [[CMP1]]) ; CHECK-NEXT: ret <vscale x 4 x i1> [[CMP]] ; diff --git a/llvm/test/Transforms/InstCombine/vector-xor.ll b/llvm/test/Transforms/InstCombine/vector-xor.ll index 5c96f1a691ed0..13894ef85b5da 100644 --- a/llvm/test/Transforms/InstCombine/vector-xor.ll +++ b/llvm/test/Transforms/InstCombine/vector-xor.ll @@ -6,7 +6,7 @@ define <4 x i32> @test_v4i32_xor_repeated_and_0(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) { ; CHECK-LABEL: @test_v4i32_xor_repeated_and_0( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; %1 = and <4 x i32> %a, %b @@ -18,7 +18,7 @@ define <4 x i32> @test_v4i32_xor_repeated_and_1(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) { ; CHECK-LABEL: @test_v4i32_xor_repeated_and_1( ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]] -; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[A:%.*]], [[TMP1]] ; CHECK-NEXT: ret <4 x i32> [[TMP2]] ; %1 = and <4 x i32> %a, %b @@ -69,7 
+69,7 @@ define <4 x i32> @test_v4i32_xor_bswap_const_poison(<4 x i32> %a0) { define <4 x i32> @test_v4i32_demorgan_and(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: @test_v4i32_demorgan_and( ; CHECK-NEXT: [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1> -; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i32> [[Y_NOT]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = or <4 x i32> [[X:%.*]], [[Y_NOT]] ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; %1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x @@ -83,7 +83,7 @@ define <4 x i32> @test_v4i32_demorgan_and(<4 x i32> %x, <4 x i32> %y) { define <4 x i32> @test_v4i32_demorgan_or(<4 x i32> %x, <4 x i32> %y) { ; CHECK-LABEL: @test_v4i32_demorgan_or( ; CHECK-NEXT: [[Y_NOT:%.*]] = xor <4 x i32> [[Y:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1> -; CHECK-NEXT: [[TMP1:%.*]] = and <4 x i32> [[Y_NOT]], [[X:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and <4 x i32> [[X:%.*]], [[Y_NOT]] ; CHECK-NEXT: ret <4 x i32> [[TMP1]] ; %1 = xor <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x diff --git a/llvm/test/Transforms/InstCombine/widenable-conditions.ll b/llvm/test/Transforms/InstCombine/widenable-conditions.ll index 0e377c9fa4862..46a93580e9c78 100644 --- a/llvm/test/Transforms/InstCombine/widenable-conditions.ll +++ b/llvm/test/Transforms/InstCombine/widenable-conditions.ll @@ -7,7 +7,7 @@ target triple = "x86_64-unknown-linux-gnu" define i1 @test1(i1 %a, i1 %b) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A:%.*]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -20,7 +20,7 @@ define i1 @test1(i1 %a, i1 %b) { define i1 @test1_logical(i1 %a, i1 %b) { ; CHECK-LABEL: @test1_logical( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A:%.*]], i1 false ; CHECK-NEXT: ret i1 [[AND]] ; @@ -34,7 +34,7 @@ define i1 @test1_logical(i1 %a, i1 %b) { define i1 @test1b(i1 %a, i1 %b) { ; CHECK-LABEL: @test1b( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]] ; CHECK-NEXT: call void @use(i1 [[LHS]]) ; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A:%.*]] ; CHECK-NEXT: ret i1 [[AND]] @@ -49,7 +49,7 @@ define i1 @test1b(i1 %a, i1 %b) { define i1 @test1b_logical(i1 %a, i1 %b) { ; CHECK-LABEL: @test1b_logical( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B:%.*]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B:%.*]], [[WC]] ; CHECK-NEXT: call void @use(i1 [[LHS]]) ; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A:%.*]], i1 false ; CHECK-NEXT: ret i1 [[AND]] @@ -68,7 +68,7 @@ define i1 @test1c(i1 %a, i1 %b) { ; CHECK-NEXT: call void @use(i1 [[B:%.*]]) ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() ; CHECK-NEXT: call void @use(i1 [[WC]]) -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B]], [[WC]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[A]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -87,7 +87,7 @@ define i1 @test1c_logical(i1 %a, i1 %b) { ; CHECK-NEXT: call void @use(i1 [[B:%.*]]) ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() ; CHECK-NEXT: call void @use(i1 [[WC]]) -; CHECK-NEXT: [[LHS:%.*]] = and i1 [[WC]], [[B]] +; CHECK-NEXT: [[LHS:%.*]] = and i1 [[B]],
[[WC]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[A]], i1 false ; CHECK-NEXT: ret i1 [[AND]] ; @@ -132,7 +132,7 @@ define i1 @test3(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() ; CHECK-NEXT: [[LHS:%.*]] = and i1 [[A:%.*]], [[B:%.*]] -; CHECK-NEXT: [[RHS:%.*]] = and i1 [[WC]], [[C:%.*]] +; CHECK-NEXT: [[RHS:%.*]] = and i1 [[C:%.*]], [[WC]] ; CHECK-NEXT: [[AND:%.*]] = and i1 [[LHS]], [[RHS]] ; CHECK-NEXT: ret i1 [[AND]] ; @@ -147,7 +147,7 @@ define i1 @test3_logical(i1 %a, i1 %b, i1 %c) { ; CHECK-LABEL: @test3_logical( ; CHECK-NEXT: [[WC:%.*]] = call i1 @llvm.experimental.widenable.condition() ; CHECK-NEXT: [[LHS:%.*]] = select i1 [[A:%.*]], i1 [[B:%.*]], i1 false -; CHECK-NEXT: [[RHS:%.*]] = and i1 [[WC]], [[C:%.*]] +; CHECK-NEXT: [[RHS:%.*]] = and i1 [[C:%.*]], [[WC]] ; CHECK-NEXT: [[AND:%.*]] = select i1 [[LHS]], i1 [[RHS]], i1 false ; CHECK-NEXT: ret i1 [[AND]] ; diff --git a/llvm/test/Transforms/InstCombine/xor.ll b/llvm/test/Transforms/InstCombine/xor.ll index 2ff95821f4e00..ea7f7382ee7c8 100644 --- a/llvm/test/Transforms/InstCombine/xor.ll +++ b/llvm/test/Transforms/InstCombine/xor.ll @@ -72,8 +72,8 @@ define i32 @test7(i32 %A, i32 %B) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[A1:%.*]] = and i32 [[A:%.*]], 7 ; CHECK-NEXT: [[B1:%.*]] = and i32 [[B:%.*]], 128 -; CHECK-NEXT: [[C11:%.*]] = or disjoint i32 [[A1]], [[B1]] -; CHECK-NEXT: ret i32 [[C11]] +; CHECK-NEXT: [[C1:%.*]] = or disjoint i32 [[A1]], [[B1]] +; CHECK-NEXT: ret i32 [[C1]] ; %A1 = and i32 %A, 7 %B1 = and i32 %B, 128 @@ -122,8 +122,8 @@ define <2 x i1> @test9vec(<2 x i8> %a) { define i8 @test10(i8 %A) { ; CHECK-LABEL: @test10( ; CHECK-NEXT: [[B:%.*]] = and i8 [[A:%.*]], 3 -; CHECK-NEXT: [[C1:%.*]] = or disjoint i8 [[B]], 4 -; CHECK-NEXT: ret i8 [[C1]] +; CHECK-NEXT: [[C:%.*]] = or disjoint i8 [[B]], 4 +; CHECK-NEXT: ret i8 [[C]] ; %B = and i8 %A, 3 %C = xor i8 %B, 4 @@ -253,7 +253,7 @@ define i1 @test24(i32 %c, i32 %d) { define i32 @test25(i32 %g, i32 %h) { ; CHECK-LABEL: @test25( -; CHECK-NEXT: [[T4:%.*]] = and i32 [[H:%.*]], [[G:%.*]] +; CHECK-NEXT: [[T4:%.*]] = and i32 [[G:%.*]], [[H:%.*]] ; CHECK-NEXT: ret i32 [[T4]] ; %h2 = xor i32 %h, -1 @@ -487,7 +487,7 @@ define i32 @or_xor_extra_use(i32 %a, i32 %b, ptr %p) { ; CHECK-LABEL: @or_xor_extra_use( ; CHECK-NEXT: [[O:%.*]] = or i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]] +; CHECK-NEXT: [[R:%.*]] = xor i32 [[B]], [[O]] ; CHECK-NEXT: ret i32 [[R]] ; %o = or i32 %a, %b @@ -572,7 +572,7 @@ define i32 @and_xor_extra_use(i32 %a, i32 %b, ptr %p) { ; CHECK-LABEL: @and_xor_extra_use( ; CHECK-NEXT: [[O:%.*]] = and i32 [[A:%.*]], [[B:%.*]] ; CHECK-NEXT: store i32 [[O]], ptr [[P:%.*]], align 4 -; CHECK-NEXT: [[R:%.*]] = xor i32 [[O]], [[B]] +; CHECK-NEXT: [[R:%.*]] = xor i32 [[B]], [[O]] ; CHECK-NEXT: ret i32 [[R]] ; %o = and i32 %a, %b @@ -773,7 +773,7 @@ define <4 x i32> @test46(<4 x i32> %x) { define i32 @test47(i32 %x, i32 %y, i32 %z) { ; CHECK-LABEL: @test47( ; CHECK-NEXT: [[NOTX:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[NOTX]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[UMAX:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[NOTX]]) ; CHECK-NEXT: [[UMIN:%.*]] = xor i32 [[UMAX]], -1 ; CHECK-NEXT: [[ADD:%.*]] = add i32 [[UMAX]], [[Z:%.*]] ; CHECK-NEXT: [[RES:%.*]] = mul i32 [[ADD]], [[UMIN]] @@ -988,7 +988,7 @@ define i4 @or_or_xor_use2(i4 %x, i4 %y, i4 %z, ptr %p) { define i32 
@not_is_canonical(i32 %x, i32 %y) { ; CHECK-LABEL: @not_is_canonical( ; CHECK-NEXT: [[SUB:%.*]] = xor i32 [[X:%.*]], -1 -; CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[Y:%.*]] +; CHECK-NEXT: [[ADD:%.*]] = add i32 [[Y:%.*]], [[SUB]] ; CHECK-NEXT: [[MUL:%.*]] = shl i32 [[ADD]], 2 ; CHECK-NEXT: ret i32 [[MUL]] ; @@ -1175,7 +1175,7 @@ define <2 x i32> @xor_andn_commute1(<2 x i32> %a, <2 x i32> %b) { define i33 @xor_andn_commute2(i33 %a, i33 %pb) { ; CHECK-LABEL: @xor_andn_commute2( ; CHECK-NEXT: [[B:%.*]] = udiv i33 42, [[PB:%.*]] -; CHECK-NEXT: [[Z:%.*]] = or i33 [[B]], [[A:%.*]] +; CHECK-NEXT: [[Z:%.*]] = or i33 [[A:%.*]], [[B]] ; CHECK-NEXT: ret i33 [[Z]] ; %b = udiv i33 42, %pb ; thwart complexity-based canonicalization @@ -1252,7 +1252,7 @@ define i8 @xor_orn_commute1(i8 %pa, i8 %b) { define i32 @xor_orn_commute2(i32 %a, i32 %pb,ptr %s) { ; CHECK-LABEL: @xor_orn_commute2( ; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A:%.*]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], [[B]] ; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1 ; CHECK-NEXT: ret i32 [[Z]] ; @@ -1268,7 +1268,7 @@ define i32 @xor_orn_commute2_1use(i32 %a, i32 %pb,ptr %s) { ; CHECK-NEXT: [[B:%.*]] = udiv i32 42, [[PB:%.*]] ; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1 ; CHECK-NEXT: store i32 [[NOTA]], ptr [[S:%.*]], align 4 -; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B]], [[A]] +; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A]], [[B]] ; CHECK-NEXT: [[Z:%.*]] = xor i32 [[TMP1]], -1 ; CHECK-NEXT: ret i32 [[Z]] ; @@ -1321,7 +1321,7 @@ define i32 @xor_orn_2use(i32 %a, i32 %b, ptr %s1, ptr %s2) { ; CHECK-LABEL: @xor_orn_2use( ; CHECK-NEXT: [[NOTA:%.*]] = xor i32 [[A:%.*]], -1 ; CHECK-NEXT: store i32 [[NOTA]], ptr [[S1:%.*]], align 4 -; CHECK-NEXT: [[L:%.*]] = or i32 [[NOTA]], [[B:%.*]] +; CHECK-NEXT: [[L:%.*]] = or i32 [[B:%.*]], [[NOTA]] ; CHECK-NEXT: store i32 [[L]], ptr [[S2:%.*]], align 4 ; CHECK-NEXT: [[Z:%.*]] = xor i32 [[L]], [[A]] ; CHECK-NEXT: ret i32 [[Z]] @@ -1367,7 +1367,7 @@ define <2 x i8> @cttz_pow2(<2 x i8> %x, <2 x i8> %y) { define i32 @ctlz_pow2_or_zero(i32 %x) { ; CHECK-LABEL: @ctlz_pow2_or_zero( ; CHECK-NEXT: [[N:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[A:%.*]] = and i32 [[N]], [[X]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], [[N]] ; CHECK-NEXT: [[Z:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[A]], i1 false) ; CHECK-NEXT: [[R:%.*]] = xor i32 [[Z]], 31 ; CHECK-NEXT: ret i32 [[R]] @@ -1384,7 +1384,7 @@ define i32 @ctlz_pow2_or_zero(i32 %x) { define i32 @ctlz_pow2_wrong_const(i32 %x) { ; CHECK-LABEL: @ctlz_pow2_wrong_const( ; CHECK-NEXT: [[N:%.*]] = sub i32 0, [[X:%.*]] -; CHECK-NEXT: [[A:%.*]] = and i32 [[N]], [[X]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[X]], [[N]] ; CHECK-NEXT: [[Z:%.*]] = call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 [[A]], i1 true) ; CHECK-NEXT: [[R:%.*]] = xor i32 [[Z]], 30 ; CHECK-NEXT: ret i32 [[R]] @@ -1459,7 +1459,7 @@ define i4 @PR96857_xor_with_noundef(i4 %val0, i4 %val1, i4 noundef %val2) { ; CHECK-LABEL: @PR96857_xor_with_noundef( ; CHECK-NEXT: [[VAL4:%.*]] = and i4 [[VAL2:%.*]], [[VAL0:%.*]] ; CHECK-NEXT: [[VAL5:%.*]] = xor i4 [[VAL2]], -1 -; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL5]], [[VAL1:%.*]] +; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL1:%.*]], [[VAL5]] ; CHECK-NEXT: [[VAL7:%.*]] = or disjoint i4 [[VAL4]], [[VAL6]] ; CHECK-NEXT: ret i4 [[VAL7]] ; @@ -1475,7 +1475,7 @@ define i4 @PR96857_xor_without_noundef(i4 %val0, i4 %val1, i4 %val2) { ; CHECK-LABEL: @PR96857_xor_without_noundef( ; CHECK-NEXT: [[VAL4:%.*]] = and i4 
[[VAL2:%.*]], [[VAL0:%.*]] ; CHECK-NEXT: [[VAL5:%.*]] = xor i4 [[VAL2]], -1 -; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL5]], [[VAL1:%.*]] +; CHECK-NEXT: [[VAL6:%.*]] = and i4 [[VAL1:%.*]], [[VAL5]] ; CHECK-NEXT: [[VAL7:%.*]] = or i4 [[VAL4]], [[VAL6]] ; CHECK-NEXT: ret i4 [[VAL7]] ; diff --git a/llvm/test/Transforms/InstCombine/xor2.ll b/llvm/test/Transforms/InstCombine/xor2.ll index 7d12a00a8bd51..0b4fca76ed0a7 100644 --- a/llvm/test/Transforms/InstCombine/xor2.ll +++ b/llvm/test/Transforms/InstCombine/xor2.ll @@ -36,8 +36,8 @@ define i1 @test1(i32 %A) { define i32 @test2(i32 %t1) { ; CHECK-LABEL: @test2( ; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32 -; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8 -; CHECK-NEXT: ret i32 [[OV1101]] +; CHECK-NEXT: [[OV110:%.*]] = or disjoint i32 [[OVM]], 8 +; CHECK-NEXT: ret i32 [[OV110]] ; %ovm = and i32 %t1, 32 %ov3 = add i32 %ovm, 145 @@ -48,8 +48,8 @@ define i32 @test2(i32 %t1) { define i32 @test3(i32 %t1) { ; CHECK-LABEL: @test3( ; CHECK-NEXT: [[OVM:%.*]] = and i32 [[T1:%.*]], 32 -; CHECK-NEXT: [[OV1101:%.*]] = or disjoint i32 [[OVM]], 8 -; CHECK-NEXT: ret i32 [[OV1101]] +; CHECK-NEXT: [[OV110:%.*]] = or disjoint i32 [[OVM]], 8 +; CHECK-NEXT: ret i32 [[OV110]] ; %ovm = or i32 %t1, 145 %ov31 = and i32 %ovm, 177 @@ -99,7 +99,7 @@ define i32 @test6(i32 %x) { define i32 @test7(i32 %a, i32 %b) { ; CHECK-LABEL: @test7( ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1 -; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = or i32 [[A:%.*]], [[B_NOT]] ; CHECK-NEXT: ret i32 [[XOR]] ; %or = or i32 %a, %b @@ -112,7 +112,7 @@ define i32 @test7(i32 %a, i32 %b) { define i32 @test8(i32 %a, i32 %b) { ; CHECK-LABEL: @test8( ; CHECK-NEXT: [[B_NOT:%.*]] = xor i32 [[B:%.*]], -1 -; CHECK-NEXT: [[XOR:%.*]] = or i32 [[B_NOT]], [[A:%.*]] +; CHECK-NEXT: [[XOR:%.*]] = or i32 [[A:%.*]], [[B_NOT]] ; CHECK-NEXT: ret i32 [[XOR]] ; %neg = xor i32 %a, -1 @@ -233,7 +233,7 @@ define i32 @test11e(i32 %A, i32 %B, i32 %C) { ; CHECK-LABEL: @test11e( ; CHECK-NEXT: [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[FORCE]] ; CHECK-NEXT: [[XOR2:%.*]] = xor i32 [[TMP1]], -1 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]] ; CHECK-NEXT: ret i32 [[AND]] @@ -250,7 +250,7 @@ define i32 @test11f(i32 %A, i32 %B, i32 %C) { ; CHECK-LABEL: @test11f( ; CHECK-NEXT: [[FORCE:%.*]] = mul i32 [[B:%.*]], [[C:%.*]] ; CHECK-NEXT: [[XOR1:%.*]] = xor i32 [[FORCE]], [[A:%.*]] -; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[FORCE]], [[A]] +; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A]], [[FORCE]] ; CHECK-NEXT: [[XOR2:%.*]] = xor i32 [[TMP1]], -1 ; CHECK-NEXT: [[AND:%.*]] = and i32 [[XOR1]], [[XOR2]] ; CHECK-NEXT: ret i32 [[AND]] @@ -324,7 +324,7 @@ define i32 @test13commuted(i32 %a, i32 %b) { define i32 @xor_or_xor_common_op_commute1(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute1( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -339,7 +339,7 @@ define i32 @xor_or_xor_common_op_commute1(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute2(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute2( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: 
[[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -354,7 +354,7 @@ define i32 @xor_or_xor_common_op_commute2(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute3(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute3( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -369,7 +369,7 @@ define i32 @xor_or_xor_common_op_commute3(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute4(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute4( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -384,7 +384,7 @@ define i32 @xor_or_xor_common_op_commute4(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute5(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute5( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -399,7 +399,7 @@ define i32 @xor_or_xor_common_op_commute5(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute6(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute6( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -414,7 +414,7 @@ define i32 @xor_or_xor_common_op_commute6(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute7(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute7( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; @@ -429,7 +429,7 @@ define i32 @xor_or_xor_common_op_commute7(i32 %a, i32 %b, i32 %c) { define i32 @xor_or_xor_common_op_commute8(i32 %a, i32 %b, i32 %c) { ; CHECK-LABEL: @xor_or_xor_common_op_commute8( ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], -1 -; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[B:%.*]] +; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[B:%.*]], [[TMP1]] ; CHECK-NEXT: [[R:%.*]] = xor i32 [[TMP2]], [[C:%.*]] ; CHECK-NEXT: ret i32 [[R]] ; diff --git a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll index 12739b5686a0a..c9da18d3d88bd 100644 --- a/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll +++ b/llvm/test/Transforms/InstCombine/zext-bool-add-sub.ll @@ -268,7 +268,7 @@ define <2 x i64> @sext_sub_const_vec_poison_elt(<2 x i1> %A) { define i8 @sext_sub(i8 %x, i1 %y) { ; CHECK-LABEL: @sext_sub( ; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext i1 [[Y:%.*]] to i8 -; CHECK-NEXT: [[SUB:%.*]] = add i8 [[SEXT_NEG]], [[X:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[SEXT_NEG]] ; CHECK-NEXT: ret i8 [[SUB]] ; %sext = 
sext i1 %y to i8 @@ -281,7 +281,7 @@ define i8 @sext_sub(i8 %x, i1 %y) { define <2 x i8> @sext_sub_vec(<2 x i8> %x, <2 x i1> %y) { ; CHECK-LABEL: @sext_sub_vec( ; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8> -; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[SEXT_NEG]], [[X:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[SEXT_NEG]] ; CHECK-NEXT: ret <2 x i8> [[SUB]] ; %sext = sext <2 x i1> %y to <2 x i8> @@ -294,7 +294,7 @@ define <2 x i8> @sext_sub_vec(<2 x i8> %x, <2 x i1> %y) { define <2 x i8> @sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) { ; CHECK-LABEL: @sext_sub_vec_nsw( ; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext <2 x i1> [[Y:%.*]] to <2 x i8> -; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[SEXT_NEG]], [[X:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = add <2 x i8> [[X:%.*]], [[SEXT_NEG]] ; CHECK-NEXT: ret <2 x i8> [[SUB]] ; %sext = sext <2 x i1> %y to <2 x i8> @@ -307,7 +307,7 @@ define <2 x i8> @sext_sub_vec_nsw(<2 x i8> %x, <2 x i1> %y) { define i8 @sext_sub_nuw(i8 %x, i1 %y) { ; CHECK-LABEL: @sext_sub_nuw( ; CHECK-NEXT: [[SEXT_NEG:%.*]] = zext i1 [[Y:%.*]] to i8 -; CHECK-NEXT: [[SUB:%.*]] = add i8 [[SEXT_NEG]], [[X:%.*]] +; CHECK-NEXT: [[SUB:%.*]] = add i8 [[X:%.*]], [[SEXT_NEG]] ; CHECK-NEXT: ret i8 [[SUB]] ; %sext = sext i1 %y to i8 @@ -318,7 +318,7 @@ define i8 @sext_sub_nuw(i8 %x, i1 %y) { define i32 @sextbool_add(i1 %c, i32 %x) { ; CHECK-LABEL: @sextbool_add( ; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32 -; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]] +; CHECK-NEXT: [[S:%.*]] = add i32 [[X:%.*]], [[B]] ; CHECK-NEXT: ret i32 [[S]] ; %b = sext i1 %c to i32 @@ -347,7 +347,7 @@ define i32 @sextbool_add_uses(i1 %c, i32 %x) { ; CHECK-LABEL: @sextbool_add_uses( ; CHECK-NEXT: [[B:%.*]] = sext i1 [[C:%.*]] to i32 ; CHECK-NEXT: call void @use32(i32 [[B]]) -; CHECK-NEXT: [[S:%.*]] = add i32 [[B]], [[X:%.*]] +; CHECK-NEXT: [[S:%.*]] = add i32 [[X:%.*]], [[B]] ; CHECK-NEXT: ret i32 [[S]] ; %b = sext i1 %c to i32 @@ -359,7 +359,7 @@ define i32 @sextbool_add_uses(i1 %c, i32 %x) { define <4 x i32> @sextbool_add_vector(<4 x i1> %c, <4 x i32> %x) { ; CHECK-LABEL: @sextbool_add_vector( ; CHECK-NEXT: [[B:%.*]] = sext <4 x i1> [[C:%.*]] to <4 x i32> -; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[B]], [[X:%.*]] +; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[X:%.*]], [[B]] ; CHECK-NEXT: ret <4 x i32> [[S]] ; %b = sext <4 x i1> %c to <4 x i32> @@ -394,7 +394,7 @@ define i32 @zextbool_sub_uses(i1 %c, i32 %x) { define <4 x i32> @zextbool_sub_vector(<4 x i1> %c, <4 x i32> %x) { ; CHECK-LABEL: @zextbool_sub_vector( ; CHECK-NEXT: [[B_NEG:%.*]] = sext <4 x i1> [[C:%.*]] to <4 x i32> -; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[B_NEG]], [[X:%.*]] +; CHECK-NEXT: [[S:%.*]] = add <4 x i32> [[X:%.*]], [[B_NEG]] ; CHECK-NEXT: ret <4 x i32> [[S]] ; %b = zext <4 x i1> %c to <4 x i32> diff --git a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll index a4b74aa8cc7dc..acf547b55722f 100644 --- a/llvm/test/Transforms/InstCombine/zext-or-icmp.ll +++ b/llvm/test/Transforms/InstCombine/zext-or-icmp.ll @@ -181,7 +181,7 @@ define i8 @PR49475_infloop(i32 %t0, i16 %insert, i64 %e, i8 %i162) { ; CHECK-NEXT: [[CONV18:%.*]] = ashr exact i64 [[SEXT]], 32 ; CHECK-NEXT: [[CMP:%.*]] = icmp sge i64 [[XOR]], [[CONV18]] ; CHECK-NEXT: [[TRUNC44:%.*]] = zext i1 [[CMP]] to i8 -; CHECK-NEXT: [[INC:%.*]] = add i8 [[TRUNC44]], [[I162]] +; CHECK-NEXT: [[INC:%.*]] = add i8 [[I162]], [[TRUNC44]] ; CHECK-NEXT: [[TOBOOL23_NOT:%.*]] = xor i1 [[CMP]], true ; CHECK-NEXT: call void 
@llvm.assume(i1 [[TOBOOL23_NOT]]) ; CHECK-NEXT: ret i8 [[INC]] diff --git a/llvm/test/Transforms/InstCombine/zext.ll b/llvm/test/Transforms/InstCombine/zext.ll index 88cd9c70af40d..7b2cf131c396a 100644 --- a/llvm/test/Transforms/InstCombine/zext.ll +++ b/llvm/test/Transforms/InstCombine/zext.ll @@ -546,7 +546,7 @@ define i64 @and_trunc_extra_use1(i64 %x, i32 %y) { ; CHECK-LABEL: @and_trunc_extra_use1( ; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32 ; CHECK-NEXT: call void @use32(i32 [[T]]) -; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]] ; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64 ; CHECK-NEXT: ret i64 [[Z]] ; @@ -581,7 +581,7 @@ define i64 @and_trunc_extra_use1_commute(i64 %x, i32 %p) { define i64 @and_trunc_extra_use2(i64 %x, i32 %y) { ; CHECK-LABEL: @and_trunc_extra_use2( ; CHECK-NEXT: [[T:%.*]] = trunc i64 [[X:%.*]] to i32 -; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]] ; CHECK-NEXT: call void @use32(i32 [[A]]) ; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64 ; CHECK-NEXT: ret i64 [[Z]] @@ -635,7 +635,7 @@ define i64 @and_trunc_extra_use1_wider_src(i65 %x, i32 %y) { ; CHECK-LABEL: @and_trunc_extra_use1_wider_src( ; CHECK-NEXT: [[T:%.*]] = trunc i65 [[X:%.*]] to i32 ; CHECK-NEXT: call void @use32(i32 [[T]]) -; CHECK-NEXT: [[A:%.*]] = and i32 [[T]], [[Y:%.*]] +; CHECK-NEXT: [[A:%.*]] = and i32 [[Y:%.*]], [[T]] ; CHECK-NEXT: [[Z:%.*]] = zext i32 [[A]] to i64 ; CHECK-NEXT: ret i64 [[Z]] ; @@ -782,7 +782,7 @@ define i64 @evaluate_zexted_const_expr(i1 %c) { define i16 @zext_nneg_flag_drop(i8 %x, i16 %y) { ; CHECK-LABEL: @zext_nneg_flag_drop( ; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[X:%.*]] to i16 -; CHECK-NEXT: [[OR1:%.*]] = or i16 [[EXT]], [[Y:%.*]] +; CHECK-NEXT: [[OR1:%.*]] = or i16 [[Y:%.*]], [[EXT]] ; CHECK-NEXT: [[OR2:%.*]] = or i16 [[OR1]], 128 ; CHECK-NEXT: ret i16 [[OR2]] ; diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll index ed8d8e15282d5..6953d6c48694c 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/deterministic-type-shrinkage.ll @@ -110,7 +110,7 @@ define void @test_pr25490(i32 %n, ptr noalias nocapture %a, ptr noalias nocaptur ; CHECK-NEXT: store i8 [[CONV12]], ptr [[ARRAYIDX8]], align 1 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; entry: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll index 6f62f2f2096f1..4768167a9c69f 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-cond-inv-loads.ll @@ -7,12 +7,12 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp 
ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 @@ -31,7 +31,7 @@ define void @cond_inv_load_i32i32i16(ptr noalias nocapture %a, ptr noalias nocap ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -86,12 +86,12 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 @@ -109,7 +109,7 @@ define void @cond_inv_load_f64f64f64(ptr noalias nocapture %a, ptr noalias nocap ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -162,12 +162,12 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -189,7 +189,7 @@ define void @invariant_load_cond(ptr noalias nocapture %a, ptr nocapture readonl ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll index 2a80a7affa4f8..dac64c3d0f58d 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll @@ -7,12 +7,12 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -28,7 +28,7 @@ define void @gather_nxv4i32_ind64(ptr noalias nocapture readonly %a, ptr noalias ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -74,12 +74,12 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] @@ -96,7 +96,7 @@ define void @scatter_nxv4i32_ind32(ptr noalias nocapture %a, ptr noalias nocaptu ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 
[[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -141,12 +141,12 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 @@ -162,7 +162,7 @@ define void @scatter_inv_nxv4i32(ptr noalias nocapture %inv, ptr noalias nocaptu ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -211,12 +211,12 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -4 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2 ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x ptr> poison, ptr [[INV:%.*]], i64 0 @@ -233,7 +233,7 @@ define void @gather_inv_nxv4i32(ptr noalias nocapture %a, ptr noalias nocapture ; CHECK-NEXT: [[TMP7:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP7]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] @@ -286,12
+286,12 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3 ; CHECK-NEXT: [[TMP5:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64() @@ -321,7 +321,7 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias ; CHECK-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll index 965c71c008aa1..34fb5bb640471 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll @@ -16,7 +16,7 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll index d6794420c403f..ba8f69b63f060 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll @@ -1464,7 +1464,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 { ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 2 ; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP5]] ; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[TMP7]], i64 6 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll index e3bba1338e1df..81121019efe76 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-vector-reverse.ll @@ -16,49 +16,49 @@ define void @vector_reverse_f64(i64 %N, ptr noalias %a, ptr noalias %b) #0{ ; CHECK: for.body.preheader: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP30:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP31:%.*]] = shl i64 [[TMP30]], 4 +; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP5:%.*]] = shl i64 [[TMP4]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP4:%.*]] = xor i64 [[INDEX]], -1 -; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[TMP4]], [[N]] -; CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 3 -; CHECK-NEXT: [[TMP9:%.*]] = sub i64 1, [[TMP8]] -; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP9]] -; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP12:%.*]] = shl i64 [[TMP11]], 3 -; CHECK-NEXT: [[TMP13:%.*]] = sub i64 0, [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = sub i64 1, [[TMP12]] -; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds double, ptr [[TMP6]], i64 [[TMP13]] -; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds double, ptr [[TMP15]], i64 [[TMP14]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP10]], align 8 -; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP16]], align 8 -; CHECK-NEXT: [[TMP17:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer) -; CHECK-NEXT: [[TMP18:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer) -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP5]] -; CHECK-NEXT: [[TMP20:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP21:%.*]] = shl i64 [[TMP20]], 3 -; CHECK-NEXT: [[TMP22:%.*]] = sub i64 1, [[TMP21]] -; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds double, ptr [[TMP19]], i64 [[TMP22]] -; CHECK-NEXT: [[TMP24:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP25:%.*]] = shl i64 [[TMP24]], 3 -; CHECK-NEXT: [[TMP26:%.*]] = sub i64 0, [[TMP25]] -; CHECK-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP25]] -; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds double, ptr [[TMP19]], i64 [[TMP26]] -; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds double, ptr [[TMP28]], i64 [[TMP27]] -; CHECK-NEXT: store <vscale x 8 x double> [[TMP17]], ptr [[TMP23]], align 8 -; CHECK-NEXT: store <vscale x 8 x double> [[TMP18]], ptr [[TMP29]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP31]] +; CHECK-NEXT: [[TMP6:%.*]] = xor i64 [[INDEX]], -1 +; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[N]], [[TMP6]] +; CHECK-NEXT: [[TMP8:%.*]] =
getelementptr inbounds double, ptr [[B:%.*]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP9:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP10:%.*]] = shl i64 [[TMP9]], 3 +; CHECK-NEXT: [[TMP11:%.*]] = sub i64 1, [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 [[TMP11]] +; CHECK-NEXT: [[TMP13:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP14:%.*]] = shl i64 [[TMP13]], 3 +; CHECK-NEXT: [[TMP15:%.*]] = sub i64 0, [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = sub i64 1, [[TMP14]] +; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds double, ptr [[TMP8]], i64 [[TMP15]] +; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds double, ptr [[TMP17]], i64 [[TMP16]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x double>, ptr [[TMP12]], align 8 +; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 8 x double>, ptr [[TMP18]], align 8 +; CHECK-NEXT: [[TMP19:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP20:%.*]] = fadd <vscale x 8 x double> [[WIDE_LOAD1]], shufflevector (<vscale x 8 x double> insertelement (<vscale x 8 x double> poison, double 1.000000e+00, i64 0), <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds double, ptr [[A:%.*]], i64 [[TMP7]] +; CHECK-NEXT: [[TMP22:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP23:%.*]] = shl i64 [[TMP22]], 3 +; CHECK-NEXT: [[TMP24:%.*]] = sub i64 1, [[TMP23]] +; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds double, ptr [[TMP21]], i64 [[TMP24]] +; CHECK-NEXT: [[TMP26:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP27:%.*]] = shl i64 [[TMP26]], 3 +; CHECK-NEXT: [[TMP28:%.*]] = sub i64 0, [[TMP27]] +; CHECK-NEXT: [[TMP29:%.*]] = sub i64 1, [[TMP27]] +; CHECK-NEXT: [[TMP30:%.*]] = getelementptr inbounds double, ptr [[TMP21]], i64 [[TMP28]] +; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds double, ptr [[TMP30]], i64 [[TMP29]] +; CHECK-NEXT: store <vscale x 8 x double> [[TMP19]], ptr [[TMP25]], align 8 +; CHECK-NEXT: store <vscale x 8 x double> [[TMP20]], ptr [[TMP31]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]] ; CHECK-NEXT: [[TMP32:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP32]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: @@ -112,7 +112,7 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK: for.body.preheader: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 4 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] ; CHECK: vector.memcheck: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() @@ -125,42 +125,42 @@ define void @vector_reverse_i64(i64 %N, ptr %a, ptr %b) #0 { ; CHECK-NEXT: [[TMP6:%.*]] = shl i64 [[TMP5]], 4 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP6]] ; CHECK-NEXT: [[N_VEC:%.*]] = sub nsw i64 [[N]], [[N_MOD_VF]] -; CHECK-NEXT: [[TMP33:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP34:%.*]] = shl i64 [[TMP33]], 4 +; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP8:%.*]] = shl i64 [[TMP7]], 4 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP7:%.*]] = xor i64 [[INDEX]], -1 -; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[TMP7]], [[N]] -; CHECK-NEXT: [[TMP9:%.*]] =
getelementptr inbounds i64, ptr [[B]], i64 [[TMP8]] -; CHECK-NEXT: [[TMP10:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP11:%.*]] = shl i64 [[TMP10]], 3 -; CHECK-NEXT: [[TMP12:%.*]] = sub i64 1, [[TMP11]] -; CHECK-NEXT: [[TMP13:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP12]] -; CHECK-NEXT: [[TMP14:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP15:%.*]] = shl i64 [[TMP14]], 3 -; CHECK-NEXT: [[TMP16:%.*]] = sub i64 0, [[TMP15]] -; CHECK-NEXT: [[TMP17:%.*]] = sub i64 1, [[TMP15]] -; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i64, ptr [[TMP9]], i64 [[TMP16]] -; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i64, ptr [[TMP18]], i64 [[TMP17]] -; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP13]], align 8 -; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP19]], align 8 -; CHECK-NEXT: [[TMP20:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer) -; CHECK-NEXT: [[TMP21:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer) -; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP8]] -; CHECK-NEXT: [[TMP23:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP24:%.*]] = shl i64 [[TMP23]], 3 -; CHECK-NEXT: [[TMP25:%.*]] = sub i64 1, [[TMP24]] -; CHECK-NEXT: [[TMP26:%.*]] = getelementptr inbounds i64, ptr [[TMP22]], i64 [[TMP25]] -; CHECK-NEXT: [[TMP27:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[TMP28:%.*]] = shl i64 [[TMP27]], 3 -; CHECK-NEXT: [[TMP29:%.*]] = sub i64 0, [[TMP28]] -; CHECK-NEXT: [[TMP30:%.*]] = sub i64 1, [[TMP28]] -; CHECK-NEXT: [[TMP31:%.*]] = getelementptr inbounds i64, ptr [[TMP22]], i64 [[TMP29]] -; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds i64, ptr [[TMP31]], i64 [[TMP30]] -; CHECK-NEXT: store <vscale x 8 x i64> [[TMP20]], ptr [[TMP26]], align 8 -; CHECK-NEXT: store <vscale x 8 x i64> [[TMP21]], ptr [[TMP32]], align 8 -; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP34]] +; CHECK-NEXT: [[TMP9:%.*]] = xor i64 [[INDEX]], -1 +; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[N]], [[TMP9]] +; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i64, ptr [[B]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP12:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP13:%.*]] = shl i64 [[TMP12]], 3 +; CHECK-NEXT: [[TMP14:%.*]] = sub i64 1, [[TMP13]] +; CHECK-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i64 [[TMP14]] +; CHECK-NEXT: [[TMP16:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP17:%.*]] = shl i64 [[TMP16]], 3 +; CHECK-NEXT: [[TMP18:%.*]] = sub i64 0, [[TMP17]] +; CHECK-NEXT: [[TMP19:%.*]] = sub i64 1, [[TMP17]] +; CHECK-NEXT: [[TMP20:%.*]] = getelementptr inbounds i64, ptr [[TMP11]], i64 [[TMP18]] +; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds i64, ptr [[TMP20]], i64 [[TMP19]] +; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 8 x i64>, ptr [[TMP15]], align 8 +; CHECK-NEXT: [[WIDE_LOAD3:%.*]] = load <vscale x 8 x i64>, ptr [[TMP21]], align 8 +; CHECK-NEXT: [[TMP22:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP23:%.*]] = add <vscale x 8 x i64> [[WIDE_LOAD3]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP10]] +; CHECK-NEXT: [[TMP25:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP26:%.*]] = shl i64 [[TMP25]], 3 +; CHECK-NEXT: [[TMP27:%.*]] = sub i64 1, [[TMP26]] +; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 [[TMP27]] +;
CHECK-NEXT: [[TMP29:%.*]] = call i64 @llvm.vscale.i64() +; CHECK-NEXT: [[TMP30:%.*]] = shl i64 [[TMP29]], 3 +; CHECK-NEXT: [[TMP31:%.*]] = sub i64 0, [[TMP30]] +; CHECK-NEXT: [[TMP32:%.*]] = sub i64 1, [[TMP30]] +; CHECK-NEXT: [[TMP33:%.*]] = getelementptr inbounds i64, ptr [[TMP24]], i64 [[TMP31]] +; CHECK-NEXT: [[TMP34:%.*]] = getelementptr inbounds i64, ptr [[TMP33]], i64 [[TMP32]] +; CHECK-NEXT: store <vscale x 8 x i64> [[TMP22]], ptr [[TMP28]], align 8 +; CHECK-NEXT: store <vscale x 8 x i64> [[TMP23]], ptr [[TMP34]], align 8 +; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP8]] ; CHECK-NEXT: [[TMP35:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP35]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll index 76084776b2b76..626bb55cf2a77 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll @@ -19,12 +19,12 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 3 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP3]] ; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64() @@ -66,7 +66,7 @@ define void @widen_ptr_phi_unrolled(ptr noalias nocapture %a, ptr noalias nocapt ; CHECK-NEXT: [[TMP23:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP23]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[C]], [[ENTRY:%.*]] ] @@ -132,12 +132,12 @@ define void @widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3 -; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]] +; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]] ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTNEG:%.*]] = mul nsw i64 [[TMP2]], -8 -; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[DOTNEG]], [[N]] +; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]] ; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[N_VEC]], 2 ; CHECK-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC:%.*]], i64 [[TMP3]] ; CHECK-NEXT: [[TMP4:%.*]] = shl i64 [[N_VEC]], 2 @@ -149,25 +149,25 @@ define void
@widen_2ptrs_phi_unrolled(ptr noalias nocapture %dst, ptr noalias no ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2 ; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[OFFSET_IDX]] -; CHECK-NEXT: [[OFFSET_IDX5:%.*]] = shl i64 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX5]] +; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i64 [[INDEX]], 2 +; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[DST]], i64 [[OFFSET_IDX4]] ; CHECK-NEXT: [[TMP7:%.*]] = call i64 @llvm.vscale.i64() ; CHECK-NEXT: [[DOTIDX:%.*]] = shl nuw nsw i64 [[TMP7]], 4 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 [[DOTIDX]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[NEXT_GEP]], align 4 -; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 +; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4 ; CHECK-NEXT: [[TMP9:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) -; CHECK-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD8]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) +; CHECK-NEXT: [[TMP10:%.*]] = shl nsw <vscale x 4 x i32> [[WIDE_LOAD6]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer) ; CHECK-NEXT: [[TMP11:%.*]] = call i64 @llvm.vscale.i64() -; CHECK-NEXT: [[DOTIDX9:%.*]] = shl nuw nsw i64 [[TMP11]], 4 -; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP6]], i64 [[DOTIDX9]] -; CHECK-NEXT: store <vscale x 4 x i32> [[TMP9]], ptr [[NEXT_GEP6]], align 4 +; CHECK-NEXT: [[DOTIDX7:%.*]] = shl nuw nsw i64 [[TMP11]], 4 +; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP5]], i64 [[DOTIDX7]] +; CHECK-NEXT: store <vscale x 4 x i32> [[TMP9]], ptr [[NEXT_GEP5]], align 4 ; CHECK-NEXT: store <vscale x 4 x i32> [[TMP10]], ptr [[TMP12]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP6]] ; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ] diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll index c22613509be4f..57807604b37a8 100644 --- a/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll +++ b/llvm/test/Transforms/LoopVectorize/AArch64/vector-reverse-mask4.ll @@ -32,7 +32,7 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias %cond, i64 %N) ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[TMP0:%.*]] = xor i64 [[INDEX]], -1 -; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[TMP0]], [[N]] +; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[N]], [[TMP0]] ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[COND:%.*]], i64 [[TMP1]] ; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -24 ; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 -56 @@ -47,17 +47,17 @@ define void @vector_reverse_mask_v4i1(ptr noalias %a, ptr noalias
%cond, i64 %N) ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP7]], i64 -56 ; CHECK-NEXT: [[REVERSE3:%.*]] = shufflevector <4 x i1> [[TMP5]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> ; CHECK-NEXT: [[WIDE_MASKED_LOAD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP8]], i32 8, <4 x i1> [[REVERSE3]], <4 x double> poison) -; CHECK-NEXT: [[REVERSE4:%.*]] = shufflevector <4 x i1> [[TMP6]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> -; CHECK-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE4]], <4 x double> poison) +; CHECK-NEXT: [[REVERSE5:%.*]] = shufflevector <4 x i1> [[TMP6]], <4 x i1> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0> +; CHECK-NEXT: [[WIDE_MASKED_LOAD6:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE5]], <4 x double> poison) ; CHECK-NEXT: [[TMP10:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00> ; CHECK-NEXT: [[TMP11:%.*]] = fadd <4 x double> [[WIDE_MASKED_LOAD6]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00> ; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP10]], ptr [[TMP8]], i32 8, <4 x i1> [[REVERSE3]]) -; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP11]], ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE4]]) +; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[TMP11]], ptr [[TMP9]], i32 8, <4 x i1> [[REVERSE5]]) ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP_LOOPEXIT:%.*]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[N]], [[FOR_BODY_PREHEADER]] ] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll index 45b84a0b5e856..fec5921720fed 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-qabs.ll @@ -38,7 +38,7 @@ define void @arm_abs_q7(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 % ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ] @@ -118,22 +118,22 @@ define void @arm_abs_q15(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 1 -; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP4]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[OFFSET_IDX]] +; CHECK-NEXT: [[OFFSET_IDX7:%.*]]
= shl i32 [[INDEX]], 1 +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX7]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2 -; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <8 x i16> [[WIDE_LOAD]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768> -; CHECK-NEXT: [[TMP7:%.*]] = sub <8 x i16> zeroinitializer, [[WIDE_LOAD]] -; CHECK-NEXT: [[TMP8:%.*]] = select <8 x i1> [[TMP6]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP7]] -; CHECK-NEXT: [[TMP9:%.*]] = select <8 x i1> [[TMP5]], <8 x i16> [[WIDE_LOAD]], <8 x i16> [[TMP8]] -; CHECK-NEXT: store <8 x i16> [[TMP9]], ptr [[NEXT_GEP7]], align 2 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <8 x i16> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <8 x i16> [[WIDE_LOAD]], <i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768, i16 -32768> +; CHECK-NEXT: [[TMP5:%.*]] = sub <8 x i16> zeroinitializer, [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = select <8 x i1> [[TMP4]], <8 x i16> <i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767, i16 32767>, <8 x i16> [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = select <8 x i1> [[TMP3]], <8 x i16> [[WIDE_LOAD]], <8 x i16> [[TMP6]] +; CHECK-NEXT: store <8 x i16> [[TMP7]], ptr [[NEXT_GEP8]], align 2 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8 -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ] @@ -145,12 +145,12 @@ define void @arm_abs_q15(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 ; CHECK-NEXT: [[BLKCNT_022:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[PDST_ADDR_021:%.*]] = phi ptr [ [[INCDEC_PTR13:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_023]], i32 2 -; CHECK-NEXT: [[TMP11:%.*]] = load i16, ptr [[PSRC_ADDR_023]], align 2 -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP11]], 0 -; CHECK-NEXT: [[CMP5:%.*]] = icmp eq i16 [[TMP11]], -32768 -; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[TMP11]] +; CHECK-NEXT: [[TMP9:%.*]] = load i16, ptr [[PSRC_ADDR_023]], align 2 +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i16 [[TMP9]], 0 +; CHECK-NEXT: [[CMP5:%.*]] = icmp eq i16 [[TMP9]], -32768 +; CHECK-NEXT: [[SUB:%.*]] = sub i16 0, [[TMP9]] ; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP5]], i16 32767, i16 [[SUB]] -; CHECK-NEXT: [[COND11:%.*]] = select i1 [[CMP1]], i16 [[TMP11]], i16 [[COND]] +; CHECK-NEXT: [[COND11:%.*]] = select i1 [[CMP1]], i16 [[TMP9]], i16 [[COND]] ; CHECK-NEXT: [[INCDEC_PTR13]] = getelementptr inbounds i8, ptr [[PDST_ADDR_021]], i32 2 ; CHECK-NEXT: store i16 [[COND11]], ptr [[PDST_ADDR_021]], align 2 ; CHECK-NEXT: [[DEC]] = add i32 [[BLKCNT_022]], -1 @@ -213,22 +213,22 @@ define void @arm_abs_q31(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]] ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP3:%.*]] =
shl i32 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[TMP3]] -; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP7:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[TMP4]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PSRC]], i32 [[OFFSET_IDX]] +; CHECK-NEXT: [[OFFSET_IDX7:%.*]] = shl i32 [[INDEX]], 2 +; CHECK-NEXT: [[NEXT_GEP8:%.*]] = getelementptr i8, ptr [[PDST]], i32 [[OFFSET_IDX7]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4 -; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> -; CHECK-NEXT: [[TMP7:%.*]] = sub nsw <4 x i32> zeroinitializer, [[WIDE_LOAD]] -; CHECK-NEXT: [[TMP8:%.*]] = select <4 x i1> [[TMP6]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP7]] -; CHECK-NEXT: [[TMP9:%.*]] = select <4 x i1> [[TMP5]], <4 x i32> [[WIDE_LOAD]], <4 x i32> [[TMP8]] -; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr [[NEXT_GEP7]], align 4 +; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt <4 x i32> [[WIDE_LOAD]], zeroinitializer +; CHECK-NEXT: [[TMP4:%.*]] = icmp eq <4 x i32> [[WIDE_LOAD]], <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> +; CHECK-NEXT: [[TMP5:%.*]] = sub nsw <4 x i32> zeroinitializer, [[WIDE_LOAD]] +; CHECK-NEXT: [[TMP6:%.*]] = select <4 x i1> [[TMP4]], <4 x i32> <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>, <4 x i32> [[TMP5]] +; CHECK-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP3]], <4 x i32> [[WIDE_LOAD]], <4 x i32> [[TMP6]] +; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[NEXT_GEP8]], align 4 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4 -; CHECK-NEXT: [[TMP10:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] -; CHECK-NEXT: br i1 [[TMP10]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] +; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] +; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PSRC]], [[WHILE_BODY_PREHEADER]] ] @@ -240,12 +240,12 @@ define void @arm_abs_q31(ptr nocapture readonly %pSrc, ptr nocapture %pDst, i32 ; CHECK-NEXT: [[BLKCNT_016:%.*]] = phi i32 [ [[DEC:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[PDST_ADDR_015:%.*]] = phi ptr [ [[INCDEC_PTR7:%.*]], [[WHILE_BODY]] ], [ [[BC_RESUME_VAL6]], [[SCALAR_PH]] ] ; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PSRC_ADDR_017]], i32 4 -; CHECK-NEXT: [[TMP11:%.*]] = load i32, ptr [[PSRC_ADDR_017]], align 4 -; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP11]], 0 -; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP11]], -2147483648 -; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[TMP11]] +; CHECK-NEXT: [[TMP9:%.*]] = load i32, ptr [[PSRC_ADDR_017]], align 4 +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt i32 [[TMP9]], 0 +; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP9]], -2147483648 +; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 0, [[TMP9]] ; CHECK-NEXT: [[COND:%.*]] = select i1 [[CMP2]], i32 2147483647, i32 [[SUB]] -; CHECK-NEXT: [[COND6:%.*]] = select i1 [[CMP1]], i32 [[TMP11]], i32 [[COND]] +; CHECK-NEXT: [[COND6:%.*]] = select i1 [[CMP1]], i32 [[TMP9]], i32 [[COND]] ; CHECK-NEXT: [[INCDEC_PTR7]] = getelementptr inbounds i8, ptr
[[PDST_ADDR_015]], i32 4 ; CHECK-NEXT: store i32 [[COND6]], ptr [[PDST_ADDR_015]], align 4 ; CHECK-NEXT: [[DEC]] = add i32 [[BLKCNT_016]], -1 diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll index 18caa9cc16f35..a7cb5c61ca550 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-reductions.ll @@ -67,7 +67,7 @@ define i64 @add_i32_i64(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -132,7 +132,7 @@ define i64 @add_i16_i64(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -197,7 +197,7 @@ define i64 @add_i8_i64(ptr nocapture readonly %x, i32 %n) #0 { ; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -582,7 +582,7 @@ define i64 @mla_i32_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3 ; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP6]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -658,7 +658,7 @@ define i64 @mla_i16_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i3 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -738,7 +738,7 @@ 
define i64 @mla_i8_i64(ptr nocapture readonly %x, ptr nocapture readonly %y, i32 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] @@ -1197,7 +1197,7 @@ define i64 @red_mla_ext_s16_u16_s64(ptr noalias nocapture readonly %A, ptr noali ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; CHECK: middle.block: -; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]] +; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]] ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]] ; CHECK: scalar.ph: ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ] diff --git a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll index 6953834335669..d904c50f3bf9c 100644 --- a/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll +++ b/llvm/test/Transforms/LoopVectorize/ARM/mve-selectandorcost.ll @@ -30,35 +30,35 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 % ; CHECK: vector.body: ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x float> [ zeroinitializer, [[VECTOR_PH]] ], [ [[PREDPHI:%.*]], [[VECTOR_BODY]] ] -; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PA]], i32 [[TMP2]] -; CHECK-NEXT: [[TMP3:%.*]] = shl i32 [[INDEX]], 2 -; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[PB]], i32 [[TMP3]] +; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2 +; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[PA]], i32 [[OFFSET_IDX]] +; CHECK-NEXT: [[OFFSET_IDX5:%.*]] = shl i32 [[INDEX]], 2 +; CHECK-NEXT: [[NEXT_GEP6:%.*]] = getelementptr i8, ptr [[PB]], i32 [[OFFSET_IDX5]] ; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4 -; CHECK-NEXT: [[WIDE_LOAD6:%.*]] = load <4 x float>, ptr [[NEXT_GEP5]], align 4 -; CHECK-NEXT: [[TMP4:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], zeroinitializer -; CHECK-NEXT: [[TMP5:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD6]], zeroinitializer -; CHECK-NEXT: [[DOTNOT8:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP5]], <4 x i1> zeroinitializer -; CHECK-NEXT: [[TMP6:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD]]) -; CHECK-NEXT: [[TMP7:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD6]]) -; CHECK-NEXT: [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]] -; CHECK-NEXT: [[TMP9:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD6]] -; CHECK-NEXT: [[TMP10:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP9]]) -; CHECK-NEXT: [[TMP11:%.*]] = fdiv fast <4 x float> [[TMP10]], [[TMP8]] -; CHECK-NEXT: [[TMP12:%.*]] = fadd fast <4 x float> [[TMP11]], [[VEC_PHI]] -; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[DOTNOT8]], <4 x float> [[VEC_PHI]], <4 x 
+; CHECK-NEXT: [[WIDE_LOAD7:%.*]] = load <4 x float>, ptr [[NEXT_GEP6]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp fast oeq <4 x float> [[WIDE_LOAD7]], zeroinitializer
+; CHECK-NEXT: [[DOTNOT9:%.*]] = select <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i1> zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD]])
+; CHECK-NEXT: [[TMP5:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[WIDE_LOAD7]])
+; CHECK-NEXT: [[TMP6:%.*]] = fadd fast <4 x float> [[TMP5]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = fsub fast <4 x float> [[WIDE_LOAD]], [[WIDE_LOAD7]]
+; CHECK-NEXT: [[TMP8:%.*]] = call fast <4 x float> @llvm.fabs.v4f32(<4 x float> [[TMP7]])
+; CHECK-NEXT: [[TMP9:%.*]] = fdiv fast <4 x float> [[TMP8]], [[TMP6]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd fast <4 x float> [[TMP9]], [[VEC_PHI]]
+; CHECK-NEXT: [[PREDPHI]] = select <4 x i1> [[DOTNOT9]], <4 x float> [[VEC_PHI]], <4 x float> [[TMP10]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP14:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[TMP12:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[PA]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_RESUME_VAL2:%.*]] = phi ptr [ [[IND_END1]], [[MIDDLE_BLOCK]] ], [ [[PB]], [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_RESUME_VAL4:%.*]] = phi i32 [ [[IND_END3]], [[MIDDLE_BLOCK]] ], [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP14]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[WHILE_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi float [ [[TMP12]], [[MIDDLE_BLOCK]] ], [ 0.000000e+00, [[WHILE_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[WHILE_BODY:%.*]]
; CHECK: while.body:
; CHECK-NEXT: [[PA_ADDR_020:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[IF_END:%.*]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
@@ -66,20 +66,20 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[BLOCKSIZE_ADDR_018:%.*]] = phi i32 [ [[DEC:%.*]], [[IF_END]] ], [ [[BC_RESUME_VAL4]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ACCUM_017:%.*]] = phi float [ [[ACCUM_1:%.*]], [[IF_END]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[PA_ADDR_020]], i32 4
-; CHECK-NEXT: [[TMP15:%.*]] = load float, ptr [[PA_ADDR_020]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = load float, ptr [[PA_ADDR_020]], align 4
; CHECK-NEXT: [[INCDEC_PTR1]] = getelementptr inbounds i8, ptr [[PB_ADDR_019]], i32 4
-; CHECK-NEXT: [[TMP16:%.*]] = load float, ptr [[PB_ADDR_019]], align 4
-; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast une float [[TMP15]], 0.000000e+00
-; CHECK-NEXT: [[CMP3:%.*]] = fcmp fast une float [[TMP16]], 0.000000e+00
+; CHECK-NEXT: [[TMP14:%.*]] = load float, ptr [[PB_ADDR_019]], align 4
+; CHECK-NEXT: [[CMP2:%.*]] = fcmp fast une float [[TMP13]], 0.000000e+00
+; CHECK-NEXT: [[CMP3:%.*]] = fcmp fast une float [[TMP14]], 0.000000e+00
; CHECK-NEXT: [[OR_COND:%.*]] = select i1 [[CMP2]], i1 true, i1 [[CMP3]]
; CHECK-NEXT: br i1 [[OR_COND]], label [[IF_THEN:%.*]], label [[IF_END]]
; CHECK: if.then:
-; CHECK-NEXT: [[TMP17:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP15]])
-; CHECK-NEXT: [[TMP18:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP16]])
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP18]], [[TMP17]]
-; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[TMP15]], [[TMP16]]
-; CHECK-NEXT: [[TMP19:%.*]] = tail call fast float @llvm.fabs.f32(float [[SUB]])
-; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float [[TMP19]], [[ADD]]
+; CHECK-NEXT: [[TMP15:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP13]])
+; CHECK-NEXT: [[TMP16:%.*]] = tail call fast float @llvm.fabs.f32(float [[TMP14]])
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP16]], [[TMP15]]
+; CHECK-NEXT: [[SUB:%.*]] = fsub fast float [[TMP13]], [[TMP14]]
+; CHECK-NEXT: [[TMP17:%.*]] = tail call fast float @llvm.fabs.f32(float [[SUB]])
+; CHECK-NEXT: [[DIV:%.*]] = fdiv fast float [[TMP17]], [[ADD]]
; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float [[DIV]], [[ACCUM_017]]
; CHECK-NEXT: br label [[IF_END]]
; CHECK: if.end:
@@ -88,7 +88,7 @@ define float @test(ptr nocapture readonly %pA, ptr nocapture readonly %pB, i32 %
; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp eq i32 [[DEC]], 0
; CHECK-NEXT: br i1 [[CMP_NOT]], label [[WHILE_END]], label [[WHILE_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: while.end:
-; CHECK-NEXT: [[ACCUM_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ACCUM_1]], [[IF_END]] ], [ [[TMP14]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[ACCUM_0_LCSSA:%.*]] = phi float [ 0.000000e+00, [[ENTRY:%.*]] ], [ [[ACCUM_1]], [[IF_END]] ], [ [[TMP12]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret float [[ACCUM_0_LCSSA]]
;
entry:
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll b/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
index 2269b774d9f31..3432773b4e1b3 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/pointer_iv.ll
@@ -12,16 +12,16 @@ define hidden void @pointer_phi_v4i32_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -53,24 +53,24 @@ define hidden void @pointer_phi_v4i32_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 3
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x i32>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x i32> [[WIDE_VEC]], <8 x i32> poison, <4 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = add nsw <4 x i32> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 8
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -109,22 +109,22 @@ define hidden void @pointer_phi_v4i32_add3(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[ENTRY:%.*]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> , <4 x i32> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i32> [[TMP1]], ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 48
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 12
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP3]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -160,16 +160,16 @@ define hidden void @pointer_phi_v8i16_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i16>, ptr [[NEXT_GEP]], align 2
-; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr [[NEXT_GEP5]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP4]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP2]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -203,17 +203,17 @@ define hidden void @pointer_phi_v8i16_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP1]]
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x i16>, ptr [[NEXT_GEP]], align 2
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x i16> [[WIDE_VEC]], <16 x i16> poison, <8 x i32>
-; CHECK-NEXT: [[TMP3:%.*]] = add <8 x i16> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x i16> [[TMP3]], ptr [[NEXT_GEP4]], align 2
+; CHECK-NEXT: [[TMP1:%.*]] = add <8 x i16> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x i16> [[TMP1]], ptr [[NEXT_GEP5]], align 2
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP4]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_011:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_010:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]]
@@ -352,23 +352,23 @@ define hidden void @pointer_phi_v16i8_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <32 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <32 x i8> [[WIDE_VEC]], <32 x i8> poison, <16 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = add <16 x i8> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <16 x i8> [[TMP2]], ptr [[NEXT_GEP4]], align 1
+; CHECK-NEXT: [[TMP1:%.*]] = add <16 x i8> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <16 x i8> [[TMP1]], ptr [[NEXT_GEP4]], align 1
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_010:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_09:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_08:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load i8, ptr [[A_ADDR_010]], align 1
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[A_ADDR_010]], align 1
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_010]], i32 2
-; CHECK-NEXT: [[CONV1:%.*]] = add i8 [[TMP4]], [[TMP0]]
+; CHECK-NEXT: [[CONV1:%.*]] = add i8 [[TMP3]], [[TMP0]]
; CHECK-NEXT: store i8 [[CONV1]], ptr [[B_ADDR_08]], align 1
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_08]], i32 1
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_09]], 1
@@ -445,16 +445,16 @@ define hidden void @pointer_phi_v4f32_add1(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x float>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <4 x float> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -486,24 +486,24 @@ define hidden void @pointer_phi_v4f32_add2(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 3
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 3
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <8 x float> [[WIDE_VEC]], <8 x float> poison, <4 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <4 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load float, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 8
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP2]], [[Y]]
; CHECK-NEXT: store float [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -542,22 +542,22 @@ define hidden void @pointer_phi_v4f32_add3(ptr noalias nocapture readonly %A, pt
; CHECK-NEXT: [[POINTER_PHI:%.*]] = phi ptr [ [[A]], [[ENTRY:%.*]] ], [ [[PTR_IND:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> , <4 x float> poison)
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <4 x float> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x float> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = fadd fast <4 x float> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x float> [[TMP1]], ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 48
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], 996
+; CHECK-NEXT: br i1 [[TMP2]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 996, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load float, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP3:%.*]] = load float, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 12
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast float [[TMP3]], [[Y]]
; CHECK-NEXT: store float [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -592,16 +592,16 @@ define hidden void @pointer_phi_v4half_add1(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B:%.*]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <8 x half>, ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP3]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP1]], label [[END:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
; CHECK: end:
; CHECK-NEXT: ret void
;
@@ -633,24 +633,24 @@ define hidden void @pointer_phi_v4half_add2(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <16 x half>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <16 x half> [[WIDE_VEC]], <16 x half> poison, <8 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 4
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[Y]]
; CHECK-NEXT: store half [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 2
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -687,24 +687,24 @@ define hidden void @pointer_phi_v4half_add3(ptr noalias nocapture readonly %A, p
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP0:%.*]] = mul i32 [[INDEX]], 6
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[TMP0]]
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[INDEX]], 1
-; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP1]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i32 [[INDEX]], 6
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[A]], i32 [[OFFSET_IDX]]
+; CHECK-NEXT: [[OFFSET_IDX4:%.*]] = shl i32 [[INDEX]], 1
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX4]]
; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <24 x half>, ptr [[NEXT_GEP]], align 4
; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <24 x half> [[WIDE_VEC]], <24 x half> poison, <8 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <8 x half> [[TMP2]], ptr [[NEXT_GEP4]], align 4
+; CHECK-NEXT: [[TMP0:%.*]] = fadd fast <8 x half> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <8 x half> [[TMP0]], ptr [[NEXT_GEP5]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
-; CHECK-NEXT: br i1 [[TMP3]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[INDEX_NEXT]], 992
+; CHECK-NEXT: br i1 [[TMP1]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_09:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_08:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_07:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP4:%.*]] = load half, ptr [[A_ADDR_09]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load half, ptr [[A_ADDR_09]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_09]], i32 6
-; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP4]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = fadd fast half [[TMP2]], [[Y]]
; CHECK-NEXT: store half [[ADD]], ptr [[B_ADDR_07]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_07]], i32 2
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_08]], 1
@@ -747,28 +747,28 @@ define hidden void @pointer_phi_v4i32_uf2(ptr noalias nocapture readonly %A, ptr
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[TMP2]]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = shl i32 [[INDEX]], 2
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> , <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER5:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP1]], i32 4, <4 x i1> , <4 x i32> poison)
-; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER5]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
-; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[NEXT_GEP]], align 4
-; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[TMP5]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP3:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER5]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-NEXT: store <4 x i32> [[TMP2]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: store <4 x i32> [[TMP3]], ptr [[TMP4]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 192
-; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9992
-; CHECK-NEXT: br i1 [[TMP6]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9992
+; CHECK-NEXT: br i1 [[TMP5]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP24:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_08:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 9992, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_08]], i32 24
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP7]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_06]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_06]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
@@ -814,36 +814,36 @@ define hidden void @pointer_phi_v4i32_uf4(ptr noalias nocapture readonly %A, ptr
; CHECK-NEXT: [[TMP1:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <4 x i32>
-; CHECK-NEXT: [[TMP4:%.*]] = shl i32 [[INDEX]], 2
-; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[B]], i32 [[OFFSET_IDX]]
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP0]], i32 4, <4 x i1> , <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER7:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP1]], i32 4, <4 x i1> , <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER8:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP2]], i32 4, <4 x i1> , <4 x i32> poison)
; CHECK-NEXT: [[WIDE_MASKED_GATHER9:%.*]] = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> [[TMP3]], i32 4, <4 x i1> , <4 x i32> poison)
-; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER7]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER8]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP8:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER9]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
-; CHECK-NEXT: store <4 x i32> [[TMP5]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP5:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER7]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER8]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP7:%.*]] = add nsw <4 x i32> [[WIDE_MASKED_GATHER9]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 16
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 32
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i32 48
+; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr [[NEXT_GEP]], align 4
+; CHECK-NEXT: store <4 x i32> [[TMP5]], ptr [[TMP8]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP6]], ptr [[TMP9]], align 4
; CHECK-NEXT: store <4 x i32> [[TMP7]], ptr [[TMP10]], align 4
-; CHECK-NEXT: store <4 x i32> [[TMP8]], ptr [[TMP11]], align 4
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 16
; CHECK-NEXT: [[PTR_IND]] = getelementptr i8, ptr [[POINTER_PHI]], i32 384
-; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9984
-; CHECK-NEXT: br i1 [[TMP12]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i32 [[INDEX_NEXT]], 9984
+; CHECK-NEXT: br i1 [[TMP11]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP26:![0-9]+]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
; CHECK: for.body:
; CHECK-NEXT: [[A_ADDR_08:%.*]] = phi ptr [ [[ADD_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[I_07:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ 9984, [[VECTOR_BODY]] ]
; CHECK-NEXT: [[B_ADDR_06:%.*]] = phi ptr [ [[INCDEC_PTR:%.*]], [[FOR_BODY]] ], [ [[IND_END2]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[A_ADDR_08]], align 4
; CHECK-NEXT: [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_08]], i32 24
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[Y]]
+; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[Y]]
; CHECK-NEXT: store i32 [[ADD]], ptr [[B_ADDR_06]], align 4
; CHECK-NEXT: [[INCDEC_PTR]] = getelementptr inbounds i8, ptr [[B_ADDR_06]], i32 4
; CHECK-NEXT: [[INC]] = add nuw nsw i32 [[I_07]], 1
@@ -875,8 +875,8 @@ define hidden void @mult_ptr_iv(ptr noalias nocapture readonly %x, ptr noalias n
; CHECK-NEXT: entry:
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[Z:%.*]], i32 3000
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[X:%.*]], i32 3000
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[Z]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[X]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[Z]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[X]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
index f58d864e1e147..7db5bccd896b2 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-fold-multiple-icmps.ll
@@ -16,41 +16,41 @@ define arm_aapcs_vfpcc i32 @minmaxval4(ptr nocapture readonly %x, ptr nocapture
; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[TMP3:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI1:%.*]] = phi <4 x i32> [ , [[VECTOR_PH]] ], [ [[TMP1:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds i32, ptr [[X:%.*]], i32 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP0]], align 4
-; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI1]])
-; CHECK-NEXT: [[TMP3]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI]])
+; CHECK-NEXT: [[TMP1]] = call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI1]])
+; CHECK-NEXT: [[TMP2]] = call <4 x i32> @llvm.smin.v4i32(<4 x i32> [[WIDE_LOAD]], <4 x i32> [[VEC_PHI]])
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
-; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[TMP6:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP3]])
-; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP2]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> [[TMP2]])
+; CHECK-NEXT: [[TMP5:%.*]] = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> [[TMP1]])
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER]] ]
-; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP6]], [[MIDDLE_BLOCK]] ], [ 2147483647, [[FOR_BODY_PREHEADER]] ]
+; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP4]], [[MIDDLE_BLOCK]] ], [ 2147483647, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: [[BC_MERGE_RDX2:%.*]] = phi i32 [ [[TMP5]], [[MIDDLE_BLOCK]] ], [ -2147483648, [[FOR_BODY_PREHEADER]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[MAX_0_LCSSA:%.*]] = phi i32 [ -2147483648, [[ENTRY:%.*]] ], [ [[TMP8:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
-; CHECK-NEXT: [[MIN_0_LCSSA:%.*]] = phi i32 [ 2147483647, [[ENTRY]] ], [ [[TMP9:%.*]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MAX_0_LCSSA:%.*]] = phi i32 [ -2147483648, [[ENTRY:%.*]] ], [ [[COND:%.*]], [[FOR_BODY]] ], [ [[TMP5]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[MIN_0_LCSSA:%.*]] = phi i32 [ 2147483647, [[ENTRY]] ], [ [[COND9:%.*]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: store i32 [[MIN_0_LCSSA]], ptr [[MINP:%.*]], align 4
; CHECK-NEXT: ret i32 [[MAX_0_LCSSA]]
; CHECK: for.body:
; CHECK-NEXT: [[I_029:%.*]] = phi i32 [ [[INC:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[MIN_028:%.*]] = phi i32 [ [[TMP9]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
-; CHECK-NEXT: [[MAX_027:%.*]] = phi i32 [ [[TMP8]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[MIN_028:%.*]] = phi i32 [ [[COND9]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX]], [[SCALAR_PH]] ]
+; CHECK-NEXT: [[MAX_027:%.*]] = phi i32 [ [[COND]], [[FOR_BODY]] ], [ [[BC_MERGE_RDX2]], [[SCALAR_PH]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[X]], i32 [[I_029]]
-; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[TMP8]] = call i32 @llvm.smax.i32(i32 [[TMP7]], i32 [[MAX_027]])
-; CHECK-NEXT: [[TMP9]] = call i32 @llvm.smin.i32(i32 [[TMP7]], i32 [[MIN_028]])
+; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[COND]] = call i32 @llvm.smax.i32(i32 [[TMP6]], i32 [[MAX_027]])
+; CHECK-NEXT: [[COND9]] = call i32 @llvm.smin.i32(i32 [[TMP6]], i32 [[MIN_028]])
; CHECK-NEXT: [[INC]] = add nuw i32 [[I_029]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[INC]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP2:![0-9]+]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
;
entry:
%cmp26.not = icmp eq i32 %N, 0
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
index 8783326b1ef1a..9f9db3ad85991 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-load-gather.ll
@@ -15,8 +15,8 @@ define i32 @inv_load_conditional(ptr %a, i64 %n, ptr %b, i32 %k) {
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
diff --git a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
index 2b61a1cc3d78b..2fb4a68f4b586 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/invariant-store-vectorization.ll
@@ -18,8 +18,8 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
@@ -132,8 +132,8 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
; CHECK: vector.main.loop.iter.check:
@@ -245,15 +245,15 @@ define void @variant_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b
; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND03:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
-; CHECK-NEXT: [[BOUND14:%.*]] = icmp ugt ptr [[SCEVGEP]], [[C]]
+; CHECK-NEXT: [[BOUND03:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND14:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
; CHECK-NEXT: [[FOUND_CONFLICT5:%.*]] = and i1 [[BOUND03]], [[BOUND14]]
; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT5]]
-; CHECK-NEXT: [[BOUND06:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[A]]
-; CHECK-NEXT: [[BOUND17:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[C]]
+; CHECK-NEXT: [[BOUND06:%.*]] = icmp ult ptr [[A]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND17:%.*]] = icmp ult ptr [[C]], [[SCEVGEP1]]
; CHECK-NEXT: [[FOUND_CONFLICT8:%.*]] = and i1 [[BOUND06]], [[BOUND17]]
; CHECK-NEXT: [[CONFLICT_RDX9:%.*]] = or i1 [[CONFLICT_RDX]], [[FOUND_CONFLICT8]]
; CHECK-NEXT: br i1 [[CONFLICT_RDX9]], label [[VEC_EPILOG_SCALAR_PH]], label [[VECTOR_MAIN_LOOP_ITER_CHECK:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
index 8800fa26f067c..6bd70cefcaf74 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr23997.ll
@@ -48,7 +48,7 @@ define void @foo(ptr addrspace(1) align 8 dereferenceable_or_null(16), ptr addrs
; CHECK-NEXT: [[TMP13:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP13]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[TMP2]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[PREHEADER]] ], [ 0, [[VECTOR_MEMCHECK]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
index fe6d9b3ec690e..47636b2c66d29 100644
--- a/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
+++ b/llvm/test/Transforms/LoopVectorize/extract-last-veclane.ll
@@ -21,7 +21,7 @@ define void @inv_store_last_lane(ptr noalias nocapture %a, ptr noalias nocapture
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP1]], i64 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -82,7 +82,7 @@ define float @ret_last_lane(ptr noalias nocapture %a, ptr noalias nocapture read
; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x float> [[TMP1]], i64 3
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/float-induction.ll b/llvm/test/Transforms/LoopVectorize/float-induction.ll
index bd658c31768a8..bf1905bf33487 100644
--- a/llvm/test/Transforms/LoopVectorize/float-induction.ll
+++ b/llvm/test/Transforms/LoopVectorize/float-induction.ll
@@ -66,7 +66,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL1-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -124,7 +124,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC4_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -175,7 +175,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC1_INTERL2-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -226,7 +226,7 @@ define void @fp_iv_loop1_fast_FMF(float %init, ptr noalias nocapture %A, i32 %N)
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub fast float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -313,7 +313,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL1-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -371,7 +371,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC4_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC4_INTERL2: for.end.loopexit:
; VEC4_INTERL2-NEXT: br label [[FOR_END]]
@@ -424,7 +424,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC1_INTERL2-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC1_INTERL2: for.end.loopexit:
; VEC1_INTERL2-NEXT: br label [[FOR_END]]
@@ -475,7 +475,7 @@ define void @fp_iv_loop1_reassoc_FMF(float %init, ptr noalias nocapture %A, i32
; VEC2_INTERL1_PRED_STORE-NEXT: [[ADD]] = fsub reassoc float [[X_05]], [[FPINC]]
; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; VEC2_INTERL1_PRED_STORE: for.end:
; VEC2_INTERL1_PRED_STORE-NEXT: ret void
@@ -528,7 +528,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483644
; VEC4_INTERL1-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL1-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL1-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]],
@@ -557,7 +557,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; VEC4_INTERL1: for.end.loopexit:
; VEC4_INTERL1-NEXT: br label [[FOR_END]]
@@ -576,7 +576,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483640
; VEC4_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float
; VEC4_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01
-; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]]
+; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]]
; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0
; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
; VEC4_INTERL2-NEXT: [[INDUCTION:%.*]] = fadd fast <4 x float> [[DOTSPLAT]],
@@ -608,7 +608,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 {
; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01
; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
[[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VEC4_INTERL2: for.end.loopexit: ; VEC4_INTERL2-NEXT: br label [[FOR_END]] @@ -627,14 +627,14 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 { ; VEC1_INTERL2-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483646 ; VEC1_INTERL2-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC1_INTERL2-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01 -; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]] +; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]] ; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC1_INTERL2: vector.body: ; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; VEC1_INTERL2-NEXT: [[TMP2:%.*]] = or disjoint i64 [[INDEX]], 1 ; VEC1_INTERL2-NEXT: [[DOTCAST2:%.*]] = sitofp i64 [[INDEX]] to float ; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[DOTCAST2]], 5.000000e-01 -; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[TMP3]], [[INIT]] +; VEC1_INTERL2-NEXT: [[OFFSET_IDX:%.*]] = fadd fast float [[INIT]], [[TMP3]] ; VEC1_INTERL2-NEXT: [[TMP4:%.*]] = fadd fast float [[OFFSET_IDX]], 5.000000e-01 ; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] ; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP2]] @@ -658,7 +658,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 { ; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VEC1_INTERL2: for.end.loopexit: ; VEC1_INTERL2-NEXT: br label [[FOR_END]] @@ -677,7 +677,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 { ; VEC2_INTERL1_PRED_STORE-NEXT: [[N_VEC:%.*]] = and i64 [[TMP0]], 2147483646 ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTCAST:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP1:%.*]] = fmul fast float [[DOTCAST]], 5.000000e-01 -; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP1]], [[INIT:%.*]] +; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[INIT:%.*]], [[TMP1]] ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[INIT]], i64 0 ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x float> [[DOTSPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer ; VEC2_INTERL1_PRED_STORE-NEXT: [[INDUCTION:%.*]] = fadd fast <2 x float> [[DOTSPLAT]], @@ -702,7 +702,7 @@ define void @fp_iv_loop2(float %init, ptr noalias nocapture %A, i32 %N) #0 { ; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] 
= add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]] ; VEC2_INTERL1_PRED_STORE: for.end: ; VEC2_INTERL1_PRED_STORE-NEXT: ret void @@ -763,7 +763,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL1-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC4_INTERL1-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL1-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] -; VEC4_INTERL1-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]] +; VEC4_INTERL1-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] ; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL1-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL1-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 @@ -817,7 +817,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL1-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4 ; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VEC4_INTERL1: for.end.loopexit: ; VEC4_INTERL1-NEXT: br label [[FOR_END]] @@ -840,7 +840,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC4_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC4_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] -; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]] +; VEC4_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] ; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[INIT]], i64 0 ; VEC4_INTERL2-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x float> [[DOTSPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer ; VEC4_INTERL2-NEXT: [[DOTSPLATINSERT6:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0 @@ -904,7 +904,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC4_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4 ; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VEC4_INTERL2: for.end.loopexit: ; VEC4_INTERL2-NEXT: br label [[FOR_END]] @@ -927,7 +927,7 @@ define void 
@fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC1_INTERL2-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC1_INTERL2-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC1_INTERL2-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] -; VEC1_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]] +; VEC1_INTERL2-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] ; VEC1_INTERL2-NEXT: br label [[VECTOR_BODY:%.*]] ; VEC1_INTERL2: vector.body: ; VEC1_INTERL2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] @@ -936,7 +936,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC1_INTERL2-NEXT: [[TMP5:%.*]] = fmul fast float [[DOTCAST5]], -5.000000e-01 ; VEC1_INTERL2-NEXT: [[DOTCAST6:%.*]] = sitofp i64 [[INDEX]] to float ; VEC1_INTERL2-NEXT: [[TMP6:%.*]] = fmul fast float [[TMP0]], [[DOTCAST6]] -; VEC1_INTERL2-NEXT: [[OFFSET_IDX7:%.*]] = fadd fast float [[TMP6]], [[INIT]] +; VEC1_INTERL2-NEXT: [[OFFSET_IDX7:%.*]] = fadd fast float [[INIT]], [[TMP6]] ; VEC1_INTERL2-NEXT: [[TMP7:%.*]] = fadd fast float [[OFFSET_IDX7]], [[TMP0]] ; VEC1_INTERL2-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, ptr [[A:%.*]], i64 [[INDEX]] ; VEC1_INTERL2-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[TMP4]] @@ -982,7 +982,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC1_INTERL2-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4 ; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VEC1_INTERL2: for.end.loopexit: ; VEC1_INTERL2-NEXT: br label [[FOR_END]] @@ -1005,7 +1005,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END:%.*]] = fadd fast float [[TMP2]], 0x3FB99999A0000000 ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTCAST2:%.*]] = uitofp nneg i64 [[N_VEC]] to float ; VEC2_INTERL1_PRED_STORE-NEXT: [[TMP3:%.*]] = fmul fast float [[TMP0]], [[DOTCAST2]] -; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END3:%.*]] = fadd fast float [[TMP3]], [[INIT:%.*]] +; VEC2_INTERL1_PRED_STORE-NEXT: [[IND_END3:%.*]] = fadd fast float [[INIT:%.*]], [[TMP3]] ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[INIT]], i64 0 ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x float> [[DOTSPLATINSERT]], <2 x float> poison, <2 x i32> zeroinitializer ; VEC2_INTERL1_PRED_STORE-NEXT: [[DOTSPLATINSERT5:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0 @@ -1054,7 +1054,7 @@ define void @fp_iv_loop3(float %init, ptr noalias nocapture %A, ptr noalias noca ; VEC2_INTERL1_PRED_STORE-NEXT: store float [[CONV1]], ptr [[ARRAYIDX6]], align 4 ; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; 
VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; VEC2_INTERL1_PRED_STORE: for.end: ; VEC2_INTERL1_PRED_STORE-NEXT: ret void @@ -1141,7 +1141,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) { ; VEC4_INTERL1-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC4_INTERL1-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC4_INTERL1-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC4_INTERL1-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC4_INTERL1-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VEC4_INTERL1: for.end.loopexit: ; VEC4_INTERL1-NEXT: br label [[FOR_END]] @@ -1189,7 +1189,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) { ; VEC4_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC4_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC4_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC4_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC4_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VEC4_INTERL2: for.end.loopexit: ; VEC4_INTERL2-NEXT: br label [[FOR_END]] @@ -1239,7 +1239,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) { ; VEC1_INTERL2-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC1_INTERL2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC1_INTERL2-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC1_INTERL2-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC1_INTERL2-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VEC1_INTERL2: for.end.loopexit: ; VEC1_INTERL2-NEXT: br label [[FOR_END]] @@ -1280,7 +1280,7 @@ define void @fp_iv_loop4(ptr noalias nocapture %A, i32 %N) { ; VEC2_INTERL1_PRED_STORE-NEXT: [[CONV1]] = fadd fast float [[X_06]], 5.000000e-01 ; VEC2_INTERL1_PRED_STORE-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1 ; VEC2_INTERL1_PRED_STORE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; VEC2_INTERL1_PRED_STORE-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; VEC2_INTERL1_PRED_STORE-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]] ; VEC2_INTERL1_PRED_STORE: for.end: ; VEC2_INTERL1_PRED_STORE-NEXT: ret void diff --git a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll index 1d7ead0a8e49b..d19ca172a8c0a 100644 --- a/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll +++ b/llvm/test/Transforms/LoopVectorize/if-conversion-nest.ll @@ -19,8 +19,8 @@ define i32 @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) { ; CHECK-NEXT: [[TMP4:%.*]] = add nuw nsw i64 [[TMP3]], 4 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP4]] ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP4]] -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]] -; 
CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: @@ -71,7 +71,7 @@ define i32 @foo(ptr nocapture %A, ptr nocapture %B, i32 %n) { ; CHECK-NEXT: store i32 [[X_0]], ptr [[ARRAYIDX]], align 4 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]] ; CHECK: for.end.loopexit: ; CHECK-NEXT: br label [[FOR_END]] diff --git a/llvm/test/Transforms/LoopVectorize/induction.ll b/llvm/test/Transforms/LoopVectorize/induction.ll index 45674acaae538..08d05a1e2db69 100644 --- a/llvm/test/Transforms/LoopVectorize/induction.ll +++ b/llvm/test/Transforms/LoopVectorize/induction.ll @@ -90,7 +90,7 @@ define void @multi_int_induction(ptr %A, i32 %N) { ; IND-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1 ; IND-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; IND-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; IND-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; IND-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; IND-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; IND: for.end: ; IND-NEXT: ret void @@ -134,7 +134,7 @@ define void @multi_int_induction(ptr %A, i32 %N) { ; UNROLL-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1 ; UNROLL-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; UNROLL-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; UNROLL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; UNROLL-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; UNROLL-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; UNROLL: for.end: ; UNROLL-NEXT: ret void @@ -227,7 +227,7 @@ define void @multi_int_induction(ptr %A, i32 %N) { ; INTERLEAVE-NEXT: [[INC]] = add nsw i32 [[COUNT_09]], 1 ; INTERLEAVE-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1 ; INTERLEAVE-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32 -; INTERLEAVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]] +; INTERLEAVE-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]] ; INTERLEAVE-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]] ; INTERLEAVE: for.end: ; INTERLEAVE-NEXT: ret void @@ -361,7 +361,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; IND-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IND-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; IND: middle.block: -; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; IND-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]] ; IND: scalar.ph: ; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] @@ -374,7 +374,7 @@ define void 
@scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; IND-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]] ; IND-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]] ; IND-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4 -; IND-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]] +; IND-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]] ; IND-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]] ; IND-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4 ; IND-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -428,7 +428,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; UNROLL-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]] ; UNROLL: scalar.ph: ; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] @@ -441,7 +441,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; UNROLL-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]] ; UNROLL-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]] ; UNROLL-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4 -; UNROLL-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]] +; UNROLL-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]] ; UNROLL-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]] ; UNROLL-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4 ; UNROLL-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -571,7 +571,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; INTERLEAVE-NEXT: [[TMP15:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[TMP15]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP9:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]] ; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ] @@ -584,7 +584,7 @@ define void @scalar_use(ptr %a, float %b, i64 %offset, i64 %offset2, i64 %n) { ; INTERLEAVE-NEXT: [[TMP17:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]] ; INTERLEAVE-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP17]], i64 [[OFFSET2]] ; INTERLEAVE-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4 -; INTERLEAVE-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]] +; INTERLEAVE-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]] ; INTERLEAVE-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]] ; INTERLEAVE-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4 ; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1 @@ -1636,7 +1636,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; IND-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4 ; IND-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]] ; IND-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]] -; IND-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]] +; IND-NEXT: [[BOUND1:%.*]] = icmp ult ptr 
[[A]], [[SCEVGEP1]] ; IND-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; IND-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; IND: vector.ph: @@ -1676,7 +1676,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; IND-NEXT: store i32 [[TMP21]], ptr [[TMP22]], align 1 ; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; IND-NEXT: [[TMP23:%.*]] = trunc i64 [[I_NEXT]] to i32 -; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP23]], [[N]] +; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP23]] ; IND-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; IND: for.end: ; IND-NEXT: ret void @@ -1699,7 +1699,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; UNROLL-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4 ; UNROLL-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]] ; UNROLL-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]] -; UNROLL-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]] +; UNROLL-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] ; UNROLL-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; UNROLL-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; UNROLL: vector.ph: @@ -1753,7 +1753,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; UNROLL-NEXT: store i32 [[TMP32]], ptr [[TMP33]], align 1 ; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; UNROLL-NEXT: [[TMP34:%.*]] = trunc i64 [[I_NEXT]] to i32 -; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP34]], [[N]] +; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP34]] ; UNROLL-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; UNROLL: for.end: ; UNROLL-NEXT: ret void @@ -1855,7 +1855,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; INTERLEAVE-NEXT: [[TMP8:%.*]] = or disjoint i64 [[TMP7]], 4 ; INTERLEAVE-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP8]] ; INTERLEAVE-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[SCEVGEP]], [[SCEVGEP2]] -; INTERLEAVE-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]] +; INTERLEAVE-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]] ; INTERLEAVE-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; INTERLEAVE-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; INTERLEAVE: vector.ph: @@ -1920,7 +1920,7 @@ define void @scalarize_induction_variable_04(ptr %a, ptr %p, i32 %n) { ; INTERLEAVE-NEXT: store i32 [[TMP38]], ptr [[TMP39]], align 1 ; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; INTERLEAVE-NEXT: [[TMP40:%.*]] = trunc i64 [[I_NEXT]] to i32 -; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP40]], [[N]] +; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP40]] ; INTERLEAVE-NEXT: br i1 [[COND]], label [[FOR_END:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP23:![0-9]+]] ; INTERLEAVE: for.end: ; INTERLEAVE-NEXT: ret void @@ -2535,13 +2535,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) { ; IND: for.body: ; IND-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; IND-NEXT: [[TMP11:%.*]] = trunc i64 [[I]] to i32 -; IND-NEXT: [[TMP12:%.*]] = add i32 [[TMP11]], [[A]] +; IND-NEXT: [[TMP12:%.*]] = add i32 [[A]], [[TMP11]] ; IND-NEXT: [[TMP13:%.*]] = trunc i32 [[TMP12]] to i16 ; IND-NEXT: [[TMP14:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1 ; 
IND-NEXT: store i16 [[TMP13]], ptr [[TMP14]], align 2 ; IND-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; IND-NEXT: [[TMP15:%.*]] = trunc i64 [[I_NEXT]] to i32 -; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP15]], [[N]] +; IND-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP15]] ; IND-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; IND: for.end: ; IND-NEXT: ret void @@ -2594,13 +2594,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) { ; UNROLL: for.body: ; UNROLL-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; UNROLL-NEXT: [[TMP19:%.*]] = trunc i64 [[I]] to i32 -; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[TMP19]], [[A]] +; UNROLL-NEXT: [[TMP20:%.*]] = add i32 [[A]], [[TMP19]] ; UNROLL-NEXT: [[TMP21:%.*]] = trunc i32 [[TMP20]] to i16 ; UNROLL-NEXT: [[TMP22:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1 ; UNROLL-NEXT: store i16 [[TMP21]], ptr [[TMP22]], align 2 ; UNROLL-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; UNROLL-NEXT: [[TMP23:%.*]] = trunc i64 [[I_NEXT]] to i32 -; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP23]], [[N]] +; UNROLL-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP23]] ; UNROLL-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; UNROLL: for.end: ; UNROLL-NEXT: ret void @@ -2730,13 +2730,13 @@ define void @iv_vector_and_scalar_users(ptr %p, i32 %a, i32 %n) { ; INTERLEAVE: for.body: ; INTERLEAVE-NEXT: [[I:%.*]] = phi i64 [ [[I_NEXT:%.*]], [[FOR_BODY]] ], [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ] ; INTERLEAVE-NEXT: [[TMP31:%.*]] = trunc i64 [[I]] to i32 -; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[TMP31]], [[A]] +; INTERLEAVE-NEXT: [[TMP32:%.*]] = add i32 [[A]], [[TMP31]] ; INTERLEAVE-NEXT: [[TMP33:%.*]] = trunc i32 [[TMP32]] to i16 ; INTERLEAVE-NEXT: [[TMP34:%.*]] = getelementptr inbounds [[PAIR_I16]], ptr [[P]], i64 [[I]], i32 1 ; INTERLEAVE-NEXT: store i16 [[TMP33]], ptr [[TMP34]], align 2 ; INTERLEAVE-NEXT: [[I_NEXT]] = add nuw nsw i64 [[I]], 1 ; INTERLEAVE-NEXT: [[TMP35:%.*]] = trunc i64 [[I_NEXT]] to i32 -; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[TMP35]], [[N]] +; INTERLEAVE-NEXT: [[COND:%.*]] = icmp eq i32 [[N]], [[TMP35]] ; INTERLEAVE-NEXT: br i1 [[COND]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP27:![0-9]+]] ; INTERLEAVE: for.end: ; INTERLEAVE-NEXT: ret void @@ -3516,7 +3516,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; IND-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; IND-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; IND-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; IND-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; IND-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; IND-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; IND-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; IND-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -3525,7 +3525,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; IND: vector.ph: ; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 510 ; IND-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; IND-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; IND-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; IND-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT]], i64 0 ; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer @@ -3535,7 +3535,7 @@ define void @wrappingindvars1(i8 %t, 
i32 %len, ptr %A) { ; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8 -; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST4]], [[T]] +; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]] ; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4 @@ -3582,7 +3582,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; UNROLL-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; UNROLL-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; UNROLL-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; UNROLL-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; UNROLL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; UNROLL-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -3591,7 +3591,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 508 ; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; UNROLL-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT]], i64 0 ; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer @@ -3602,7 +3602,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], ; UNROLL-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8 -; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]] +; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]] ; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 8 @@ -3726,7 +3726,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; INTERLEAVE-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; INTERLEAVE-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; INTERLEAVE-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -3735,7 +3735,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 504 ; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; INTERLEAVE-NEXT: [[IND_END2:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[EXT]], i64 0 ; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = 
shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer @@ -3746,7 +3746,7 @@ define void @wrappingindvars1(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], ; INTERLEAVE-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8 -; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]] +; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]] ; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 @@ -3900,7 +3900,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; IND-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; IND-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; IND-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; IND-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; IND-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; IND-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; IND-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; IND-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -3909,7 +3909,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; IND: vector.ph: ; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 510 ; IND-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; IND-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; IND-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; IND-NEXT: [[EXT_MUL5:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; IND-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL5]], 2 ; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[EXT_MUL]], i64 0 @@ -3920,7 +3920,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; IND-NEXT: [[DOTCAST4:%.*]] = trunc i32 [[INDEX]] to i8 -; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST4]], [[T]] +; IND-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST4]] ; IND-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; IND-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP11]], align 4 @@ -3969,7 +3969,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; UNROLL-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; UNROLL-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; UNROLL-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; UNROLL-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; UNROLL-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; UNROLL-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; UNROLL-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -3978,7 +3978,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 508 ; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; UNROLL-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; UNROLL-NEXT: [[EXT_MUL6:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; UNROLL-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL6]], 2 ; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement 
<2 x i32> poison, i32 [[EXT_MUL]], i64 0 @@ -3990,7 +3990,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], ; UNROLL-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8 -; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]] +; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]] ; UNROLL-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; UNROLL-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; UNROLL-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 8 @@ -4119,7 +4119,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE-NEXT: [[TMP2:%.*]] = xor i8 [[T]], -1 ; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp ult i8 [[TMP2]], [[TMP1]] ; INTERLEAVE-NEXT: [[TMP4:%.*]] = trunc i32 [[LEN]] to i8 -; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[TMP4]], [[T]] +; INTERLEAVE-NEXT: [[TMP5:%.*]] = add i8 [[T]], [[TMP4]] ; INTERLEAVE-NEXT: [[TMP6:%.*]] = icmp slt i8 [[TMP5]], [[T]] ; INTERLEAVE-NEXT: [[TMP7:%.*]] = icmp ugt i32 [[LEN]], 255 ; INTERLEAVE-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] @@ -4128,7 +4128,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], 504 ; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i32 [[N_VEC]] to i8 -; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[DOTCAST]], [[T]] +; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i8 [[T]], [[DOTCAST]] ; INTERLEAVE-NEXT: [[EXT_MUL6:%.*]] = add nuw nsw i32 [[N_VEC]], [[EXT]] ; INTERLEAVE-NEXT: [[IND_END1:%.*]] = shl nuw nsw i32 [[EXT_MUL6]], 2 ; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[EXT_MUL]], i64 0 @@ -4140,7 +4140,7 @@ define void @wrappingindvars2(i8 %t, i32 %len, ptr %A) { ; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], ; INTERLEAVE-NEXT: [[DOTCAST5:%.*]] = trunc i32 [[INDEX]] to i8 -; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[DOTCAST5]], [[T]] +; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i8 [[T]], [[DOTCAST5]] ; INTERLEAVE-NEXT: [[TMP10:%.*]] = sext i8 [[OFFSET_IDX]] to i64 ; INTERLEAVE-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP10]] ; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds i8, ptr [[TMP11]], i64 16 @@ -4262,7 +4262,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) { ; IND-NEXT: [[TMP2:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; IND-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; IND: middle.block: -; IND-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]] +; IND-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]] ; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; IND: scalar.ph: ; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ] @@ -4299,7 +4299,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) { ; UNROLL-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]] +; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq 
i32 [[K]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; UNROLL: scalar.ph: ; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ] @@ -4376,7 +4376,7 @@ define void @veciv(ptr nocapture %a, i32 %start, i32 %k) { ; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[K]] +; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[K]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ] @@ -4474,7 +4474,7 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) { ; IND-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; IND-NEXT: br i1 [[TMP2]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; IND: middle.block: -; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]] +; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]] ; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; IND: scalar.ph: ; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] @@ -4517,7 +4517,7 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) { ; UNROLL-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; UNROLL: middle.block: -; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]] +; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; UNROLL: scalar.ph: ; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] @@ -4609,7 +4609,7 @@ define void @trunciv(ptr nocapture %a, i32 %start, i64 %k) { ; INTERLEAVE-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]] ; INTERLEAVE: middle.block: -; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[K]] +; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[K]], [[N_VEC]] ; INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[FOR_BODY_PREHEADER:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] @@ -4694,7 +4694,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; IND-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; IND: vector.ph: ; IND-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -2 -; IND-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]] +; IND-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]] ; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[I]], i64 0 ; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; IND-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -4702,7 +4702,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; IND: 
vector.body: ; IND-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; IND-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] -; IND-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]] +; IND-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]] ; IND-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64 ; IND-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]] ; IND-NEXT: store <2 x i32> [[VEC_IND]], ptr [[TMP2]], align 4 @@ -4734,7 +4734,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; UNROLL-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -4 -; UNROLL-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]] +; UNROLL-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]] ; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[I]], i64 0 ; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; UNROLL-NEXT: [[INDUCTION:%.*]] = add <2 x i32> [[DOTSPLAT]], @@ -4743,7 +4743,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; UNROLL-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; UNROLL-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], -; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]] +; UNROLL-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]] ; UNROLL-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64 ; UNROLL-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]] ; UNROLL-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 8 @@ -4823,7 +4823,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; INTERLEAVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]] ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i32 [[TMP0]], -8 -; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i32 [[N_VEC]], [[I]] +; INTERLEAVE-NEXT: [[IND_END:%.*]] = add i32 [[I]], [[N_VEC]] ; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[I]], i64 0 ; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; INTERLEAVE-NEXT: [[INDUCTION:%.*]] = add <4 x i32> [[DOTSPLAT]], @@ -4832,7 +4832,7 @@ define void @nonprimary(ptr nocapture %a, i32 %start, i32 %i, i32 %k) { ; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <4 x i32> [ [[INDUCTION]], [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ] ; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <4 x i32> [[VEC_IND]], -; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[INDEX]], [[I]] +; INTERLEAVE-NEXT: [[OFFSET_IDX:%.*]] = add i32 [[I]], [[INDEX]] ; INTERLEAVE-NEXT: [[TMP1:%.*]] = sext i32 [[OFFSET_IDX]] to i64 ; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[A:%.*]], i64 [[TMP1]] ; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds i8, ptr [[TMP2]], i64 16 @@ -6307,7 +6307,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; IND: vector.ph: ; IND-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -2 ; IND-NEXT: 
[[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; IND-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]] +; IND-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] ; IND-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 ; IND-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; IND-NEXT: [[TMP15:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], @@ -6328,7 +6328,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; IND-NEXT: br i1 [[TMP19]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]] ; IND: middle.block: ; IND-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <2 x i32> [[VEC_IND]], i64 1 -; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; IND-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; IND-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; IND: scalar.ph: ; IND-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] @@ -6378,7 +6378,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL: vector.ph: ; UNROLL-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -4 ; UNROLL-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]] +; UNROLL-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] ; UNROLL-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <2 x i32> poison, i32 [[STEP]], i64 0 ; UNROLL-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i32> [[DOTSPLATINSERT]], <2 x i32> poison, <2 x i32> zeroinitializer ; UNROLL-NEXT: [[TMP15:%.*]] = mul nuw <2 x i32> [[DOTSPLAT]], @@ -6403,7 +6403,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; UNROLL-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]] ; UNROLL: middle.block: ; UNROLL-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <2 x i32> [[STEP_ADD]], i64 1 -; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; UNROLL-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; UNROLL-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; UNROLL: scalar.ph: ; UNROLL-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] @@ -6536,7 +6536,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; INTERLEAVE: vector.ph: ; INTERLEAVE-NEXT: [[N_VEC:%.*]] = and i64 [[N]], -8 ; INTERLEAVE-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32 -; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[DOTCAST]], [[STEP]] +; INTERLEAVE-NEXT: [[IND_END:%.*]] = mul i32 [[STEP]], [[DOTCAST]] ; INTERLEAVE-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[STEP]], i64 0 ; INTERLEAVE-NEXT: [[DOTSPLAT:%.*]] = shufflevector <4 x i32> [[DOTSPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer ; INTERLEAVE-NEXT: [[TMP15:%.*]] = mul <4 x i32> [[DOTSPLAT]], @@ -6561,7 +6561,7 @@ define void @test_optimized_cast_induction_feeding_first_order_recurrence(i64 %n ; INTERLEAVE-NEXT: br i1 [[TMP21]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP54:![0-9]+]] ; INTERLEAVE: middle.block: ; INTERLEAVE-NEXT: [[VECTOR_RECUR_EXTRACT:%.*]] = extractelement <4 x i32> [[STEP_ADD]], i64 3 -; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]] +; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]] ; 
INTERLEAVE-NEXT: br i1 [[CMP_N]], label [[EXIT:%.*]], label [[SCALAR_PH]] ; INTERLEAVE: scalar.ph: ; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ] diff --git a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll index 6fc52ab3f26e0..29ce8457e8d65 100644 --- a/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll +++ b/llvm/test/Transforms/LoopVectorize/interleaved-accesses.ll @@ -1481,7 +1481,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) { ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A]], i64 2 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP3]] ; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[TMP5]], i64 6 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]] ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll index 50c67040cfb2a..45de11141235e 100644 --- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll +++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization-2.ll @@ -28,8 +28,8 @@ define void @inv_val_store_to_inv_address_conditional_diff_values_ic(ptr %a, i64 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]] ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]] -; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] ; CHECK: vector.ph: @@ -120,7 +120,7 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b ; CHECK-LABEL: @inv_val_store_to_inv_address_conditional_inv( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[NTRUNC:%.*]] = trunc i64 [[N:%.*]] to i32 -; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[NTRUNC]], [[K:%.*]] +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[K:%.*]], [[NTRUNC]] ; CHECK-NEXT: [[SMAX2:%.*]] = call i64 @llvm.smax.i64(i64 [[N]], i64 1) ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp slt i64 [[N]], 4 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_MEMCHECK:%.*]] @@ -128,8 +128,8 @@ define void @inv_val_store_to_inv_address_conditional_inv(ptr %a, i64 %n, ptr %b ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]] ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4 -; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]] -; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]] +; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]] +; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]] ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]] ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]] 
 ; CHECK: vector.ph:
@@ -217,8 +217,8 @@ define i32 @variant_val_store_to_inv_address(ptr %a, i64 %n, ptr %b, i32 %k) {
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
index 20d612a548b15..63381454cc590 100644
--- a/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
+++ b/llvm/test/Transforms/LoopVectorize/invariant-store-vectorization.ll
@@ -27,8 +27,8 @@ define i32 @inv_val_store_to_inv_address_with_reduction(ptr %a, i64 %n, ptr %b)
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
@@ -101,8 +101,8 @@ define void @inv_val_store_to_inv_address(ptr %a, i64 %n, ptr %b) {
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[A]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[B]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[A]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
@@ -176,8 +176,8 @@ define void @inv_val_store_to_inv_address_conditional(ptr %a, i64 %n, ptr %b, i3
 ; CHECK-NEXT: [[TMP0:%.*]] = shl i64 [[N]], 2
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP0]]
 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 4
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP1]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP1]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
@@ -360,7 +360,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
 ; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[J_022]] to i64
 ; CHECK-NEXT: [[ARRAYIDX5_PROMOTED:%.*]] = load i32, ptr [[ARRAYIDX5]], align 4
 ; CHECK-NEXT: [[TMP5:%.*]] = xor i32 [[J_022]], -1
-; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], [[ITR]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i32 [[ITR]], [[TMP5]]
 ; CHECK-NEXT: [[TMP7:%.*]] = zext i32 [[TMP6]] to i64
 ; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw i64 [[TMP7]], 1
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[TMP6]], 3
@@ -369,12 +369,12 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
 ; CHECK-NEXT: [[TMP9:%.*]] = shl nuw nsw i64 [[TMP4]], 2
 ; CHECK-NEXT: [[SCEVGEP1:%.*]] = getelementptr i8, ptr [[VAR2]], i64 [[TMP9]]
 ; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[J_022]], -1
-; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[TMP10]], [[ITR]]
+; CHECK-NEXT: [[TMP11:%.*]] = add i32 [[ITR]], [[TMP10]]
 ; CHECK-NEXT: [[TMP12:%.*]] = zext i32 [[TMP11]] to i64
 ; CHECK-NEXT: [[TMP13:%.*]] = add nuw nsw i64 [[TMP4]], [[TMP12]]
 ; CHECK-NEXT: [[TMP14:%.*]] = shl nuw nsw i64 [[TMP13]], 2
 ; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[SCEVGEP2]], i64 [[TMP14]]
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP3]], [[VAR1]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[VAR1]], [[SCEVGEP3]]
 ; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[SCEVGEP1]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
@@ -414,7 +414,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
 ; CHECK-NEXT: store i32 [[TMP22]], ptr [[ARRAYIDX5]], align 4
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_INC8_LOOPEXIT_LOOPEXIT:%.*]], label [[FOR_BODY3]], !llvm.loop [[LOOP27:![0-9]+]]
 ; CHECK: for.inc8.loopexit.loopexit:
 ; CHECK-NEXT: br label [[FOR_INC8_LOOPEXIT]]
@@ -424,7 +424,7 @@ define i32 @multiple_uniform_stores(ptr nocapture %var1, ptr nocapture readonly
 ; CHECK-NEXT: [[J_1_LCSSA]] = phi i32 [ [[J_022]], [[FOR_COND1_PREHEADER]] ], [ [[ITR]], [[FOR_INC8_LOOPEXIT]] ]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT24]] = add nuw nsw i64 [[INDVARS_IV23]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV25:%.*]] = trunc i64 [[INDVARS_IV_NEXT24]] to i32
-; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[LFTR_WIDEIV25]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV25]]
 ; CHECK-NEXT: br i1 [[EXITCOND26]], label [[FOR_END10_LOOPEXIT:%.*]], label [[FOR_COND1_PREHEADER]]
 ; CHECK: for.end10.loopexit:
 ; CHECK-NEXT: br label [[FOR_END10]]
@@ -507,7 +507,7 @@ define i32 @multiple_uniform_stores_conditional(ptr nocapture %var1, ptr nocaptu
 ; CHECK-NEXT: store i32 [[TMP5]], ptr [[ARRAYIDX5]], align 4
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_INC8_LOOPEXIT:%.*]], label [[FOR_BODY3]]
 ; CHECK: for.inc8.loopexit:
 ; CHECK-NEXT: br label [[FOR_INC8]]
@@ -515,7 +515,7 @@ define i32 @multiple_uniform_stores_conditional(ptr nocapture %var1, ptr nocaptu
 ; CHECK-NEXT: [[J_1_LCSSA]] = phi i32 [ [[J_022]], [[FOR_COND1_PREHEADER]] ], [ [[ITR]], [[FOR_INC8_LOOPEXIT]] ]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT24]] = add nuw nsw i64 [[INDVARS_IV23]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV25:%.*]] = trunc i64 [[INDVARS_IV_NEXT24]] to i32
-; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[LFTR_WIDEIV25]], [[ITR]]
+; CHECK-NEXT: [[EXITCOND26:%.*]] = icmp eq i32 [[ITR]], [[LFTR_WIDEIV25]]
 ; CHECK-NEXT: br i1 [[EXITCOND26]], label [[FOR_END10_LOOPEXIT:%.*]], label [[FOR_COND1_PREHEADER]]
 ; CHECK: for.end10.loopexit:
 ; CHECK-NEXT: br label [[FOR_END10]]
@@ -589,7 +589,7 @@ define void @unsafe_dep_uniform_load_store(i32 %arg, i32 %arg1, i64 %arg2, ptr %
 ; CHECK-NEXT: [[I13:%.*]] = add nsw i32 [[I12]], [[I9]]
 ; CHECK-NEXT: [[I14:%.*]] = trunc i32 [[I13]] to i16
 ; CHECK-NEXT: [[I15:%.*]] = trunc i64 [[I8]] to i32
-; CHECK-NEXT: [[I16:%.*]] = add i32 [[I15]], [[ARG:%.*]]
+; CHECK-NEXT: [[I16:%.*]] = add i32 [[ARG:%.*]], [[I15]]
 ; CHECK-NEXT: [[I17:%.*]] = zext i32 [[I16]] to i64
 ; CHECK-NEXT: [[I18:%.*]] = getelementptr inbounds i16, ptr [[I6]], i64 [[I17]]
 ; CHECK-NEXT: store i16 [[I14]], ptr [[I18]], align 2
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
index 873f6364f8281..c50bcf8ae88f5 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop-cond.ll
@@ -61,7 +61,7 @@ define float @cond_fadd(ptr noalias nocapture readonly %a, ptr noalias nocapture
 ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -176,7 +176,7 @@ define float @cond_cmp_sel(ptr noalias %a, ptr noalias %cond, i64 %N) {
 ; CHECK-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -294,7 +294,7 @@ define i32 @conditional_and(ptr noalias %A, ptr noalias %B, i32 %cond, i64 nound
 ; CHECK-NEXT: [[TMP28:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -428,7 +428,7 @@ define i32 @simple_chained_rdx(ptr noalias %a, ptr noalias %b, ptr noalias %cond
 ; CHECK-NEXT: [[TMP47:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP47]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -597,7 +597,7 @@ define i64 @nested_cond_and(ptr noalias nocapture readonly %a, ptr noalias nocap
 ; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP50:%.*]] = call i64 @llvm.vector.reduce.and.v4i64(<4 x i64> [[PREDPHI15]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -732,7 +732,7 @@ define i32 @cond-uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
 ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP27]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -896,7 +896,7 @@ define float @cond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
 ; CHECK-NEXT: br i1 [[TMP48]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP14:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP49:%.*]] = call fast float @llvm.vector.reduce.fadd.v4f32(float -0.000000e+00, <4 x float> [[PREDPHI15]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1038,7 +1038,7 @@ define i32 @uncond_cond(ptr noalias %src1, ptr noalias %src2, ptr noalias %cond,
 ; CHECK-NEXT: br i1 [[TMP28]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP16:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP29:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PREDPHI]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1167,7 +1167,7 @@ define i32 @uncond_cond_uncond(ptr noalias %src1, ptr noalias %src2, ptr noalias
 ; CHECK-NEXT: br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP18:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[TMP30:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP28]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
index e6936b19415d0..a226a5a36d63b 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction-inloop.ll
@@ -989,7 +989,7 @@ define float @reduction_fmuladd(ptr %a, ptr %b, i64 %n) {
 ; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP5]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP36:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1132,7 +1132,7 @@ define i32 @predicated_not_dominates_reduction(ptr nocapture noundef readonly %h
 ; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP38:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[I]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END7:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1221,7 +1221,7 @@ define i32 @predicated_not_dominates_reduction_twoadd(ptr nocapture noundef read
 ; CHECK-NEXT: [[TMP12:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP40:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[I]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[I]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_END7:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i32 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -1292,7 +1292,7 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
 ; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK: vector.body:
 ; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_LOAD_CONTINUE6:%.*]] ]
-; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[TMP51:%.*]], [[PRED_LOAD_CONTINUE6]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi i32 [ undef, [[VECTOR_PH]] ], [ [[TMP48:%.*]], [[PRED_LOAD_CONTINUE6]] ]
 ; CHECK-NEXT: [[TMP0:%.*]] = or disjoint i32 [[INDEX]], 1
 ; CHECK-NEXT: [[TMP1:%.*]] = or disjoint i32 [[INDEX]], 2
 ; CHECK-NEXT: [[TMP2:%.*]] = or disjoint i32 [[INDEX]], 3
@@ -1354,21 +1354,21 @@ define i32 @predicated_or_dominates_reduction(ptr %b) {
 ; CHECK: pred.load.continue6:
 ; CHECK-NEXT: [[TMP43:%.*]] = phi <4 x i32> [ [[TMP37]], [[PRED_LOAD_CONTINUE4]] ], [ [[TMP42]], [[PRED_LOAD_IF5]] ]
 ; CHECK-NEXT: [[TMP44:%.*]] = icmp ne <4 x i32> [[TMP43]], zeroinitializer
-; CHECK-NEXT: [[TMP46:%.*]] = xor <4 x i1> [[TMP19]],
-; CHECK-NEXT: [[TMP47:%.*]] = select <4 x i1> [[TMP46]], <4 x i1> , <4 x i1> [[TMP44]]
-; CHECK-NEXT: [[TMP48:%.*]] = bitcast <4 x i1> [[TMP47]] to i4
-; CHECK-NEXT: [[TMP49:%.*]] = call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 [[TMP48]])
-; CHECK-NEXT: [[TMP50:%.*]] = zext nneg i4 [[TMP49]] to i32
-; CHECK-NEXT: [[TMP51]] = add i32 [[VEC_PHI]], [[TMP50]]
+; CHECK-NEXT: [[NOT_:%.*]] = xor <4 x i1> [[TMP19]],
+; CHECK-NEXT: [[DOTNOT7:%.*]] = select <4 x i1> [[NOT_]], <4 x i1> , <4 x i1> [[TMP44]]
+; CHECK-NEXT: [[TMP45:%.*]] = bitcast <4 x i1> [[DOTNOT7]] to i4
+; CHECK-NEXT: [[TMP46:%.*]] = call range(i4 0, 5) i4 @llvm.ctpop.i4(i4 [[TMP45]])
+; CHECK-NEXT: [[TMP47:%.*]] = zext nneg i4 [[TMP46]] to i32
+; CHECK-NEXT: [[TMP48]] = add i32 [[VEC_PHI]], [[TMP47]]
 ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 4
-; CHECK-NEXT: [[TMP52:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
-; CHECK-NEXT: br i1 [[TMP52]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
+; CHECK-NEXT: [[TMP49:%.*]] = icmp eq i32 [[INDEX_NEXT]], 1000
+; CHECK-NEXT: br i1 [[TMP49]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP42:![0-9]+]]
 ; CHECK: middle.block:
 ; CHECK-NEXT: br i1 true, label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: br label [[FOR_BODY:%.*]]
 ; CHECK: for.cond.cleanup:
-; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ poison, [[FOR_INC:%.*]] ], [ [[TMP51]], [[MIDDLE_BLOCK]] ]
+; CHECK-NEXT: [[A_1_LCSSA:%.*]] = phi i32 [ poison, [[FOR_INC:%.*]] ], [ [[TMP48]], [[MIDDLE_BLOCK]] ]
 ; CHECK-NEXT: ret i32 [[A_1_LCSSA]]
 ; CHECK: for.body:
 ; CHECK-NEXT: br i1 poison, label [[LOR_LHS_FALSE:%.*]], label [[IF_THEN:%.*]]
diff --git a/llvm/test/Transforms/LoopVectorize/reduction.ll b/llvm/test/Transforms/LoopVectorize/reduction.ll
index b66ce4047ad95..89fd1a9a73f2f 100644
--- a/llvm/test/Transforms/LoopVectorize/reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/reduction.ll
@@ -49,7 +49,7 @@ define i32 @reduction_sum(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP13]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP3:![0-9]+]]
 ; CHECK: ._crit_edge.loopexit:
 ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -130,7 +130,7 @@ define i32 @reduction_prod(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[TMP17]] = mul i32 [[TMP16]], [[TMP13]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP5:![0-9]+]]
 ; CHECK: ._crit_edge.loopexit:
 ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -211,7 +211,7 @@ define i32 @reduction_mix(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP14]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP7:![0-9]+]]
 ; CHECK: ._crit_edge.loopexit:
 ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -292,7 +292,7 @@ define i32 @reduction_mul(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[TMP17]] = mul i32 [[TMP16]], [[SUM_02]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE_LOOPEXIT]], label [[DOTLR_PH]], !llvm.loop [[LOOP9:![0-9]+]]
 ; CHECK: ._crit_edge.loopexit:
 ; CHECK-NEXT: [[DOTLCSSA:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
@@ -369,7 +369,7 @@ define i32 @start_at_non_zero(ptr %in, ptr %coeff, ptr %out, i32 %n) {
 ; CHECK-NEXT: [[ADD]] = add nsw i32 [[MUL]], [[SUM_09]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: [[ADD_LCSSA:%.*]] = phi i32 [ [[ADD]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -445,7 +445,7 @@ define i32 @reduction_and(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[AND]] = and i32 [[ADD]], [[RESULT_08]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: [[AND_LCSSA:%.*]] = phi i32 [ [[AND]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -521,7 +521,7 @@ define i32 @reduction_or(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[OR]] = or i32 [[ADD]], [[RESULT_08]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: [[OR_LCSSA:%.*]] = phi i32 [ [[OR]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -597,7 +597,7 @@ define i32 @reduction_xor(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[XOR]] = xor i32 [[ADD]], [[RESULT_08]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: [[XOR_LCSSA:%.*]] = phi i32 [ [[XOR]], [[FOR_BODY]] ], [ [[TMP6]], [[MIDDLE_BLOCK]] ]
@@ -646,7 +646,7 @@ define i32 @reduction_sub_rhs(i32 %n, ptr %A) {
 ; CHECK-NEXT: [[SUB]] = sub nsw i32 [[TMP0]], [[X_05]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: br label [[FOR_END]]
@@ -714,7 +714,7 @@ define i32 @reduction_sub_lhs(i32 %n, ptr %A) {
 ; CHECK-NEXT: [[SUB]] = sub nsw i32 [[X_05]], [[TMP5]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: [[SUB_LCSSA:%.*]] = phi i32 [ [[SUB]], [[FOR_BODY]] ], [ [[TMP4]], [[MIDDLE_BLOCK]] ]
@@ -1083,7 +1083,7 @@ define i32 @reduction_sum_multiuse(i32 %n, ptr %A, ptr %B) {
 ; CHECK-NEXT: [[TMP17]] = add i32 [[TMP16]], [[TMP13]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[DOT_CRIT_EDGE]], label [[DOTLR_PH]], !llvm.loop [[LOOP23:![0-9]+]]
 ; CHECK: ._crit_edge:
 ; CHECK-NEXT: [[SUM_COPY:%.*]] = phi i32 [ [[TMP17]], [[DOTLR_PH]] ], [ [[TMP9]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/LoopVectorize/runtime-check.ll b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
index d5df8afc80a79..9521c0933fe87 100644
--- a/llvm/test/Transforms/LoopVectorize/runtime-check.ll
+++ b/llvm/test/Transforms/LoopVectorize/runtime-check.ll
@@ -53,7 +53,7 @@ define i32 @foo(ptr nocapture %a, ptr nocapture %b, i32 %n) nounwind uwtable ssp
 ; CHECK-NEXT: store float [[MUL]], ptr [[ARRAYIDX2]], align 4, !dbg [[DBG9]]
 ; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1, !dbg [[DBG9]]
 ; CHECK-NEXT: [[LFTR_WIDEIV:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32, !dbg [[DBG9]]
-; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[LFTR_WIDEIV]], [[N]], !dbg [[DBG9]]
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i32 [[N]], [[LFTR_WIDEIV]], !dbg [[DBG9]]
 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT]], label [[FOR_BODY]], !dbg [[DBG9]], !llvm.loop [[LOOP13:![0-9]+]]
 ; CHECK: for.end.loopexit:
 ; CHECK-NEXT: br label [[FOR_END]], !dbg [[DBG14:![0-9]+]]
@@ -144,7 +144,7 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
 ; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP20:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[LOOPEXIT:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ], [ 0, [[VECTOR_MEMCHECK]] ]
@@ -157,7 +157,7 @@ define void @test_runtime_check(ptr %a, float %b, i64 %offset, i64 %offset2, i64
 ; CHECK-NEXT: [[TMP13:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
 ; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP13]], i64 [[OFFSET2]]
 ; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; CHECK-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B]]
+; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B]], [[L2]]
 ; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
 ; CHECK-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
 ; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
@@ -231,7 +231,7 @@ define void @test_runtime_check2(ptr %a, float %b, i64 %offset, i64 %offset2, i6
 ; CHECK-NEXT: [[TMP1:%.*]] = getelementptr float, ptr [[A]], i64 [[IV]]
 ; CHECK-NEXT: [[ARR_IDX2:%.*]] = getelementptr float, ptr [[TMP1]], i64 [[OFFSET2:%.*]]
 ; CHECK-NEXT: [[L2:%.*]] = load float, ptr [[ARR_IDX2]], align 4
-; CHECK-NEXT: [[M:%.*]] = fmul fast float [[L2]], [[B:%.*]]
+; CHECK-NEXT: [[M:%.*]] = fmul fast float [[B:%.*]], [[L2]]
 ; CHECK-NEXT: [[AD:%.*]] = fadd fast float [[L1]], [[M]]
 ; CHECK-NEXT: store float [[AD]], ptr [[ARR_IDX]], align 4
 ; CHECK-NEXT: [[TMP2:%.*]] = getelementptr float, ptr [[C:%.*]], i64 [[IV]]
diff --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
index 030eb9e76b51a..b97ceba8b0116 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -12,7 +12,7 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -94,7 +94,7 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -180,7 +180,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
@@ -257,7 +257,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[TMP0]], 2
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ugt i64 [[TMP1]], [[N:%.*]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N:%.*]], [[TMP1]]
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
diff --git a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
index 629b15c824f67..63ca45495335f 100644
--- a/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
+++ b/llvm/test/Transforms/LoopVectorize/uniform-args-call-variants.ll
@@ -23,7 +23,7 @@ define void @test_uniform(ptr noalias %dst, ptr readonly %src, i64 %uniform , i6
 ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
@@ -87,7 +87,7 @@ define void @test_uniform_not_invariant(ptr noalias %dst, ptr readonly %src, i64
 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP:%.*]], label [[SCALAR_PH]]
 ; CHECK: scalar.ph:
 ; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
diff --git a/llvm/test/Transforms/PGOProfile/chr.ll b/llvm/test/Transforms/PGOProfile/chr.ll
index 38e8f8536a19c..34e39fe37979a 100644
--- a/llvm/test/Transforms/PGOProfile/chr.ll
+++ b/llvm/test/Transforms/PGOProfile/chr.ll
@@ -1931,15 +1931,15 @@ bb4:
 define i32 @test_chr_21(i64 %i, i64 %k, i64 %j) !prof !14 {
 ; CHECK-LABEL: @test_chr_21(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[J_FR:%.*]] = freeze i64 [[J:%.*]]
 ; CHECK-NEXT: [[I_FR:%.*]] = freeze i64 [[I:%.*]]
-; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i64 [[J_FR]], [[K:%.*]]
+; CHECK-NEXT: [[CMP0:%.*]] = icmp ne i64 [[J:%.*]], [[K:%.*]]
 ; CHECK-NEXT: [[TMP0:%.*]] = freeze i1 [[CMP0]]
-; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[I_FR]], [[J_FR]]
+; CHECK-NEXT: [[CMP3:%.*]] = icmp ne i64 [[J]], [[I_FR]]
 ; CHECK-NEXT: [[CMP_I:%.*]] = icmp ne i64 [[I_FR]], 86
-; CHECK-NEXT: [[TMP1:%.*]] = and i1 [[TMP0]], [[CMP3]]
-; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP1]], [[CMP_I]]
-; CHECK-NEXT: br i1 [[TMP2]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
+; CHECK-NEXT: [[TMP1:%.*]] = freeze i1 [[CMP3]]
+; CHECK-NEXT: [[TMP2:%.*]] = and i1 [[TMP0]], [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = and i1 [[TMP2]], [[CMP_I]]
+; CHECK-NEXT: br i1 [[TMP3]], label [[BB1:%.*]], label [[ENTRY_SPLIT_NONCHR:%.*]], !prof [[PROF15]]
 ; CHECK: bb1:
 ; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i64 [[I_FR]], 2
 ; CHECK-NEXT: switch i64 [[I_FR]], label [[BB2:%.*]] [
@@ -1971,7 +1971,7 @@ define i32 @test_chr_21(i64 %i, i64 %k, i64 %j) !prof !14 {
 ; CHECK-NEXT: [[CMP_I_NONCHR:%.*]] = icmp eq i64 [[I_FR]], 86
 ; CHECK-NEXT: br i1 [[CMP_I_NONCHR]], label [[BB6_NONCHR:%.*]], label [[BB4_NONCHR:%.*]], !prof [[PROF16]]
 ; CHECK: bb6.nonchr:
-; CHECK-NEXT: [[CMP3_NONCHR:%.*]] = icmp eq i64 [[J_FR]], [[I_FR]]
+; CHECK-NEXT: [[CMP3_NONCHR:%.*]] = icmp eq i64 [[J]], [[I_FR]]
 ; CHECK-NEXT: br i1 [[CMP3_NONCHR]], label [[BB8_NONCHR:%.*]], label [[BB7_NONCHR:%.*]], !prof [[PROF16]]
 ; CHECK: bb8.nonchr:
 ; CHECK-NEXT: br i1 [[CMP_I_NONCHR]], label [[BB10]], label [[BB9_NONCHR:%.*]], !prof [[PROF16]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
index 11b1d54227681..f44e39e82d606 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoist-runtime-checks.ll
@@ -13,7 +13,7 @@ define i32 @read_only_loop_with_runtime_check(ptr noundef %array, i32 noundef %c
 ; CHECK: for.body.preheader:
 ; CHECK-NEXT: [[TMP0:%.*]] = zext i32 [[N]] to i64
 ; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[N]], -1
-; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ult i32 [[TMP1]], [[COUNT]]
+; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ugt i32 [[COUNT]], [[TMP1]]
 ; CHECK-NEXT: br i1 [[DOTNOT_NOT]], label [[FOR_BODY_PREHEADER10:%.*]], label [[IF_THEN:%.*]]
 ; CHECK: for.body.preheader10:
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[N]], 8
@@ -128,7 +128,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
 ; CHECK-NEXT: br i1 [[CMP5_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY_PREHEADER:%.*]]
 ; CHECK: for.body.preheader:
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N]], -1
-; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ult i64 [[TMP0]], [[S_COERCE1]]
+; CHECK-NEXT: [[DOTNOT_NOT:%.*]] = icmp ugt i64 [[S_COERCE1]], [[TMP0]]
 ; CHECK-NEXT: br i1 [[DOTNOT_NOT]], label [[FOR_BODY_PREHEADER8:%.*]], label [[COND_FALSE_I:%.*]], !prof [[PROF4:![0-9]+]]
 ; CHECK: for.body.preheader8:
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], 8
@@ -156,7 +156,7 @@ define dso_local noundef i32 @sum_prefix_with_sum(ptr %s.coerce0, i64 %s.coerce1
 ; CHECK: middle.block:
 ; CHECK-NEXT: [[BIN_RDX:%.*]] = add <4 x i32> [[TMP4]], [[TMP3]]
 ; CHECK-NEXT: [[ADD]] = tail call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[BIN_RDX]])
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[N]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_PREHEADER11]]
 ; CHECK: for.cond.cleanup:
 ; CHECK-NEXT: [[RET_0_LCSSA1:%.*]] = phi i32 [ 0, [[ENTRY1:%.*]] ], [ [[ADD]], [[SPAN_CHECKED_ACCESS_EXIT]] ], [ [[ADD1:%.*]], [[FOR_BODY1]] ]
@@ -227,7 +227,7 @@ define hidden noundef nonnull align 4 dereferenceable(4) ptr @span_checked_acces
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[__SIZE__I:%.*]] = getelementptr inbounds i8, ptr [[THIS]], i64 8
 ; CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr [[__SIZE__I]], align 8
-; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i64 [[TMP0]], [[__IDX]]
+; CHECK-NEXT: [[CMP:%.*]] = icmp ult i64 [[__IDX]], [[TMP0]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[COND_END:%.*]], label [[COND_FALSE:%.*]], !prof [[PROF4]]
 ; CHECK: cond.false:
 ; CHECK-NEXT: tail call void @llvm.trap()
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
index ad100c399c08e..33bcab679ba91 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/hoisting-sinking-required-for-vectorization.ll
@@ -134,11 +134,11 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 40000
 ; CHECK-NEXT: [[SCEVGEP2:%.*]] = getelementptr i8, ptr [[C:%.*]], i64 40000
 ; CHECK-NEXT: [[SCEVGEP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 40000
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP2]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[C]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP2]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[C]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND04:%.*]] = icmp ugt ptr [[SCEVGEP3]], [[B]]
-; CHECK-NEXT: [[BOUND15:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND04:%.*]] = icmp ult ptr [[B]], [[SCEVGEP3]]
+; CHECK-NEXT: [[BOUND15:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT6:%.*]] = and i1 [[BOUND04]], [[BOUND15]]
 ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT6]]
 ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[LOOP_BODY:%.*]], label [[VECTOR_PH:%.*]]
@@ -158,8 +158,8 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
 ; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i8, ptr [[TMP4]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD8:%.*]] = load <4 x float>, ptr [[TMP4]], align 4, !alias.scope [[META7:![0-9]+]]
 ; CHECK-NEXT: [[WIDE_LOAD9:%.*]] = load <4 x float>, ptr [[TMP5]], align 4, !alias.scope [[META7]]
-; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[WIDE_LOAD8]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[WIDE_LOAD9]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP6:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD8]]
+; CHECK-NEXT: [[TMP7:%.*]] = fmul <4 x float> [[BROADCAST_SPLAT]], [[WIDE_LOAD9]]
 ; CHECK-NEXT: [[TMP8:%.*]] = getelementptr float, ptr [[B]], i64 [[INDEX]]
 ; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i8, ptr [[TMP8]], i64 16
 ; CHECK-NEXT: [[WIDE_LOAD10:%.*]] = load <4 x float>, ptr [[TMP8]], align 4, !alias.scope [[META9:![0-9]+]], !noalias [[META11:![0-9]+]]
@@ -181,7 +181,7 @@ define void @loop2(ptr %A, ptr %B, ptr %C, float %x) {
 ; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[C_LV]], 20
 ; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds float, ptr [[A]], i64 [[IV1]]
 ; CHECK-NEXT: [[A_LV_0:%.*]] = load float, ptr [[A_GEP_0]], align 4
-; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[A_LV_0]], [[X]]
+; CHECK-NEXT: [[MUL2_I81_I:%.*]] = fmul float [[X]], [[A_LV_0]]
 ; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV1]]
 ; CHECK-NEXT: br i1 [[CMP]], label [[LOOP_LATCH]], label [[ELSE:%.*]]
 ; CHECK: else:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index db16413cdc94a..db0656da579f4 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -97,8 +97,8 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[I]], 4
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[VECTOR_MEMCHECK:%.*]]
 ; CHECK: vector.memcheck:
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[B]], [[SCEVGEP20]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY4_US_PREHEADER]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
@@ -172,8 +172,8 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[MIN_ITERS_CHECK_1:%.*]] = icmp ult i32 [[I]], 4
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_1]], label [[FOR_BODY4_US_PREHEADER_1:%.*]], label [[VECTOR_MEMCHECK_1:%.*]]
 ; CHECK: vector.memcheck.1:
-; CHECK-NEXT: [[BOUND0_1:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
-; CHECK-NEXT: [[BOUND1_1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0_1:%.*]] = icmp ult ptr [[B]], [[SCEVGEP20]]
+; CHECK-NEXT: [[BOUND1_1:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT_1:%.*]] = and i1 [[BOUND0_1]], [[BOUND1_1]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT_1]], label [[FOR_BODY4_US_PREHEADER_1]], label [[VECTOR_PH_1:%.*]]
 ; CHECK: vector.ph.1:
@@ -249,8 +249,8 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[MIN_ITERS_CHECK_2:%.*]] = icmp ult i32 [[I]], 4
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_2]], label [[FOR_BODY4_US_PREHEADER_2:%.*]], label [[VECTOR_MEMCHECK_2:%.*]]
 ; CHECK: vector.memcheck.2:
-; CHECK-NEXT: [[BOUND0_2:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
-; CHECK-NEXT: [[BOUND1_2:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0_2:%.*]] = icmp ult ptr [[B]], [[SCEVGEP20]]
+; CHECK-NEXT: [[BOUND1_2:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT_2:%.*]] = and i1 [[BOUND0_2]], [[BOUND1_2]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT_2]], label [[FOR_BODY4_US_PREHEADER_2]], label [[VECTOR_PH_2:%.*]]
 ; CHECK: vector.ph.2:
@@ -326,8 +326,8 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
 ; CHECK-NEXT: [[MIN_ITERS_CHECK_3:%.*]] = icmp ult i32 [[I]], 4
 ; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_3]], label [[FOR_BODY4_US_PREHEADER_3:%.*]], label [[VECTOR_MEMCHECK_3:%.*]]
 ; CHECK: vector.memcheck.3:
-; CHECK-NEXT: [[BOUND0_3:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
-; CHECK-NEXT: [[BOUND1_3:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[BOUND0_3:%.*]] = icmp ult ptr [[B]], [[SCEVGEP20]]
+; CHECK-NEXT: [[BOUND1_3:%.*]] = icmp ult ptr [[A]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT_3:%.*]] = and i1 [[BOUND0_3]], [[BOUND1_3]]
 ; CHECK-NEXT: br i1 [[FOUND_CONFLICT_3]], label [[FOR_BODY4_US_PREHEADER_3]], label [[VECTOR_PH_3:%.*]]
 ; CHECK: vector.ph.3:
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
index cc4890e27f2bd..2fe49a31b7722 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/peel-multiple-unreachable-exits-for-vectorization.ll
@@ -248,7 +248,7 @@ define i64 @at_with_int_conversion(ptr %ptr, i64 %idx) {
 ; CHECK-NEXT: [[START_INT:%.*]] = ptrtoint ptr [[START]] to i64
 ; CHECK-NEXT: [[END_INT:%.*]] = ptrtoint ptr [[END]] to i64
 ; CHECK-NEXT: [[SUB:%.*]] = sub i64 [[END_INT]], [[START_INT]]
-; CHECK-NEXT: [[INRANGE:%.*]] = icmp ult i64 [[SUB]], [[IDX:%.*]]
+; CHECK-NEXT: [[INRANGE:%.*]] = icmp ugt i64 [[IDX:%.*]], [[SUB]]
 ; CHECK-NEXT: br i1 [[INRANGE]], label [[ERROR:%.*]], label [[EXIT:%.*]]
 ; CHECK: exit:
 ; CHECK-NEXT: [[GEP_IDX:%.*]] = getelementptr i64, ptr [[START]], i64 [[IDX]]
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
index c133852f66937..b53d0c211919b 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/quant_4x4.ll
@@ -13,11 +13,11 @@ define i32 @quant_4x4(ptr noundef %dct, ptr noundef %mf, ptr noundef %bias) {
 ; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[DCT]], i64 32
 ; CHECK-NEXT: [[SCEVGEP23:%.*]] = getelementptr i8, ptr [[BIAS]], i64 32
 ; CHECK-NEXT: [[SCEVGEP24:%.*]] = getelementptr i8, ptr [[MF]], i64 32
-; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP23]], [[DCT]]
-; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[BIAS]]
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ult ptr [[DCT]], [[SCEVGEP23]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ult ptr [[BIAS]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
-; CHECK-NEXT: [[BOUND025:%.*]] = icmp ugt ptr [[SCEVGEP24]], [[DCT]]
-; CHECK-NEXT: [[BOUND126:%.*]] = icmp ugt ptr [[SCEVGEP]], [[MF]]
+; CHECK-NEXT: [[BOUND025:%.*]] = icmp ult ptr [[DCT]], [[SCEVGEP24]]
+; CHECK-NEXT: [[BOUND126:%.*]] = icmp ult ptr [[MF]], [[SCEVGEP]]
 ; CHECK-NEXT: [[FOUND_CONFLICT27:%.*]] = and i1 [[BOUND025]], [[BOUND126]]
 ; CHECK-NEXT: [[CONFLICT_RDX:%.*]] = or i1 [[FOUND_CONFLICT]], [[FOUND_CONFLICT27]]
 ; CHECK-NEXT: br i1 [[CONFLICT_RDX]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY:%.*]]
diff --git a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
index 8b82f21e38c93..6e9abb3813aa1 100644
--- a/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
+++ b/llvm/test/Transforms/PhaseOrdering/ARM/arm_mult_q15.ll
@@ -47,7 +47,7 @@ define void @arm_mult_q15(ptr %pSrcA, ptr %pSrcB, ptr noalias %pDst, i32 %blockS
 ; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i32 [[INDEX_NEXT]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[TMP9]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
 ; CHECK: middle.block:
-; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[N_VEC]], [[BLOCKSIZE]]
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i32 [[BLOCKSIZE]], [[N_VEC]]
 ; CHECK-NEXT: br i1 [[CMP_N]], label [[WHILE_END]], label [[WHILE_BODY_PREHEADER18]]
 ; CHECK: while.body.preheader18:
 ; CHECK-NEXT: [[BLKCNT_06_PH:%.*]] = phi i32 [ [[BLOCKSIZE]], [[WHILE_BODY_PREHEADER]] ], [ [[IND_END]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
index 791ef7cbeb361..33a0eb43b7085 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/hoist-load-of-baseptr.ll
@@ -46,7 +46,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
 ; O2-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DATA]], align 8
 ; O2-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUMELEMS]], 8
 ; O2-NEXT: [[N_VEC:%.*]] = and i64 [[NUMELEMS]], -8
-; O2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEMS]]
+; O2-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEMS]], [[N_VEC]]
 ; O2-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
 ; O2: for.cond1.preheader:
 ; O2-NEXT: [[I_06:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INC7:%.*]], [[FOR_COND_CLEANUP3:%.*]] ]
@@ -96,7 +96,7 @@ define dso_local void @_Z7computeRSt6vectorIiSaIiEEy(ptr noundef nonnull align 8
 ; O3: for.cond1.preheader.us.preheader:
 ; O3-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[NUMELEMS]], 8
 ; O3-NEXT: [[N_VEC:%.*]] = and i64 [[NUMELEMS]], -8
-; O3-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEMS]]
+; O3-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEMS]], [[N_VEC]]
 ; O3-NEXT: br label [[FOR_COND1_PREHEADER_US:%.*]]
 ; O3: for.cond1.preheader.us:
 ; O3-NEXT: [[I_06_US:%.*]] = phi i64 [ [[INC7_US:%.*]], [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
index c5deb716d8030..5bf7be4362a8e 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/speculation-vs-tbaa.ll
@@ -47,7 +47,7 @@ define void @licm(ptr align 8 dereferenceable(8) %_M_start.i, i64 %numElem) {
 ; O23-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
 ; O23-NEXT: br i1 [[TMP3]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
 ; O23: middle.block:
-; O23-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[NUMELEM]]
+; O23-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[NUMELEM]], [[N_VEC]]
 ; O23-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY_PREHEADER]]
 ; O23: for.body.preheader:
 ; O23-NEXT: [[K_02_PH:%.*]] = phi i64 [ 0, [[FOR_BODY_LR_PH]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
diff --git a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-logical.ll b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-logical.ll
index c97d8da58be44..d2850f36a80dc 100644
--- a/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-logical.ll
+++ b/llvm/test/Transforms/PhaseOrdering/X86/vector-reductions-logical.ll
@@ -94,7 +94,7 @@ define float @test_merge_anyof_v4sf(<4 x float> %t) {
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i1> [[TMP4]] to i8
 ; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
 ; CHECK-NEXT: [[SHIFT:%.*]] = shufflevector <4 x float> [[T]], <4 x float> poison, <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = fadd <4 x float> [[SHIFT]], [[T]]
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <4 x float> [[T]], [[SHIFT]]
 ; CHECK-NEXT: [[ADD:%.*]] = extractelement <4 x float> [[TMP6]], i64 0
 ; CHECK-NEXT: [[RETVAL_0:%.*]] = select i1 [[DOTNOT]], float [[ADD]], float 0.000000e+00
 ; CHECK-NEXT: ret float [[RETVAL_0]]
@@ -409,7 +409,7 @@ define float @test_merge_anyof_v4si(<4 x i32> %t) {
 ; CHECK-NEXT: [[TMP5:%.*]] = bitcast <8 x i1> [[TMP4]] to i8
 ; CHECK-NEXT: [[DOTNOT:%.*]] = icmp eq i8 [[TMP5]], 0
 ; CHECK-NEXT: [[SHIFT:%.*]] = shufflevector <4 x i32> [[T]], <4 x i32> poison, <4 x i32>
-; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[SHIFT]], [[T]]
+; CHECK-NEXT: [[TMP6:%.*]] = add nsw <4 x i32> [[T]], [[SHIFT]]
 ; CHECK-NEXT: [[ADD:%.*]] = extractelement <4 x i32> [[TMP6]], i64 0
 ; CHECK-NEXT: [[CONV:%.*]] = sitofp i32 [[ADD]] to float
 ; CHECK-NEXT: [[RETVAL_0:%.*]] = select i1 [[DOTNOT]], float [[CONV]], float 0.000000e+00
diff --git a/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
index 0127f05022d71..ec217a9cd31c6 100644
--- a/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
+++ b/llvm/test/Transforms/PhaseOrdering/fast-basictest.ll
@@ -139,7 +139,7 @@ define float @test15_reassoc_nsz(float %b, float %a) {
 define float @test15_reassoc(float %b, float %a) {
 ; CHECK-LABEL: @test15_reassoc(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd reassoc float [[A:%.*]], 1.234000e+03
-; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[TMP1]], [[B:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd reassoc float [[B:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[TMP3:%.*]] = fsub reassoc float 0.000000e+00, [[A]]
 ; CHECK-NEXT: [[TMP4:%.*]] = fadd reassoc float [[TMP3]], [[TMP2]]
 ; CHECK-NEXT: ret float [[TMP4]]
diff --git a/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll b/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
index 7e958e8906c9a..13aeb9e64fc3f 100644
--- a/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
+++ b/llvm/test/Transforms/PhaseOrdering/reassociate-instcombine.ll
@@ -8,7 +8,7 @@ define i4 @not_reassociate_and_and_not(i4 %a, i4 %b, i4 %c, i4 %d) {
 ; CHECK-LABEL: @not_reassociate_and_and_not(
 ; CHECK-NEXT: [[TMP1:%.*]] = or i4 [[B:%.*]], [[C:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = xor i4 [[TMP1]], -1
-; CHECK-NEXT: [[AND2:%.*]] = and i4 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT: [[AND2:%.*]] = and i4 [[A:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[AND3:%.*]] = and i4 [[AND2]], [[D:%.*]]
 ; CHECK-NEXT: ret i4 [[AND3]]
 ;
@@ -25,7 +25,7 @@ define i32 @not_reassociate_or_or_not(i32 %a, i32 %b, i32 %c, i32 %d) {
 ; CHECK-LABEL: @not_reassociate_or_or_not(
 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[B:%.*]], [[C:%.*]]
 ; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], -1
-; CHECK-NEXT: [[B2:%.*]] = or i32 [[TMP2]], [[A:%.*]]
+; CHECK-NEXT: [[B2:%.*]] = or i32 [[A:%.*]], [[TMP2]]
 ; CHECK-NEXT: [[B3:%.*]] = or i32 [[B2]], [[D:%.*]]
 ; CHECK-NEXT: ret i32 [[B3]]
 ;
diff --git a/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll b/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
index 89095048f2249..2933249782f44 100644
--- a/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
+++ b/llvm/test/Transforms/PhaseOrdering/runtime-check-removal.ll
@@ -10,7 +10,7 @@ define void @test_remove_check_with_incrementing_integer_induction(i16 %start, i
 ; CHECK-LABEL: @test_remove_check_with_incrementing_integer_induction(
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[LEN:%.*]] = zext i8 [[LEN_N:%.*]] to i16
-; CHECK-NEXT: [[LEN_NEG_NOT:%.*]] = icmp ult i16 [[LEN]], [[A:%.*]]
+; CHECK-NEXT: [[LEN_NEG_NOT:%.*]] = icmp ugt i16 [[A:%.*]], [[LEN]]
 ; CHECK-NEXT: [[C1:%.*]] = icmp ne i8 [[LEN_N]], 0
 ; CHECK-NEXT: [[OR_COND3:%.*]] = and i1 [[LEN_NEG_NOT]], [[C1]]
 ; CHECK-NEXT: br i1 [[OR_COND3]], label [[LOOP_LATCH_PREHEADER:%.*]], label [[EXIT:%.*]]
diff --git a/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll b/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
index 6dc7b89a9b186..d629ce15c1c92 100644
--- a/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
+++ b/llvm/test/Transforms/Reassociate/fast-ArrayOutOfBounds.ll
@@ -6,14 +6,14 @@ define float @test1(float %a0, float %a1, float %a2, float %a3, float %a4) {
 ; CHECK-LABEL: define float @test1(
 ; CHECK-SAME: float [[A0:%.*]], float [[A1:%.*]], float [[A2:%.*]], float [[A3:%.*]], float [[A4:%.*]]) {
 ; CHECK-NEXT: [[TMP_2:%.*]] = fadd float [[A3]], [[A4]]
-; CHECK-NEXT: [[TMP_4:%.*]] = fadd float [[TMP_2]], [[A2]]
-; CHECK-NEXT: [[TMP_6:%.*]] = fadd float [[TMP_4]], [[A1]]
-; CHECK-NEXT: [[TMP_8:%.*]] = fadd float [[TMP_6]], [[A0]]
+; CHECK-NEXT: [[TMP_4:%.*]] = fadd float [[A2]], [[TMP_2]]
+; CHECK-NEXT: [[TMP_6:%.*]] = fadd float [[A1]], [[TMP_4]]
+; CHECK-NEXT: [[TMP_8:%.*]] = fadd float [[A0]], [[TMP_6]]
 ; CHECK-NEXT: [[TMP_11:%.*]] = fadd float [[A2]], [[A3]]
-; CHECK-NEXT: [[TMP_13:%.*]] = fadd float [[TMP_11]], [[A1]]
-; CHECK-NEXT: [[TMP_15:%.*]] = fadd float [[TMP_13]], [[A0]]
+; CHECK-NEXT: [[TMP_13:%.*]] = fadd float [[A1]], [[TMP_11]]
+; CHECK-NEXT: [[TMP_15:%.*]] = fadd float [[A0]], [[TMP_13]]
 ; CHECK-NEXT: [[TMP_18:%.*]] = fadd float [[A1]], [[A2]]
-; CHECK-NEXT: [[TMP_20:%.*]] = fadd float [[TMP_18]], [[A0]]
+; CHECK-NEXT: [[TMP_20:%.*]] = fadd float [[A0]], [[TMP_18]]
 ; CHECK-NEXT: [[TMP_23:%.*]] = fadd float [[A0]], [[A1]]
 ; CHECK-NEXT: [[TMP_26:%.*]] = fsub float [[TMP_8]], [[TMP_15]]
 ; CHECK-NEXT: [[TMP_28:%.*]] = fadd float [[TMP_20]], [[TMP_26]]
diff --git a/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll b/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
index 5152201ea7c93..2d6f67bbaff6a 100644
--- a/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
+++ b/llvm/test/Transforms/Reassociate/fast-SubReassociate.ll
@@ -33,8 +33,8 @@ define float @test2(float %A, float %B) {
 ; Both 'reassoc' and 'nsz' are required.
 define float @test2_minimal(float %A, float %B) {
 ; CHECK-LABEL: @test2_minimal(
-; CHECK-NEXT: [[TMP1:%.*]] = fsub reassoc nsz float [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: ret float [[TMP1]]
+; CHECK-NEXT: [[Z:%.*]] = fsub reassoc nsz float [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: ret float [[Z]]
 ;
 %W = fadd reassoc nsz float %B, 5.000000e+00
 %X = fadd reassoc nsz float %A, -7.000000e+00
@@ -81,7 +81,7 @@ define float @test3(float %A, float %B, float %C, float %D) {
 define float @test4(float %A, float %B, float %C, float %D) {
 ; CHECK-LABEL: @test4(
 ; CHECK-NEXT: [[TMP1:%.*]] = fadd fast float [[B:%.*]], [[A:%.*]]
-; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[TMP1]], [[C:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = fadd fast float [[C:%.*]], [[TMP1]]
 ; CHECK-NEXT: [[Q:%.*]] = fsub fast float [[D:%.*]], [[TMP2]]
 ; CHECK-NEXT: ret float [[Q]]
 ;
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
index 997b8ac8add32..fd5f09bf2adc0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute-inseltpoison.ll
@@ -8,9 +8,9 @@ define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_eq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -38,9 +38,9 @@ define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_ne_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -68,9 +68,9 @@ define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_oeq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -98,9 +98,9 @@ define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -132,9 +132,9 @@ define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_sgt_slt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp slt <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -162,9 +162,9 @@ define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_uge_ule_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -192,9 +192,9 @@ define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_ogt_olt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -222,11 +222,11 @@ define <4 x i32> @fcmp_ord_uno_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_ord_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp ord <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> [[TMP4]], <4 x i32>
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ord <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32>
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
index 29cf66a1ea656..35619d6d3ad1d 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/cmp_commute.ll
@@ -8,9 +8,9 @@ define <4 x i32> @icmp_eq_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_eq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -38,9 +38,9 @@ define <4 x i32> @icmp_ne_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_ne_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ne <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <4 x i32> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -68,9 +68,9 @@ define <4 x i32> @fcmp_oeq_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_oeq_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp oeq <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp oeq <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -98,9 +98,9 @@ define <4 x i32> @fcmp_uno_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -132,9 +132,9 @@ define <4 x i32> @icmp_sgt_slt_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_sgt_slt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp slt <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -162,9 +162,9 @@ define <4 x i32> @icmp_uge_ule_v4i32(<4 x i32> %a, ptr %b) {
 ; CHECK-LABEL: @icmp_uge_ule_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = icmp ule <4 x i32> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = icmp uge <4 x i32> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x i32> %a, i32 0
@@ -192,9 +192,9 @@ define <4 x i32> @fcmp_ogt_olt_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_ogt_olt_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp olt <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP3]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ogt <4 x float> [[A:%.*]], [[TMP1]]
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP2]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0
@@ -222,11 +222,11 @@ define <4 x i32> @fcmp_ord_uno_v4i32(<4 x float> %a, ptr %b) {
 ; CHECK-LABEL: @fcmp_ord_uno_v4i32(
-; CHECK-NEXT: [[TMP2:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
-; CHECK-NEXT: [[TMP3:%.*]] = fcmp ord <4 x float> [[TMP2]], [[A:%.*]]
-; CHECK-NEXT: [[TMP4:%.*]] = fcmp uno <4 x float> [[TMP2]], [[A]]
-; CHECK-NEXT: [[TMP5:%.*]] = shufflevector <4 x i1> [[TMP3]], <4 x i1> [[TMP4]], <4 x i32>
-; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP5]] to <4 x i32>
+; CHECK-NEXT: [[TMP1:%.*]] = load <4 x float>, ptr [[B:%.*]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = fcmp ord <4 x float> [[TMP1]], [[A:%.*]]
+; CHECK-NEXT: [[TMP3:%.*]] = fcmp uno <4 x float> [[TMP1]], [[A]]
+; CHECK-NEXT: [[TMP4:%.*]] = shufflevector <4 x i1> [[TMP2]], <4 x i1> [[TMP3]], <4 x i32>
+; CHECK-NEXT: [[R:%.*]] = sext <4 x i1> [[TMP4]] to <4 x i32>
 ; CHECK-NEXT: ret <4 x i32> [[R]]
 ;
 %a0 = extractelement <4 x float> %a, i32 0