diff --git a/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics-upgrade.ll b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics-upgrade.ll
new file mode 100644
index 0000000000000..a169ead8ea20a
--- /dev/null
+++ b/llvm/test/Instrumentation/MemorySanitizer/X86/f16c-intrinsics-upgrade.ll
@@ -0,0 +1,175 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=msan -S | FileCheck %s
+;
+; Forked from llvm/test/CodeGen/X86/f16c-intrinsics-upgrade.ll
+;
+; Handled by reduction to fpext:
+; - llvm.x86.vcvtph2ps.{128,256}
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define <4 x float> @test_x86_vcvtph2ps_128(<8 x i16> %a0) #0 {
+; CHECK-LABEL: define <4 x float> @test_x86_vcvtph2ps_128(
+; CHECK-SAME: <8 x i16> [[A0:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[TMP1]], <8 x i16> [[TMP1]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <8 x i16> [[A0]], <8 x i16> [[A0]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x i16> [[TMP2]] to <4 x half>
+; CHECK-NEXT: [[TMP4:%.*]] = zext <4 x i16> [[_MSPROP]] to <4 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <4 x half> [[TMP3]] to <4 x float>
+; CHECK-NEXT: store <4 x i32> [[TMP4]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x float> [[CVTPH2PS]]
+;
+  %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %a0) ; <<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+declare <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16>) nounwind readonly
+
+define <4 x float> @test_x86_vcvtph2ps_128_m(ptr nocapture %a) #0 {
+; CHECK-LABEL: define <4 x float> @test_x86_vcvtph2ps_128_m(
+; CHECK-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1:![0-9]+]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4:[0-9]+]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[LOAD:%.*]] = load <8 x i16>, ptr [[A]], align 16
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP6]], align 16
+; CHECK-NEXT: [[_MSPROP:%.*]] = shufflevector <8 x i16> [[_MSLD]], <8 x i16> [[_MSLD]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <8 x i16> [[LOAD]], <8 x i16> [[LOAD]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = bitcast <4 x i16> [[TMP7]] to <4 x half>
+; CHECK-NEXT: [[TMP9:%.*]] = zext <4 x i16> [[_MSPROP]] to <4 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <4 x half> [[TMP8]] to <4 x float>
+; CHECK-NEXT: store <4 x i32> [[TMP9]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x float> [[CVTPH2PS]]
+;
+  %load = load <8 x i16>, ptr %a
+  %res = call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %load) ; <<4 x float>> [#uses=1]
+  ret <4 x float> %res
+}
+
+define <8 x float> @test_x86_vcvtph2ps_256(<8 x i16> %a0) #0 {
+; CHECK-LABEL: define <8 x float> @test_x86_vcvtph2ps_256(
+; CHECK-SAME: <8 x i16> [[A0:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i16>, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast <8 x i16> [[A0]] to <8 x half>
+; CHECK-NEXT: [[TMP3:%.*]] = zext <8 x i16> [[TMP1]] to <8 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <8 x half> [[TMP2]] to <8 x float>
+; CHECK-NEXT: store <8 x i32> [[TMP3]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <8 x float> [[CVTPH2PS]]
+;
+  %res = call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %a0) ; <<8 x float>> [#uses=1]
+  ret <8 x float> %res
+}
+declare <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16>) nounwind readonly
+
+define <8 x float> @test_x86_vcvtph2ps_256_m(ptr nocapture %a) nounwind #0 {
+; CHECK-LABEL: define <8 x float> @test_x86_vcvtph2ps_256_m(
+; CHECK-SAME: ptr captures(none) [[A:%.*]]) #[[ATTR1:[0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[LOAD:%.*]] = load <8 x i16>, ptr [[A]], align 16
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[A]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: [[_MSLD:%.*]] = load <8 x i16>, ptr [[TMP6]], align 16
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <8 x i16> [[LOAD]] to <8 x half>
+; CHECK-NEXT: [[TMP8:%.*]] = zext <8 x i16> [[_MSLD]] to <8 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <8 x half> [[TMP7]] to <8 x float>
+; CHECK-NEXT: store <8 x i32> [[TMP8]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <8 x float> [[CVTPH2PS]]
+;
+  %load = load <8 x i16>, ptr %a
+  %res = tail call <8 x float> @llvm.x86.vcvtph2ps.256(<8 x i16> %load)
+  ret <8 x float> %res
+}
+
+define <4 x float> @test_x86_vcvtph2ps_128_scalar(ptr %ptr) #0 {
+; CHECK-LABEL: define <4 x float> @test_x86_vcvtph2ps_128_scalar(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[PTR]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i64> splat (i64 -1), i64 [[_MSLD]], i32 0
+; CHECK-NEXT: [[INS1:%.*]] = insertelement <2 x i64> poison, i64 [[LOAD]], i32 0
+; CHECK-NEXT: [[_MSPROP1:%.*]] = insertelement <2 x i64> [[_MSPROP]], i64 0, i32 1
+; CHECK-NEXT: [[INS2:%.*]] = insertelement <2 x i64> [[INS1]], i64 0, i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSPROP1]] to <8 x i16>
+; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[INS2]] to <8 x i16>
+; CHECK-NEXT: [[_MSPROP2:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i16> [[BC]], <8 x i16> [[BC]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i16> [[TMP8]] to <4 x half>
+; CHECK-NEXT: [[TMP10:%.*]] = zext <4 x i16> [[_MSPROP2]] to <4 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <4 x half> [[TMP9]] to <4 x float>
+; CHECK-NEXT: store <4 x i32> [[TMP10]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x float> [[CVTPH2PS]]
+;
+  %load = load i64, ptr %ptr
+  %ins1 = insertelement <2 x i64> poison, i64 %load, i32 0
+  %ins2 = insertelement <2 x i64> %ins1, i64 0, i32 1
+  %bc = bitcast <2 x i64> %ins2 to <8 x i16>
+  %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc) #2
+  ret <4 x float> %res
+}
+
+define <4 x float> @test_x86_vcvtph2ps_128_scalar2(ptr %ptr) #0 {
+; CHECK-LABEL: define <4 x float> @test_x86_vcvtph2ps_128_scalar2(
+; CHECK-SAME: ptr [[PTR:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
+; CHECK-NEXT: call void @llvm.donothing()
+; CHECK-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
+; CHECK-NEXT: br i1 [[_MSCMP]], label %[[BB2:.*]], label %[[BB3:.*]], !prof [[PROF1]]
+; CHECK: [[BB2]]:
+; CHECK-NEXT: call void @__msan_warning_noreturn() #[[ATTR4]]
+; CHECK-NEXT: unreachable
+; CHECK: [[BB3]]:
+; CHECK-NEXT: [[LOAD:%.*]] = load i64, ptr [[PTR]], align 8
+; CHECK-NEXT: [[TMP4:%.*]] = ptrtoint ptr [[PTR]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = xor i64 [[TMP4]], 87960930222080
+; CHECK-NEXT: [[TMP6:%.*]] = inttoptr i64 [[TMP5]] to ptr
+; CHECK-NEXT: [[_MSLD:%.*]] = load i64, ptr [[TMP6]], align 8
+; CHECK-NEXT: [[_MSPROP:%.*]] = insertelement <2 x i64> splat (i64 -1), i64 [[_MSLD]], i32 0
+; CHECK-NEXT: [[INS:%.*]] = insertelement <2 x i64> poison, i64 [[LOAD]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = bitcast <2 x i64> [[_MSPROP]] to <8 x i16>
+; CHECK-NEXT: [[BC:%.*]] = bitcast <2 x i64> [[INS]] to <8 x i16>
+; CHECK-NEXT: [[_MSPROP1:%.*]] = shufflevector <8 x i16> [[TMP7]], <8 x i16> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <8 x i16> [[BC]], <8 x i16> [[BC]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x i16> [[TMP8]] to <4 x half>
+; CHECK-NEXT: [[TMP10:%.*]] = zext <4 x i16> [[_MSPROP1]] to <4 x i32>
+; CHECK-NEXT: [[CVTPH2PS:%.*]] = fpext <4 x half> [[TMP9]] to <4 x float>
+; CHECK-NEXT: store <4 x i32> [[TMP10]], ptr @__msan_retval_tls, align 8
+; CHECK-NEXT: ret <4 x float> [[CVTPH2PS]]
+;
+  %load = load i64, ptr %ptr
+  %ins = insertelement <2 x i64> poison, i64 %load, i32 0
+  %bc = bitcast <2 x i64> %ins to <8 x i16>
+  %res = tail call <4 x float> @llvm.x86.vcvtph2ps.128(<8 x i16> %bc)
+  ret <4 x float> %res
+}
+
+attributes #0 = { sanitize_memory }