diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
index 987f20629bea..b814bd3c4a13 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -2623,6 +2623,23 @@ mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned BuiltinID,
                                    getTarget().getTriple().getArch());
 }
 
+mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments,
+                                                        unsigned Idx,
+                                                        const CallExpr *E) {
+  mlir::Value Arg = {};
+  if ((ICEArguments & (1 << Idx)) == 0) {
+    Arg = emitScalarExpr(E->getArg(Idx));
+  } else {
+    // If this is required to be a constant, constant fold it so that we
+    // know that the generated intrinsic gets a ConstantInt.
+    std::optional<llvm::APSInt> Result =
+        E->getArg(Idx)->getIntegerConstantExpr(getContext());
+    assert(Result && "Expected argument to be a constant");
+    Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result);
+  }
+  return Arg;
+}
+
 void CIRGenFunction::emitVAStartEnd(mlir::Value ArgValue, bool IsStart) {
   // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
   // early, defer to LLVM lowering.
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
index 7f27edca2224..7964fe7f743d 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -1676,50 +1676,6 @@ static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
 #undef SMEMAP1
 #undef SMEMAP2
 
-// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
-// we handle them here.
-enum class CIRGenFunction::MSVCIntrin {
-  _BitScanForward,
-  _BitScanReverse,
-  _InterlockedAnd,
-  _InterlockedDecrement,
-  _InterlockedExchange,
-  _InterlockedExchangeAdd,
-  _InterlockedExchangeSub,
-  _InterlockedIncrement,
-  _InterlockedOr,
-  _InterlockedXor,
-  _InterlockedExchangeAdd_acq,
-  _InterlockedExchangeAdd_rel,
-  _InterlockedExchangeAdd_nf,
-  _InterlockedExchange_acq,
-  _InterlockedExchange_rel,
-  _InterlockedExchange_nf,
-  _InterlockedCompareExchange_acq,
-  _InterlockedCompareExchange_rel,
-  _InterlockedCompareExchange_nf,
-  _InterlockedCompareExchange128,
-  _InterlockedCompareExchange128_acq,
-  _InterlockedCompareExchange128_rel,
-  _InterlockedCompareExchange128_nf,
-  _InterlockedOr_acq,
-  _InterlockedOr_rel,
-  _InterlockedOr_nf,
-  _InterlockedXor_acq,
-  _InterlockedXor_rel,
-  _InterlockedXor_nf,
-  _InterlockedAnd_acq,
-  _InterlockedAnd_rel,
-  _InterlockedAnd_nf,
-  _InterlockedIncrement_acq,
-  _InterlockedIncrement_rel,
-  _InterlockedIncrement_nf,
-  _InterlockedDecrement_acq,
-  _InterlockedDecrement_rel,
-  _InterlockedDecrement_nf,
-  __fastfail,
-};
-
 static std::optional<CIRGenFunction::MSVCIntrin>
 translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
   using MSVCIntrin = CIRGenFunction::MSVCIntrin;
@@ -2102,23 +2058,6 @@ mlir::Value CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned BuiltinID,
   llvm_unreachable("NYI");
 }
 
-mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments,
-                                                        unsigned Idx,
-                                                        const CallExpr *E) {
-  mlir::Value Arg = {};
-  if ((ICEArguments & (1 << Idx)) == 0) {
-    Arg = emitScalarExpr(E->getArg(Idx));
-  } else {
-    // If this is required to be a constant, constant fold it so that we
-    // know that the generated intrinsic gets a ConstantInt.
-    std::optional<llvm::APSInt> Result =
-        E->getArg(Idx)->getIntegerConstantExpr(getContext());
-    assert(Result && "Expected argument to be a constant");
-    Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result);
-  }
-  return Arg;
-}
-
 static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID,
                                                const CallExpr *clangCallExpr,
                                                CIRGenFunction &cgf) {
diff --git a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
index 0cd8f09f6da3..50d40cd278a2 100644
--- a/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -31,7 +31,79 @@
 using namespace clang;
 using namespace clang::CIRGen;
 using namespace cir;
+static std::optional<CIRGenFunction::MSVCIntrin>
+translateX86ToMsvcIntrin(unsigned BuiltinID) {
+  using MSVCIntrin = CIRGenFunction::MSVCIntrin;
+  switch (BuiltinID) {
+  default:
+    return std::nullopt;
+  case clang::X86::BI_BitScanForward:
+  case clang::X86::BI_BitScanForward64:
+    return MSVCIntrin::_BitScanForward;
+  case clang::X86::BI_BitScanReverse:
+  case clang::X86::BI_BitScanReverse64:
+    return MSVCIntrin::_BitScanReverse;
+  case clang::X86::BI_InterlockedAnd64:
+    return MSVCIntrin::_InterlockedAnd;
+  case clang::X86::BI_InterlockedCompareExchange128:
+    return MSVCIntrin::_InterlockedCompareExchange128;
+  case clang::X86::BI_InterlockedExchange64:
+    return MSVCIntrin::_InterlockedExchange;
+  case clang::X86::BI_InterlockedExchangeAdd64:
+    return MSVCIntrin::_InterlockedExchangeAdd;
+  case clang::X86::BI_InterlockedExchangeSub64:
+    return MSVCIntrin::_InterlockedExchangeSub;
+  case clang::X86::BI_InterlockedOr64:
+    return MSVCIntrin::_InterlockedOr;
+  case clang::X86::BI_InterlockedXor64:
+    return MSVCIntrin::_InterlockedXor;
+  case clang::X86::BI_InterlockedDecrement64:
+    return MSVCIntrin::_InterlockedDecrement;
+  case clang::X86::BI_InterlockedIncrement64:
+    return MSVCIntrin::_InterlockedIncrement;
+  }
+  llvm_unreachable("must return from switch");
+}
+
 mlir::Value
 CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
-  llvm_unreachable("NYI");
+  if (BuiltinID == Builtin::BI__builtin_cpu_is)
+    llvm_unreachable("__builtin_cpu_is NYI");
+  if (BuiltinID == Builtin::BI__builtin_cpu_supports)
+    llvm_unreachable("__builtin_cpu_supports NYI");
+  if (BuiltinID == Builtin::BI__builtin_cpu_init)
+    llvm_unreachable("__builtin_cpu_init NYI");
+
+  // Handle MSVC intrinsics before argument evaluation to prevent double
+  // evaluation.
+  if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
+    llvm_unreachable("translateX86ToMsvcIntrin NYI");
+
+  llvm::SmallVector<mlir::Value, 4> Ops;
+
+  // Find out if any arguments are required to be integer constant expressions.
+  unsigned ICEArguments = 0;
+  ASTContext::GetBuiltinTypeError Error;
+  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+  assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+    Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E));
+  }
+
+  switch (BuiltinID) {
+  default:
+    return nullptr;
+  case X86::BI_mm_prefetch: {
+    llvm_unreachable("_mm_prefetch NYI");
+  }
+  case X86::BI_mm_clflush: {
+    mlir::Type voidTy = cir::VoidType::get(&getMLIRContext());
+    return builder
+        .create<cir::LLVMIntrinsicCallOp>(
+            getLoc(E->getExprLoc()), builder.getStringAttr("x86.sse2.clflush"),
+            voidTy, Ops[0])
+        .getResult();
+  }
+  }
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 44484bcd2fe3..e4f7216f77fc 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1015,7 +1015,49 @@ class CIRGenFunction : public CIRGenTypeCache {
   RValue emitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
   RValue emitCoroutineFrame();
 
-  enum class MSVCIntrin;
+  // Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
+  // we handle them here.
+  enum class MSVCIntrin {
+    _BitScanForward,
+    _BitScanReverse,
+    _InterlockedAnd,
+    _InterlockedDecrement,
+    _InterlockedExchange,
+    _InterlockedExchangeAdd,
+    _InterlockedExchangeSub,
+    _InterlockedIncrement,
+    _InterlockedOr,
+    _InterlockedXor,
+    _InterlockedExchangeAdd_acq,
+    _InterlockedExchangeAdd_rel,
+    _InterlockedExchangeAdd_nf,
+    _InterlockedExchange_acq,
+    _InterlockedExchange_rel,
+    _InterlockedExchange_nf,
+    _InterlockedCompareExchange_acq,
+    _InterlockedCompareExchange_rel,
+    _InterlockedCompareExchange_nf,
+    _InterlockedCompareExchange128,
+    _InterlockedCompareExchange128_acq,
+    _InterlockedCompareExchange128_rel,
+    _InterlockedCompareExchange128_nf,
+    _InterlockedOr_acq,
+    _InterlockedOr_rel,
+    _InterlockedOr_nf,
+    _InterlockedXor_acq,
+    _InterlockedXor_rel,
+    _InterlockedXor_nf,
+    _InterlockedAnd_acq,
+    _InterlockedAnd_rel,
+    _InterlockedAnd_nf,
+    _InterlockedIncrement_acq,
+    _InterlockedIncrement_rel,
+    _InterlockedIncrement_nf,
+    _InterlockedDecrement_acq,
+    _InterlockedDecrement_rel,
+    _InterlockedDecrement_nf,
+    __fastfail,
+  };
 
   mlir::Value emitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
                                     ReturnValueSlot ReturnValue,
diff --git a/clang/test/CIR/CodeGen/X86/builtins-x86.c b/clang/test/CIR/CodeGen/X86/builtins-x86.c
new file mode 100644
index 000000000000..f91ae6696cd5
--- /dev/null
+++ b/clang/test/CIR/CodeGen/X86/builtins-x86.c
@@ -0,0 +1,13 @@
+// Global variables of integral types
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-cir -o %t.cir %s
+// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
+// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-llvm -o %t.ll %s
+// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s
+
+void test_mm_clflush(const void* tmp_vCp) {
+  // CIR-LABEL: test_mm_clflush
+  // LLVM-LABEL: test_mm_clflush
+  _mm_clflush(tmp_vCp);
+  // CIR: {{%.*}} = cir.llvm.intrinsic "x86.sse2.clflush" {{%.*}} : (!cir.ptr<!void>) -> !void
+  // LLVM: call void @llvm.x86.sse2.clflush(ptr {{%.*}})
+}