[CIR][CIRGen][builtin] handle _mm_clflush #1397

Merged (2 commits) on Feb 24, 2025
17 changes: 17 additions & 0 deletions clang/lib/CIR/CodeGen/CIRGenBuiltin.cpp
@@ -2623,6 +2623,23 @@ mlir::Value CIRGenFunction::emitTargetBuiltinExpr(unsigned BuiltinID,
getTarget().getTriple().getArch());
}

mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments,
unsigned Idx,
const CallExpr *E) {
mlir::Value Arg = {};
if ((ICEArguments & (1 << Idx)) == 0) {
Arg = emitScalarExpr(E->getArg(Idx));
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
std::optional<llvm::APSInt> Result =
E->getArg(Idx)->getIntegerConstantExpr(getContext());
assert(Result && "Expected argument to be a constant");
Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result);
}
return Arg;
}

void CIRGenFunction::emitVAStartEnd(mlir::Value ArgValue, bool IsStart) {
// LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
// early, defer to LLVM lowering.
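
emitScalarOrConstFoldImmArg is consumed the same way on every target: ASTContext::GetBuiltinType reports a bitmask in which bit i means argument i of the builtin must be an integer constant expression, and only those arguments get constant-folded. A minimal sketch of the consuming loop, mirroring the X86 emitter later in this diff:

  llvm::SmallVector<mlir::Value, 4> Ops;

  // Find out which arguments must be integer constant expressions.
  unsigned ICEArguments = 0;
  ASTContext::GetBuiltinTypeError Error;
  getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
  assert(Error == ASTContext::GE_None && "Should not codegen an error");

  // Bit i set in ICEArguments => argument i is folded to a ConstantInt;
  // otherwise it is emitted as an ordinary scalar expression.
  for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i)
    Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E));
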
61 changes: 0 additions & 61 deletions clang/lib/CIR/CodeGen/CIRGenBuiltinAArch64.cpp
@@ -1676,50 +1676,6 @@ static const ARMVectorIntrinsicInfo AArch64SMEIntrinsicMap[] = {
#undef SMEMAP1
#undef SMEMAP2

// Many of MSVC builtins are on x64, ARM and AArch64; to avoid repeating code,
// we handle them here.
enum class CIRGenFunction::MSVCIntrin {
_BitScanForward,
_BitScanReverse,
_InterlockedAnd,
_InterlockedDecrement,
_InterlockedExchange,
_InterlockedExchangeAdd,
_InterlockedExchangeSub,
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
_InterlockedExchangeAdd_acq,
_InterlockedExchangeAdd_rel,
_InterlockedExchangeAdd_nf,
_InterlockedExchange_acq,
_InterlockedExchange_rel,
_InterlockedExchange_nf,
_InterlockedCompareExchange_acq,
_InterlockedCompareExchange_rel,
_InterlockedCompareExchange_nf,
_InterlockedCompareExchange128,
_InterlockedCompareExchange128_acq,
_InterlockedCompareExchange128_rel,
_InterlockedCompareExchange128_nf,
_InterlockedOr_acq,
_InterlockedOr_rel,
_InterlockedOr_nf,
_InterlockedXor_acq,
_InterlockedXor_rel,
_InterlockedXor_nf,
_InterlockedAnd_acq,
_InterlockedAnd_rel,
_InterlockedAnd_nf,
_InterlockedIncrement_acq,
_InterlockedIncrement_rel,
_InterlockedIncrement_nf,
_InterlockedDecrement_acq,
_InterlockedDecrement_rel,
_InterlockedDecrement_nf,
__fastfail,
};

static std::optional<CIRGenFunction::MSVCIntrin>
translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CIRGenFunction::MSVCIntrin;
@@ -2102,23 +2058,6 @@ mlir::Value CIRGenFunction::emitAArch64SVEBuiltinExpr(unsigned BuiltinID,
llvm_unreachable("NYI");
}

mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(unsigned ICEArguments,
unsigned Idx,
const CallExpr *E) {
mlir::Value Arg = {};
if ((ICEArguments & (1 << Idx)) == 0) {
Arg = emitScalarExpr(E->getArg(Idx));
} else {
// If this is required to be a constant, constant fold it so that we
// know that the generated intrinsic gets a ConstantInt.
std::optional<llvm::APSInt> Result =
E->getArg(Idx)->getIntegerConstantExpr(getContext());
assert(Result && "Expected argument to be a constant");
Arg = builder.getConstInt(getLoc(E->getSourceRange()), *Result);
}
return Arg;
}

static mlir::Value emitArmLdrexNon128Intrinsic(unsigned int builtinID,
const CallExpr *clangCallExpr,
CIRGenFunction &cgf) {
74 changes: 73 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenBuiltinX86.cpp
@@ -31,7 +31,79 @@ using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

static std::optional<CIRGenFunction::MSVCIntrin>
translateX86ToMsvcIntrin(unsigned BuiltinID) {
using MSVCIntrin = CIRGenFunction::MSVCIntrin;
switch (BuiltinID) {
default:
return std::nullopt;
case clang::X86::BI_BitScanForward:
case clang::X86::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
case clang::X86::BI_BitScanReverse:
case clang::X86::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
case clang::X86::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
case clang::X86::BI_InterlockedCompareExchange128:
return MSVCIntrin::_InterlockedCompareExchange128;
case clang::X86::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
case clang::X86::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
case clang::X86::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
case clang::X86::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
case clang::X86::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
case clang::X86::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
case clang::X86::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
}
llvm_unreachable("must return from switch");
}

mlir::Value CIRGenFunction::emitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
llvm_unreachable("NYI");
if (BuiltinID == Builtin::BI__builtin_cpu_is)
llvm_unreachable("__builtin_cpu_is NYI");
if (BuiltinID == Builtin::BI__builtin_cpu_supports)
llvm_unreachable("__builtin_cpu_supports NYI");
if (BuiltinID == Builtin::BI__builtin_cpu_init)
llvm_unreachable("__builtin_cpu_init NYI");

// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (std::optional<MSVCIntrin> MsvcIntId = translateX86ToMsvcIntrin(BuiltinID))
llvm_unreachable("translateX86ToMsvcIntrin NYI");

llvm::SmallVector<mlir::Value, 4> Ops;

// Find out if any arguments are required to be integer constant expressions.
unsigned ICEArguments = 0;
ASTContext::GetBuiltinTypeError Error;
getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
assert(Error == ASTContext::GE_None && "Should not codegen an error");

for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
Ops.push_back(emitScalarOrConstFoldImmArg(ICEArguments, i, E));
}

switch (BuiltinID) {
default:
return nullptr;
case X86::BI_mm_prefetch: {
llvm_unreachable("_mm_prefetch NYI");
}
case X86::BI_mm_clflush: {
mlir::Type voidTy = cir::VoidType::get(&getMLIRContext());
return builder
.create<cir::LLVMIntrinsicCallOp>(
getLoc(E->getExprLoc()), builder.getStringAttr("x86.sse2.clflush"),
voidTy, Ops[0])
.getResult();
}
}
}
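
Other X86 fence/flush builtins could follow the same LLVMIntrinsicCallOp pattern. The case below is a hypothetical sketch and not part of this PR; it assumes _mm_lfence (which classic CodeGen lowers to llvm.x86.sse2.lfence with no operands) and that the op builder accepts an empty operand list:

  // Hypothetical follow-up, not in this PR: _mm_lfence takes no arguments and
  // maps to the llvm.x86.sse2.lfence intrinsic.
  case X86::BI_mm_lfence: {
    mlir::Type voidTy = cir::VoidType::get(&getMLIRContext());
    return builder
        .create<cir::LLVMIntrinsicCallOp>(
            getLoc(E->getExprLoc()), builder.getStringAttr("x86.sse2.lfence"),
            voidTy, mlir::ValueRange{}) // assumption: empty operand list is accepted
        .getResult();
  }
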
44 changes: 43 additions & 1 deletion clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -1015,7 +1015,49 @@ class CIRGenFunction : public CIRGenTypeCache {
RValue emitCoroutineIntrinsic(const CallExpr *E, unsigned int IID);
RValue emitCoroutineFrame();

enum class MSVCIntrin;
// Many of the MSVC builtins are available on x64, ARM, and AArch64; to avoid
// repeating code, we handle them here.
enum class MSVCIntrin {
_BitScanForward,
_BitScanReverse,
_InterlockedAnd,
_InterlockedDecrement,
_InterlockedExchange,
_InterlockedExchangeAdd,
_InterlockedExchangeSub,
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
_InterlockedExchangeAdd_acq,
_InterlockedExchangeAdd_rel,
_InterlockedExchangeAdd_nf,
_InterlockedExchange_acq,
_InterlockedExchange_rel,
_InterlockedExchange_nf,
_InterlockedCompareExchange_acq,
_InterlockedCompareExchange_rel,
_InterlockedCompareExchange_nf,
_InterlockedCompareExchange128,
_InterlockedCompareExchange128_acq,
_InterlockedCompareExchange128_rel,
_InterlockedCompareExchange128_nf,
_InterlockedOr_acq,
_InterlockedOr_rel,
_InterlockedOr_nf,
_InterlockedXor_acq,
_InterlockedXor_rel,
_InterlockedXor_nf,
_InterlockedAnd_acq,
_InterlockedAnd_rel,
_InterlockedAnd_nf,
_InterlockedIncrement_acq,
_InterlockedIncrement_rel,
_InterlockedIncrement_nf,
_InterlockedDecrement_acq,
_InterlockedDecrement_rel,
_InterlockedDecrement_nf,
__fastfail,
};

mlir::Value emitARMMVEBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue,
13 changes: 13 additions & 0 deletions clang/test/CIR/CodeGen/X86/builtins-x86.c
@@ -0,0 +1,13 @@
// Test ClangIR code generation of X86 builtins.
// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-cir -o %t.cir %s
// RUN: FileCheck --check-prefix=CIR --input-file=%t.cir %s
// RUN: %clang_cc1 -triple x86_64-unknown-linux -Wno-implicit-function-declaration -fclangir -emit-llvm -o %t.ll %s
// RUN: FileCheck --check-prefix=LLVM --input-file=%t.ll %s

void test_mm_clflush(const void* tmp_vCp) {
// CIR-LABEL: test_mm_clflush
// LLVM-LABEL: test_mm_clflush
_mm_clflush(tmp_vCp);
// CIR: {{%.*}} = cir.llvm.intrinsic "x86.sse2.clflush" {{%.*}} : (!cir.ptr<!void>) -> !void
// LLVM: call void @llvm.x86.sse2.clflush(ptr {{%.*}})
}
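
For reference on the source-level side, _mm_clflush flushes the cache line containing its pointer argument; a minimal usage sketch (not taken from this PR):

#include <emmintrin.h>

// Write a value, then flush the cache line holding it back to memory, e.g.
// before handing the buffer to an agent that reads memory without going
// through the CPU caches.
void publish(int *slot) {
  *slot = 42;
  _mm_clflush(slot);
}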