Commit c2483ed

[SPIRV] Add __spirv_ builtins for existing instructions (#85654)

This PR:
* adds __spirv_ builtins for existing instructions;
* fixes parsing of "syncscope" values in atomic instructions;
* fixes a special case of binary header emission.
1 parent 949d70d · commit c2483ed
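Note: the __spirv_ wrappers registered by this patch mirror the operand order of the corresponding SPIR-V instructions, so a frontend can invoke an instruction directly by name. A minimal sketch of such a call site; the declaration and the scope/semantics constants below are illustrative assumptions, not part of this commit:

// Hypothetical wrapper declaration; the SPIR-V backend recognizes the
// "__spirv_" prefix and lowers the call straight to OpAtomicIAdd, whose
// operands are (pointer, scope, semantics, value).
extern "C" int __spirv_AtomicIAdd(int *Ptr, int Scope, int Semantics,
                                  int Value);

int increment(int *Counter) {
  const int DeviceScope = 1; // SPIR-V Scope::Device (illustrative value)
  const int SeqCst = 0x10;   // MemorySemantics::SequentiallyConsistent
  return __spirv_AtomicIAdd(Counter, DeviceScope, SeqCst, 1);
}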

10 files changed, +226 -40 lines changed

llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp

Lines changed: 15 additions & 15 deletions
@@ -103,22 +103,22 @@ void SPIRVAsmPrinter::emitEndOfAsmFile(Module &M) {
   if (ModuleSectionsEmitted == false) {
     outputModuleSections();
     ModuleSectionsEmitted = true;
-  } else {
-    ST = static_cast<const SPIRVTargetMachine &>(TM).getSubtargetImpl();
-    uint32_t DecSPIRVVersion = ST->getSPIRVVersion();
-    uint32_t Major = DecSPIRVVersion / 10;
-    uint32_t Minor = DecSPIRVVersion - Major * 10;
-    // TODO: calculate Bound more carefully from maximum used register number,
-    // accounting for generated OpLabels and other related instructions if
-    // needed.
-    unsigned Bound = 2 * (ST->getBound() + 1);
-    bool FlagToRestore = OutStreamer->getUseAssemblerInfoForParsing();
-    OutStreamer->setUseAssemblerInfoForParsing(true);
-    if (MCAssembler *Asm = OutStreamer->getAssemblerPtr())
-      Asm->setBuildVersion(static_cast<MachO::PlatformType>(0), Major, Minor,
-                           Bound, VersionTuple(Major, Minor, 0, Bound));
-    OutStreamer->setUseAssemblerInfoForParsing(FlagToRestore);
   }
+
+  ST = static_cast<const SPIRVTargetMachine &>(TM).getSubtargetImpl();
+  uint32_t DecSPIRVVersion = ST->getSPIRVVersion();
+  uint32_t Major = DecSPIRVVersion / 10;
+  uint32_t Minor = DecSPIRVVersion - Major * 10;
+  // TODO: calculate Bound more carefully from maximum used register number,
+  // accounting for generated OpLabels and other related instructions if
+  // needed.
+  unsigned Bound = 2 * (ST->getBound() + 1);
+  bool FlagToRestore = OutStreamer->getUseAssemblerInfoForParsing();
+  OutStreamer->setUseAssemblerInfoForParsing(true);
+  if (MCAssembler *Asm = OutStreamer->getAssemblerPtr())
+    Asm->setBuildVersion(static_cast<MachO::PlatformType>(0), Major, Minor,
+                         Bound, VersionTuple(Major, Minor, 0, Bound));
+  OutStreamer->setUseAssemblerInfoForParsing(FlagToRestore);
 }
 
 void SPIRVAsmPrinter::emitFunctionHeader() {
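Note: the relocated block decodes the subtarget's version from a packed two-digit decimal, e.g. 14 becomes SPIR-V 1.4. A self-contained sketch of the same arithmetic, independent of the LLVM types used above:

#include <cassert>
#include <cstdint>

// Mirrors the decoding in emitEndOfAsmFile(): major is the tens digit,
// minor the ones digit of the packed decimal version.
static void decodeSPIRVVersion(uint32_t Dec, uint32_t &Major,
                               uint32_t &Minor) {
  Major = Dec / 10;
  Minor = Dec - Major * 10;
}

int main() {
  uint32_t Major = 0, Minor = 0;
  decodeSPIRVVersion(14, Major, Minor);
  assert(Major == 1 && Minor == 4); // 14 -> v1.4
  return 0;
}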

llvm/lib/Target/SPIRV/SPIRVBuiltins.cpp

Lines changed: 55 additions & 13 deletions
@@ -53,6 +53,8 @@ struct IncomingCall {
       : BuiltinName(BuiltinName), Builtin(Builtin),
         ReturnRegister(ReturnRegister), ReturnType(ReturnType),
         Arguments(Arguments) {}
+
+  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
 };
 
 struct NativeBuiltin {
@@ -485,9 +487,27 @@ static Register buildMemSemanticsReg(Register SemanticsRegister,
   return buildConstantIntReg(Semantics, MIRBuilder, GR);
 }
 
+static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
+                               const SPIRV::IncomingCall *Call,
+                               Register TypeReg = Register(0)) {
+  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
+  auto MIB = MIRBuilder.buildInstr(Opcode);
+  if (TypeReg.isValid())
+    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
+  for (Register ArgReg : Call->Arguments) {
+    if (!MRI->getRegClassOrNull(ArgReg))
+      MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
+    MIB.addUse(ArgReg);
+  }
+  return true;
+}
+
 /// Helper function for translating atomic init to OpStore.
 static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder) {
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call);
+
   assert(Call->Arguments.size() == 2 &&
          "Need 2 arguments for atomic init translation");
   MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
@@ -502,6 +522,10 @@ static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
 static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
+  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
+
   Register PtrRegister = Call->Arguments[0];
   MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
   // TODO: if true insert call to __translate_ocl_memory_sccope before
@@ -528,7 +552,7 @@ static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
 
   MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
       .addDef(Call->ReturnRegister)
-      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
+      .addUse(TypeReg)
       .addUse(PtrRegister)
       .addUse(ScopeRegister)
       .addUse(MemSemanticsReg);
@@ -539,6 +563,9 @@ static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
 static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call);
+
   Register ScopeRegister =
       buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
   Register PtrRegister = Call->Arguments[0];
@@ -557,12 +584,13 @@ static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
 }
 
 /// Helper function for building an atomic compare-exchange instruction.
-static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call,
-                                           MachineIRBuilder &MIRBuilder,
-                                           SPIRVGlobalRegistry *GR) {
-  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
-  unsigned Opcode =
-      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
+static bool buildAtomicCompareExchangeInst(
+    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
+    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+                              GR->getSPIRVTypeID(Call->ReturnType));
+
   bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
 
@@ -667,6 +695,10 @@ static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call,
 static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+                              GR->getSPIRVTypeID(Call->ReturnType));
+
   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
   Register ScopeRegister =
       Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
@@ -731,6 +763,12 @@ static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
 static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                 unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
+  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
+  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
+                              IsSet ? TypeReg : Register(0));
+
   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
   Register PtrRegister = Call->Arguments[0];
   unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
@@ -750,9 +788,8 @@ static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
   buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
 
   auto MIB = MIRBuilder.buildInstr(Opcode);
-  if (Opcode == SPIRV::OpAtomicFlagTestAndSet)
-    MIB.addDef(Call->ReturnRegister)
-        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
+  if (IsSet)
+    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
 
   MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
   return true;
@@ -763,6 +800,9 @@ static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
 static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
+  if (Call->isSpirvOp())
+    return buildOpFromWrapper(MIRBuilder, Opcode, Call);
+
   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
   unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
   unsigned MemSemantics = SPIRV::MemorySemantics::None;
@@ -1240,7 +1280,8 @@ static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
     return buildAtomicStoreInst(Call, MIRBuilder, GR);
   case SPIRV::OpAtomicCompareExchange:
   case SPIRV::OpAtomicCompareExchangeWeak:
-    return buildAtomicCompareExchangeInst(Call, MIRBuilder, GR);
+    return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
+                                          GR);
   case SPIRV::OpAtomicIAdd:
   case SPIRV::OpAtomicISub:
   case SPIRV::OpAtomicOr:
@@ -1815,14 +1856,15 @@ static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
                                SPIRVGlobalRegistry *GR) {
   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
   const DataLayout &DL = MIRBuilder.getDataLayout();
-  bool HasEvents = Call->Builtin->Name.contains("events");
+  bool IsSpirvOp = Call->isSpirvOp();
+  bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
   const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
 
   // Make vararg instructions before OpEnqueueKernel.
   // Local sizes arguments: Sizes of block invoke arguments. Clang generates
   // local size operands as an array, so we need to unpack them.
   SmallVector<Register, 16> LocalSizes;
-  if (Call->Builtin->Name.find("_varargs") != StringRef::npos) {
+  if (Call->Builtin->Name.find("_varargs") != StringRef::npos || IsSpirvOp) {
     const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
     Register GepReg = Call->Arguments[LocalSizeArrayIdx];
     MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
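Note: isSpirvOp() relies on the common rfind(Prefix, 0) == 0 idiom: a backward search anchored at index 0 can only match at the very start of the string, so it is a prefix test, not a substring search. A self-contained illustration with std::string (the LLVM code uses the equivalent StringRef method):

#include <cassert>
#include <string>

// rfind(Prefix, 0) restricts the match position to index 0, making this
// a "starts with" check.
static bool isSpirvOp(const std::string &BuiltinName) {
  return BuiltinName.rfind("__spirv_", 0) == 0;
}

int main() {
  assert(isSpirvOp("__spirv_AtomicLoad"));
  assert(!isSpirvOp("atomic_load"));
  assert(!isSpirvOp("my__spirv_helper")); // prefix only, not substring
  return 0;
}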

llvm/lib/Target/SPIRV/SPIRVBuiltins.td

Lines changed: 24 additions & 0 deletions
@@ -500,27 +500,38 @@ defm : DemangledNativeBuiltin<"__spirv_All", OpenCL_std, Relational, 1, 1, OpAll
 defm : DemangledNativeBuiltin<"atomic_init", OpenCL_std, Atomic, 2, 2, OpStore>;
 defm : DemangledNativeBuiltin<"atomic_load", OpenCL_std, Atomic, 1, 1, OpAtomicLoad>;
 defm : DemangledNativeBuiltin<"atomic_load_explicit", OpenCL_std, Atomic, 2, 3, OpAtomicLoad>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicLoad", OpenCL_std, Atomic, 3, 3, OpAtomicLoad>;
 defm : DemangledNativeBuiltin<"atomic_store", OpenCL_std, Atomic, 2, 2, OpAtomicStore>;
 defm : DemangledNativeBuiltin<"atomic_store_explicit", OpenCL_std, Atomic, 2, 4, OpAtomicStore>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicStore", OpenCL_std, Atomic, 4, 4, OpAtomicStore>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_strong", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchange>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicCompareExchange", OpenCL_std, Atomic, 6, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_strong_explicit", OpenCL_std, Atomic, 5, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_weak", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchangeWeak>;
 defm : DemangledNativeBuiltin<"atomic_compare_exchange_weak_explicit", OpenCL_std, Atomic, 5, 6, OpAtomicCompareExchangeWeak>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicCompareExchangeWeak", OpenCL_std, Atomic, 6, 6, OpAtomicCompareExchangeWeak>;
 defm : DemangledNativeBuiltin<"atom_cmpxchg", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atomic_cmpxchg", OpenCL_std, Atomic, 3, 6, OpAtomicCompareExchange>;
 defm : DemangledNativeBuiltin<"atom_add", OpenCL_std, Atomic, 2, 4, OpAtomicIAdd>;
 defm : DemangledNativeBuiltin<"atomic_add", OpenCL_std, Atomic, 2, 4, OpAtomicIAdd>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicIAdd", OpenCL_std, Atomic, 4, 4, OpAtomicIAdd>;
 defm : DemangledNativeBuiltin<"atom_sub", OpenCL_std, Atomic, 2, 4, OpAtomicISub>;
 defm : DemangledNativeBuiltin<"atomic_sub", OpenCL_std, Atomic, 2, 4, OpAtomicISub>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicISub", OpenCL_std, Atomic, 4, 4, OpAtomicISub>;
 defm : DemangledNativeBuiltin<"atom_or", OpenCL_std, Atomic, 2, 4, OpAtomicOr>;
 defm : DemangledNativeBuiltin<"atomic_or", OpenCL_std, Atomic, 2, 4, OpAtomicOr>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicOr", OpenCL_std, Atomic, 4, 4, OpAtomicOr>;
 defm : DemangledNativeBuiltin<"atom_xor", OpenCL_std, Atomic, 2, 4, OpAtomicXor>;
 defm : DemangledNativeBuiltin<"atomic_xor", OpenCL_std, Atomic, 2, 4, OpAtomicXor>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicXor", OpenCL_std, Atomic, 4, 4, OpAtomicXor>;
 defm : DemangledNativeBuiltin<"atom_and", OpenCL_std, Atomic, 2, 4, OpAtomicAnd>;
 defm : DemangledNativeBuiltin<"atomic_and", OpenCL_std, Atomic, 2, 4, OpAtomicAnd>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicAnd", OpenCL_std, Atomic, 4, 4, OpAtomicAnd>;
 defm : DemangledNativeBuiltin<"atomic_exchange", OpenCL_std, Atomic, 2, 4, OpAtomicExchange>;
 defm : DemangledNativeBuiltin<"atomic_exchange_explicit", OpenCL_std, Atomic, 2, 4, OpAtomicExchange>;
+defm : DemangledNativeBuiltin<"AtomicEx__spirv_change", OpenCL_std, Atomic, 2, 4, OpAtomicExchange>;
 defm : DemangledNativeBuiltin<"atomic_work_item_fence", OpenCL_std, Atomic, 1, 3, OpMemoryBarrier>;
+defm : DemangledNativeBuiltin<"__spirv_MemoryBarrier", OpenCL_std, Atomic, 2, 2, OpMemoryBarrier>;
 defm : DemangledNativeBuiltin<"atomic_fetch_add", OpenCL_std, Atomic, 2, 4, OpAtomicIAdd>;
 defm : DemangledNativeBuiltin<"atomic_fetch_sub", OpenCL_std, Atomic, 2, 4, OpAtomicISub>;
 defm : DemangledNativeBuiltin<"atomic_fetch_or", OpenCL_std, Atomic, 2, 4, OpAtomicOr>;
@@ -532,26 +543,37 @@ defm : DemangledNativeBuiltin<"atomic_fetch_or_explicit", OpenCL_std, Atomic, 4,
 defm : DemangledNativeBuiltin<"atomic_fetch_xor_explicit", OpenCL_std, Atomic, 4, 6, OpAtomicXor>;
 defm : DemangledNativeBuiltin<"atomic_fetch_and_explicit", OpenCL_std, Atomic, 4, 6, OpAtomicAnd>;
 defm : DemangledNativeBuiltin<"atomic_flag_test_and_set", OpenCL_std, Atomic, 1, 1, OpAtomicFlagTestAndSet>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicFlagTestAndSet", OpenCL_std, Atomic, 3, 3, OpAtomicFlagTestAndSet>;
 defm : DemangledNativeBuiltin<"atomic_flag_test_and_set_explicit", OpenCL_std, Atomic, 2, 3, OpAtomicFlagTestAndSet>;
 defm : DemangledNativeBuiltin<"atomic_flag_clear", OpenCL_std, Atomic, 1, 1, OpAtomicFlagClear>;
+defm : DemangledNativeBuiltin<"__spirv_AtomicFlagClear", OpenCL_std, Atomic, 3, 3, OpAtomicFlagClear>;
 defm : DemangledNativeBuiltin<"atomic_flag_clear_explicit", OpenCL_std, Atomic, 2, 3, OpAtomicFlagClear>;
 
 // Barrier builtin records:
 defm : DemangledNativeBuiltin<"barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>;
 defm : DemangledNativeBuiltin<"work_group_barrier", OpenCL_std, Barrier, 1, 3, OpControlBarrier>;
+defm : DemangledNativeBuiltin<"__spirv_ControlBarrier", OpenCL_std, Barrier, 3, 3, OpControlBarrier>;
 
 // Kernel enqueue builtin records:
 defm : DemangledNativeBuiltin<"__enqueue_kernel_basic", OpenCL_std, Enqueue, 5, 5, OpEnqueueKernel>;
 defm : DemangledNativeBuiltin<"__enqueue_kernel_basic_events", OpenCL_std, Enqueue, 8, 8, OpEnqueueKernel>;
 defm : DemangledNativeBuiltin<"__enqueue_kernel_varargs", OpenCL_std, Enqueue, 7, 7, OpEnqueueKernel>;
 defm : DemangledNativeBuiltin<"__enqueue_kernel_events_varargs", OpenCL_std, Enqueue, 10, 10, OpEnqueueKernel>;
+defm : DemangledNativeBuiltin<"__spirv_EnqueueKernel", OpenCL_std, Enqueue, 10, 0, OpEnqueueKernel>;
 defm : DemangledNativeBuiltin<"retain_event", OpenCL_std, Enqueue, 1, 1, OpRetainEvent>;
+defm : DemangledNativeBuiltin<"__spirv_RetainEvent", OpenCL_std, Enqueue, 1, 1, OpRetainEvent>;
 defm : DemangledNativeBuiltin<"release_event", OpenCL_std, Enqueue, 1, 1, OpReleaseEvent>;
+defm : DemangledNativeBuiltin<"__spirv_ReleaseEvent", OpenCL_std, Enqueue, 1, 1, OpReleaseEvent>;
 defm : DemangledNativeBuiltin<"create_user_event", OpenCL_std, Enqueue, 0, 0, OpCreateUserEvent>;
+defm : DemangledNativeBuiltin<"__spirv_CreateUserEvent", OpenCL_std, Enqueue, 0, 0, OpCreateUserEvent>;
 defm : DemangledNativeBuiltin<"is_valid_event", OpenCL_std, Enqueue, 1, 1, OpIsValidEvent>;
+defm : DemangledNativeBuiltin<"__spirv_IsValidEvent", OpenCL_std, Enqueue, 1, 1, OpIsValidEvent>;
 defm : DemangledNativeBuiltin<"set_user_event_status", OpenCL_std, Enqueue, 2, 2, OpSetUserEventStatus>;
+defm : DemangledNativeBuiltin<"__spirv_SetUserEventStatus", OpenCL_std, Enqueue, 2, 2, OpSetUserEventStatus>;
 defm : DemangledNativeBuiltin<"capture_event_profiling_info", OpenCL_std, Enqueue, 3, 3, OpCaptureEventProfilingInfo>;
+defm : DemangledNativeBuiltin<"__spirv_CaptureEventProfilingInfo", OpenCL_std, Enqueue, 3, 3, OpCaptureEventProfilingInfo>;
 defm : DemangledNativeBuiltin<"get_default_queue", OpenCL_std, Enqueue, 0, 0, OpGetDefaultQueue>;
+defm : DemangledNativeBuiltin<"__spirv_GetDefaultQueue", OpenCL_std, Enqueue, 0, 0, OpGetDefaultQueue>;
 defm : DemangledNativeBuiltin<"ndrange_1D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
 defm : DemangledNativeBuiltin<"ndrange_2D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
 defm : DemangledNativeBuiltin<"ndrange_3D", OpenCL_std, Enqueue, 1, 3, OpBuildNDRange>;
@@ -562,7 +584,9 @@ defm : DemangledNativeBuiltin<"__spirv_SpecConstantComposite", OpenCL_std, SpecC
 
 // Async Copy and Prefetch builtin records:
 defm : DemangledNativeBuiltin<"async_work_group_copy", OpenCL_std, AsyncCopy, 4, 4, OpGroupAsyncCopy>;
+defm : DemangledNativeBuiltin<"__spirv_GroupAsyncCopy", OpenCL_std, AsyncCopy, 4, 4, OpGroupAsyncCopy>;
 defm : DemangledNativeBuiltin<"wait_group_events", OpenCL_std, AsyncCopy, 2, 2, OpGroupWaitEvents>;
+defm : DemangledNativeBuiltin<"__spirv_GroupWaitEvents", OpenCL_std, AsyncCopy, 2, 2, OpGroupWaitEvents>;
 
 // Load and store builtin records:
 defm : DemangledNativeBuiltin<"__spirv_Load", OpenCL_std, LoadStore, 1, 3, OpLoad>;
