
Commit d6098e4

cmd/compile: intrinsify sync/atomic for amd64
Uses the same implementation as runtime/internal/atomic.
Reorganize the intrinsic detector to make it more table-driven.
Also works on amd64p32.

Change-Id: I7a5238951d6018d7d5d1bc01f339f6ee9282b2d0
Reviewed-on: https://go-review.googlesource.com/28076
Reviewed-by: Cherry Zhang <[email protected]>
1 parent adb1e67 commit d6098e4
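For context, a minimal user-level sketch (not part of this change): the sync/atomic calls below are among those this commit aliases to the runtime/internal/atomic builders, so on amd64 they become candidates for inline atomic code rather than out-of-line calls. The counter workload itself is made up for illustration.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var n int64
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				atomic.AddInt64(&n, 1) // aliased to runtime/internal/atomic.Xadd64 in this CL
			}
		}()
	}
	wg.Wait()
	fmt.Println(atomic.LoadInt64(&n)) // aliased to runtime/internal/atomic.Load64 in this CL
}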

4 files changed: 265 additions, 121 deletions

src/cmd/compile/internal/gc/ssa.go: 255 additions, 109 deletions
@@ -2528,12 +2528,226 @@ const (
 	callGo
 )
 
-// isSSAIntrinsic returns true if n is a call to a recognized intrinsic
-// that can be handled by the SSA backend.
-// SSA uses this, but so does the front end to see if should not
-// inline a function because it is a candidate for intrinsic
-// substitution.
-func isSSAIntrinsic(s *Sym) bool {
+// TODO: make this a field of a configuration object instead of a global.
+var intrinsics *intrinsicInfo
+
+type intrinsicInfo struct {
+	std      map[intrinsicKey]intrinsicBuilder
+	intSized map[sizedIntrinsicKey]intrinsicBuilder
+	ptrSized map[sizedIntrinsicKey]intrinsicBuilder
+}
+
+// An intrinsicBuilder converts a call node n into an ssa value that
+// implements that call as an intrinsic.
+type intrinsicBuilder func(s *state, n *Node) *ssa.Value
+
+type intrinsicKey struct {
+	pkg string
+	fn  string
+}
+
+type sizedIntrinsicKey struct {
+	pkg  string
+	fn   string
+	size int
+}
+
+func intrinsicInit() {
+	i := &intrinsicInfo{}
+	intrinsics = i
+
+	// initial set of intrinsics.
+	i.std = map[intrinsicKey]intrinsicBuilder{
+		/******** runtime/internal/sys ********/
+		intrinsicKey{"runtime/internal/sys", "Ctz32"}: func(s *state, n *Node) *ssa.Value {
+			return s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
+		},
+		intrinsicKey{"runtime/internal/sys", "Ctz64"}: func(s *state, n *Node) *ssa.Value {
+			return s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
+		},
+		intrinsicKey{"runtime/internal/sys", "Bswap32"}: func(s *state, n *Node) *ssa.Value {
+			return s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
+		},
+		intrinsicKey{"runtime/internal/sys", "Bswap64"}: func(s *state, n *Node) *ssa.Value {
+			return s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
+		},
+
+		/******** runtime/internal/atomic ********/
+		intrinsicKey{"runtime/internal/atomic", "Load"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+		},
+		intrinsicKey{"runtime/internal/atomic", "Load64"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+		},
+		intrinsicKey{"runtime/internal/atomic", "Loadp"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(Ptrto(Types[TUINT8]), ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Ptrto(Types[TUINT8]), v)
+		},
+
+		intrinsicKey{"runtime/internal/atomic", "Store"}: func(s *state, n *Node) *ssa.Value {
+			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			return nil
+		},
+		intrinsicKey{"runtime/internal/atomic", "Store64"}: func(s *state, n *Node) *ssa.Value {
+			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			return nil
+		},
+		intrinsicKey{"runtime/internal/atomic", "StorepNoWB"}: func(s *state, n *Node) *ssa.Value {
+			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			return nil
+		},
+
+		intrinsicKey{"runtime/internal/atomic", "Xchg"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+		},
+		intrinsicKey{"runtime/internal/atomic", "Xchg64"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+		},
+
+		intrinsicKey{"runtime/internal/atomic", "Xadd"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT32], v)
+		},
+		intrinsicKey{"runtime/internal/atomic", "Xadd64"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TUINT64], v)
+		},
+
+		intrinsicKey{"runtime/internal/atomic", "Cas"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
+		},
+		intrinsicKey{"runtime/internal/atomic", "Cas64"}: func(s *state, n *Node) *ssa.Value {
+			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
+			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, v)
+			return s.newValue1(ssa.OpSelect0, Types[TBOOL], v)
+		},
+
+		intrinsicKey{"runtime/internal/atomic", "And8"}: func(s *state, n *Node) *ssa.Value {
+			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			return nil
+		},
+		intrinsicKey{"runtime/internal/atomic", "Or8"}: func(s *state, n *Node) *ssa.Value {
+			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
+			return nil
+		},
+	}
+
+	// aliases internal to runtime/internal/atomic
+	i.std[intrinsicKey{"runtime/internal/atomic", "Loadint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
+
+	// intrinsics which vary depending on the size of int/ptr.
+	i.intSized = map[sizedIntrinsicKey]intrinsicBuilder{
+		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Load"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduint", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}],
+	}
+	i.ptrSized = map[sizedIntrinsicKey]intrinsicBuilder{
+		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Load"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Loaduintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 4}: i.std[intrinsicKey{"runtime/internal/atomic", "Store"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Storeuintptr", 8}: i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xchg"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Xchguintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xchg64"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 4}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Xadduintptr", 8}:  i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 4}:   i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Casuintptr", 8}:   i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 4}:        i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}],
+		sizedIntrinsicKey{"runtime/internal/atomic", "Casp1", 8}:        i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}],
+	}
+
+	/******** sync/atomic ********/
+	if flag_race {
+		// The race detector needs to be able to intercept these calls.
+		// We can't intrinsify them.
+		return
+	}
+	// these are all aliases to runtime/internal/atomic implementations.
+	i.std[intrinsicKey{"sync/atomic", "LoadInt32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
+	i.std[intrinsicKey{"sync/atomic", "LoadInt64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
+	i.std[intrinsicKey{"sync/atomic", "LoadPointer"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Loadp"}]
+	i.std[intrinsicKey{"sync/atomic", "LoadUint32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
+	i.std[intrinsicKey{"sync/atomic", "LoadUint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 4}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "LoadUintptr", 8}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Load64"}]
+
+	i.std[intrinsicKey{"sync/atomic", "StoreInt32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
+	i.std[intrinsicKey{"sync/atomic", "StoreInt64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
+	// Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
+	i.std[intrinsicKey{"sync/atomic", "StoreUint32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
+	i.std[intrinsicKey{"sync/atomic", "StoreUint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 4}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "StoreUintptr", 8}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Store64"}]
+
+	i.std[intrinsicKey{"sync/atomic", "SwapInt32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap"}]
+	i.std[intrinsicKey{"sync/atomic", "SwapInt64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap64"}]
+	i.std[intrinsicKey{"sync/atomic", "SwapUint32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap"}]
+	i.std[intrinsicKey{"sync/atomic", "SwapUint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap64"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 4}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "SwapUintptr", 8}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Swap64"}]
+
+	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
+	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapInt64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
+	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
+	i.std[intrinsicKey{"sync/atomic", "CompareAndSwapUint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 4}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "CompareAndSwapUintptr", 8}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Cas64"}]
+
+	i.std[intrinsicKey{"sync/atomic", "AddInt32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
+	i.std[intrinsicKey{"sync/atomic", "AddInt64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
+	i.std[intrinsicKey{"sync/atomic", "AddUint32"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
+	i.std[intrinsicKey{"sync/atomic", "AddUint64"}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 4}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd"}]
+	i.ptrSized[sizedIntrinsicKey{"sync/atomic", "AddUintptr", 8}] =
+		i.std[intrinsicKey{"runtime/internal/atomic", "Xadd64"}]
+}
+
+// findIntrinsic returns a function which builds the SSA equivalent of the
+// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
+func findIntrinsic(sym *Sym) intrinsicBuilder {
 	// The test below is not quite accurate -- in the event that
 	// a function is disabled on a per-function basis, for example
 	// because of hash-keyed binary failure search, SSA might be
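The hunk above replaces the old hard-coded recognizer with tables: builders are keyed by (package, function), size-dependent helpers get separate int-sized and pointer-sized tables, and the sync/atomic entries are installed as plain aliases of the runtime builders. A simplified, self-contained sketch of that aliasing pattern (illustrative only; the real builders return *ssa.Value, and the strings here merely stand in for SSA ops):

package main

import "fmt"

type key struct{ pkg, fn string }
type builder func() string // stand-in for the real intrinsicBuilder

func main() {
	std := map[key]builder{
		{"runtime/internal/atomic", "Xadd64"}: func() string { return "AtomicAdd64" },
		{"runtime/internal/atomic", "Cas64"}:  func() string { return "AtomicCompareAndSwap64" },
	}
	// sync/atomic entries are plain aliases of the runtime builders,
	// mirroring the assignments in intrinsicInit above.
	std[key{"sync/atomic", "AddInt64"}] = std[key{"runtime/internal/atomic", "Xadd64"}]
	std[key{"sync/atomic", "CompareAndSwapInt64"}] = std[key{"runtime/internal/atomic", "Cas64"}]

	if b := std[key{"sync/atomic", "AddInt64"}]; b != nil {
		fmt.Println("AddInt64 lowers to", b()) // AddInt64 lowers to AtomicAdd64
	}
}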
@@ -2543,40 +2757,50 @@ func isSSAIntrinsic(s *Sym) bool {
 	// leading/trailing instructions, but heuristics might change
 	// in the future or on different architectures).
 	if !ssaEnabled || ssa.IntrinsicsDisable || Thearch.LinkArch.Family != sys.AMD64 {
-		return false
+		return nil
 	}
-	if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/sys" {
-		switch s.Name {
-		case
-			"Ctz64", "Ctz32",
-			"Bswap64", "Bswap32":
-			return true
-		}
+	// TODO: parameterize this code by architecture. Maybe we should ask the SSA
+	// backend if it can lower the ops involved?
+	if sym == nil || sym.Pkg == nil {
+		return nil
 	}
-	if s != nil && s.Pkg != nil && s.Pkg.Path == "runtime/internal/atomic" {
-		switch s.Name {
-		case "Load", "Load64", "Loadint64", "Loadp", "Loaduint", "Loaduintptr":
-			return true
-		case "Store", "Store64", "StorepNoWB", "Storeuintptr":
-			return true
-		case "Xchg", "Xchg64", "Xchguintptr":
-			return true
-		case "Xadd", "Xadd64", "Xaddint64", "Xadduintptr":
-			return true
-		case "Cas", "Cas64", "Casp1", "Casuintptr":
-			return true
-		case "And8", "Or8":
-			return true
-		}
+	if intrinsics == nil {
+		intrinsicInit()
 	}
-	return false
+	pkg := sym.Pkg.Path
+	fn := sym.Name
+	f := intrinsics.std[intrinsicKey{pkg, fn}]
+	if f != nil {
+		return f
+	}
+	f = intrinsics.intSized[sizedIntrinsicKey{pkg, fn, Widthint}]
+	if f != nil {
+		return f
+	}
+	return intrinsics.ptrSized[sizedIntrinsicKey{pkg, fn, Widthptr}]
 }
 
 func isIntrinsicCall(n *Node) bool {
 	if n == nil || n.Left == nil {
 		return false
 	}
-	return isSSAIntrinsic(n.Left.Sym)
+	return findIntrinsic(n.Left.Sym) != nil
+}
+
+// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
+func (s *state) intrinsicCall(n *Node) *ssa.Value {
+	v := findIntrinsic(n.Left.Sym)(s, n)
+	if ssa.IntrinsicsDebug > 0 {
+		x := v
+		if x == nil {
+			x = s.mem()
+		}
+		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
+			x = x.Args[0]
+		}
+		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
+	}
+	return v
 }
 
 // intrinsicArg extracts the ith arg from n.List and returns its value.
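findIntrinsic above resolves a symbol in three steps: an exact (pkg, fn) match in the std table, then the int-sized table keyed by Widthint, then the pointer-sized table keyed by Widthptr. The width keys are what let the same tables cover both amd64 and amd64p32, where pointers are 4 bytes. A standalone sketch of that lookup order (illustrative only; the builder names are invented, and the width arguments stand in for the compiler's Widthint/Widthptr globals):

package main

import "fmt"

type key struct{ pkg, fn string }
type sizedKey struct {
	pkg, fn string
	size    int
}
type builder string // stand-in for an intrinsicBuilder; "" means "not found"

// find mirrors the three-step lookup in findIntrinsic.
func find(std map[key]builder, intSized, ptrSized map[sizedKey]builder,
	pkg, fn string, widthInt, widthPtr int) builder {
	if b := std[key{pkg, fn}]; b != "" {
		return b
	}
	if b := intSized[sizedKey{pkg, fn, widthInt}]; b != "" {
		return b
	}
	return ptrSized[sizedKey{pkg, fn, widthPtr}]
}

func main() {
	ptrSized := map[sizedKey]builder{
		{"runtime/internal/atomic", "Xchguintptr", 4}: "AtomicExchange32",
		{"runtime/internal/atomic", "Xchguintptr", 8}: "AtomicExchange64",
	}
	// amd64 has 8-byte pointers; amd64p32 has 4-byte pointers.
	fmt.Println(find(nil, nil, ptrSized, "runtime/internal/atomic", "Xchguintptr", 8, 8)) // AtomicExchange64
	fmt.Println(find(nil, nil, ptrSized, "runtime/internal/atomic", "Xchguintptr", 4, 4)) // AtomicExchange32
}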
@@ -2591,84 +2815,6 @@ func (s *state) intrinsicFirstArg(n *Node) *ssa.Value {
 	return s.intrinsicArg(n, 0)
 }
 
-// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
-func (s *state) intrinsicCall(n *Node) (ret *ssa.Value) {
-	var result *ssa.Value
-	name := n.Left.Sym.Name
-	switch {
-	case name == "Ctz64":
-		result = s.newValue1(ssa.OpCtz64, Types[TUINT64], s.intrinsicFirstArg(n))
-		ret = result
-	case name == "Ctz32":
-		result = s.newValue1(ssa.OpCtz32, Types[TUINT32], s.intrinsicFirstArg(n))
-		ret = result
-	case name == "Bswap64":
-		result = s.newValue1(ssa.OpBswap64, Types[TUINT64], s.intrinsicFirstArg(n))
-		ret = result
-	case name == "Bswap32":
-		result = s.newValue1(ssa.OpBswap32, Types[TUINT32], s.intrinsicFirstArg(n))
-		ret = result
-	case name == "Load" || name == "Loaduint" && s.config.IntSize == 4 || name == "Loaduintptr" && s.config.PtrSize == 4:
-		result = s.newValue2(ssa.OpAtomicLoad32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result)
-	case name == "Load64" || name == "Loadint64" || name == "Loaduint" && s.config.IntSize == 8 || name == "Loaduintptr" && s.config.PtrSize == 8:
-		result = s.newValue2(ssa.OpAtomicLoad64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result)
-	case name == "Loadp":
-		result = s.newValue2(ssa.OpAtomicLoadPtr, ssa.MakeTuple(Ptrto(Types[TUINT8]), ssa.TypeMem), s.intrinsicArg(n, 0), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Ptrto(Types[TUINT8]), result)
-	case name == "Store" || name == "Storeuintptr" && s.config.PtrSize == 4:
-		result = s.newValue3(ssa.OpAtomicStore32, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = result
-	case name == "Store64" || name == "Storeuintptr" && s.config.PtrSize == 8:
-		result = s.newValue3(ssa.OpAtomicStore64, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = result
-	case name == "StorepNoWB":
-		result = s.newValue3(ssa.OpAtomicStorePtrNoWB, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = result
-	case name == "Xchg" || name == "Xchguintptr" && s.config.PtrSize == 4:
-		result = s.newValue3(ssa.OpAtomicExchange32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result)
-	case name == "Xchg64" || name == "Xchguintptr" && s.config.PtrSize == 8:
-		result = s.newValue3(ssa.OpAtomicExchange64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result)
-	case name == "Xadd" || name == "Xadduintptr" && s.config.PtrSize == 4:
-		result = s.newValue3(ssa.OpAtomicAdd32, ssa.MakeTuple(Types[TUINT32], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT32], result)
-	case name == "Xadd64" || name == "Xaddint64" || name == "Xadduintptr" && s.config.PtrSize == 8:
-		result = s.newValue3(ssa.OpAtomicAdd64, ssa.MakeTuple(Types[TUINT64], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TUINT64], result)
-	case name == "Cas" || (name == "Casp1" || name == "Casuintptr") && s.config.PtrSize == 4:
-		result = s.newValue4(ssa.OpAtomicCompareAndSwap32, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TBOOL], result)
-	case name == "Cas64" || (name == "Casp1" || name == "Casuintptr") && s.config.PtrSize == 8:
-		result = s.newValue4(ssa.OpAtomicCompareAndSwap64, ssa.MakeTuple(Types[TBOOL], ssa.TypeMem), s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.intrinsicArg(n, 2), s.mem())
-		s.vars[&memVar] = s.newValue1(ssa.OpSelect1, ssa.TypeMem, result)
-		ret = s.newValue1(ssa.OpSelect0, Types[TBOOL], result)
-	case name == "And8":
-		result = s.newValue3(ssa.OpAtomicAnd8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = result
-	case name == "Or8":
-		result = s.newValue3(ssa.OpAtomicOr8, ssa.TypeMem, s.intrinsicArg(n, 0), s.intrinsicArg(n, 1), s.mem())
-		s.vars[&memVar] = result
-	}
-	if result == nil {
-		Fatalf("Unknown special call: %v", n.Left.Sym)
-	}
-	if ssa.IntrinsicsDebug > 0 {
-		Warnl(n.Lineno, "intrinsic substitution for %v with %s", n.Left.Sym.Name, result.LongString())
-	}
-	return
-}
-
 // Calls the function n using the specified call type.
 // Returns the address of the return value (or nil if none).
 func (s *state) call(n *Node, k callKind) *ssa.Value {
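With the old switch-based intrinsicCall deleted above, every recognized call now flows through findIntrinsic and the table of builders. A usage-level sketch (ordinary application code, unrelated to the compiler internals) of the compare-and-swap retry loop that this commit maps onto the Cas64 op through the CompareAndSwapInt64 alias; the helper name and the capping policy are made up for illustration:

package main

import (
	"fmt"
	"sync/atomic"
)

// addCapped atomically adds delta to *v without letting it exceed limit.
func addCapped(v *int64, delta, limit int64) int64 {
	for {
		old := atomic.LoadInt64(v) // alias of runtime/internal/atomic.Load64
		next := old + delta
		if next > limit {
			next = limit
		}
		// alias of runtime/internal/atomic.Cas64
		if atomic.CompareAndSwapInt64(v, old, next) {
			return next
		}
	}
}

func main() {
	var v int64
	fmt.Println(addCapped(&v, 7, 10)) // 7
	fmt.Println(addCapped(&v, 7, 10)) // 10
}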
