@@ -23,27 +23,8 @@ import (
 type WaitGroup struct {
 	noCopy noCopy
 
-	// 64-bit value: high 32 bits are counter, low 32 bits are waiter count.
-	// 64-bit atomic operations require 64-bit alignment, but 32-bit
-	// compilers only guarantee that 64-bit fields are 32-bit aligned.
-	// For this reason on 32 bit architectures we need to check in state()
-	// if state1 is aligned or not, and dynamically "swap" the field order if
-	// needed.
-	state1 uint64
-	state2 uint32
-}
-
-// state returns pointers to the state and sema fields stored within wg.state*.
-func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
-	if unsafe.Alignof(wg.state1) == 8 || uintptr(unsafe.Pointer(&wg.state1))%8 == 0 {
-		// state1 is 64-bit aligned: nothing to do.
-		return &wg.state1, &wg.state2
-	} else {
-		// state1 is 32-bit aligned but not 64-bit aligned: this means that
-		// (&state1)+4 is 64-bit aligned.
-		state := (*[3]uint32)(unsafe.Pointer(&wg.state1))
-		return (*uint64)(unsafe.Pointer(&state[1])), &state[0]
-	}
+	state atomic.Uint64 // high 32 bits are counter, low 32 bits are waiter count.
+	sema  uint32
 }
 
 // Add adds delta, which may be negative, to the WaitGroup counter.
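Aside, not part of the diff: a minimal sketch of how the new packed state word behaves, assuming Go 1.19+ where sync/atomic provides the Uint64 type. The counter occupies the high 32 bits and the waiter count the low 32 bits of a single atomic word:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var state atomic.Uint64
	state.Add(3 << 32) // like Add(3): the counter lives in the high 32 bits
	state.Add(1)       // like one Wait registering: waiters live in the low 32 bits
	s := state.Load()
	fmt.Println("counter:", int32(s>>32), "waiters:", uint32(s)) // counter: 3 waiters: 1
}

Packing both fields into one word lets a single atomic operation observe the counter and waiter count together, which is what Add and Wait below rely on.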
@@ -60,24 +41,22 @@ func (wg *WaitGroup) state() (statep *uint64, semap *uint32) {
 // new Add calls must happen after all previous Wait calls have returned.
 // See the WaitGroup example.
 func (wg *WaitGroup) Add(delta int) {
-	statep, semap := wg.state()
 	if race.Enabled {
-		_ = *statep // trigger nil deref early
 		if delta < 0 {
 			// Synchronize decrements with Wait.
 			race.ReleaseMerge(unsafe.Pointer(wg))
 		}
 		race.Disable()
 		defer race.Enable()
 	}
-	state := atomic.AddUint64(statep, uint64(delta)<<32)
+	state := wg.state.Add(uint64(delta) << 32)
 	v := int32(state >> 32)
 	w := uint32(state)
 	if race.Enabled && delta > 0 && v == int32(delta) {
 		// The first increment must be synchronized with Wait.
 		// Need to model this as a read, because there can be
 		// several concurrent wg.counter transitions from 0.
-		race.Read(unsafe.Pointer(semap))
+		race.Read(unsafe.Pointer(&wg.sema))
 	}
 	if v < 0 {
 		panic("sync: negative WaitGroup counter")
@@ -93,13 +72,13 @@ func (wg *WaitGroup) Add(delta int) {
 	// - Adds must not happen concurrently with Wait,
 	// - Wait does not increment waiters if it sees counter == 0.
 	// Still do a cheap sanity check to detect WaitGroup misuse.
-	if *statep != state {
+	if wg.state.Load() != state {
 		panic("sync: WaitGroup misuse: Add called concurrently with Wait")
 	}
 	// Reset waiters count to 0.
-	*statep = 0
+	wg.state.Store(0)
 	for ; w != 0; w-- {
-		runtime_Semrelease(semap, false, 0)
+		runtime_Semrelease(&wg.sema, false, 0)
 	}
 }
 
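The exported Add/Done/Wait API is untouched by this refactor; a minimal, hypothetical usage sketch:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1) // bumps the counter in the high 32 bits of state
		go func(id int) {
			defer wg.Done() // Add(-1)
			fmt.Println("worker", id, "done")
		}(i)
	}
	wg.Wait() // blocks until the counter drops to 0
}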
@@ -110,13 +89,11 @@ func (wg *WaitGroup) Done() {
 
 // Wait blocks until the WaitGroup counter is zero.
 func (wg *WaitGroup) Wait() {
-	statep, semap := wg.state()
 	if race.Enabled {
-		_ = *statep // trigger nil deref early
 		race.Disable()
 	}
 	for {
-		state := atomic.LoadUint64(statep)
+		state := wg.state.Load()
 		v := int32(state >> 32)
 		w := uint32(state)
 		if v == 0 {
@@ -128,16 +105,16 @@ func (wg *WaitGroup) Wait() {
 			return
 		}
 		// Increment waiters count.
-		if atomic.CompareAndSwapUint64(statep, state, state+1) {
+		if wg.state.CompareAndSwap(state, state+1) {
 			if race.Enabled && w == 0 {
 				// Wait must be synchronized with the first Add.
 				// Need to model this as a write to race with the read in Add.
 				// As a consequence, can do the write only for the first waiter,
 				// otherwise concurrent Waits will race with each other.
-				race.Write(unsafe.Pointer(semap))
+				race.Write(unsafe.Pointer(&wg.sema))
 			}
-			runtime_Semacquire(semap)
-			if *statep != 0 {
+			runtime_Semacquire(&wg.sema)
+			if wg.state.Load() != 0 {
 				panic("sync: WaitGroup is reused before previous Wait has returned")
 			}
 			if race.Enabled {
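A simplified sketch of the lock-free registration pattern in the hunk above, with a hypothetical helper addWaiter standing in for Wait's loop: load the packed word, return early if the counter is zero, otherwise CompareAndSwap the waiter count up and retry on contention.

package main

import (
	"fmt"
	"sync/atomic"
)

// addWaiter mirrors Wait's fast path: register a waiter (low 32 bits)
// only while the counter (high 32 bits) is still nonzero.
func addWaiter(state *atomic.Uint64) bool {
	for {
		s := state.Load()
		if int32(s>>32) == 0 {
			return false // counter is zero: nothing to wait for
		}
		if state.CompareAndSwap(s, s+1) {
			return true // waiter registered
		}
		// CAS lost to a concurrent Add/Done/Wait; reload and retry.
	}
}

func main() {
	var state atomic.Uint64
	state.Add(1 << 32)             // counter = 1
	fmt.Println(addWaiter(&state)) // true
}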