
Commit 59f2704

sync: add Pool benchmarks to stress STW and reuse
This adds two benchmarks that will highlight two problems in Pool that we're about to address.

The first benchmark measures the impact of large Pools on GC STW time. Currently, STW time is O(# of items in Pools), and this benchmark demonstrates 70µs STW times.

The second benchmark measures the impact of fully clearing all Pools on each GC. Typically this is a problem in heavily-loaded systems because it causes a spike in allocation. This benchmark stresses this by simulating an expensive "New" function, so the cost of creating new objects is reflected in the ns/op of the benchmark.

For #22950, #22331.

Change-Id: I0c8853190d23144026fa11837b6bf42adc461722
Reviewed-on: https://go-review.googlesource.com/c/go/+/166959
Run-TryBot: Austin Clements <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: David Chase <[email protected]>
1 parent 57bb7be commit 59f2704
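
The 70µs figure is obtained by reading GC pause times out of runtime.MemStats, whose PauseNs field is a 256-entry circular buffer indexed by GC count, with the most recent pause at (NumGC+255)%256. Below is a minimal standalone sketch of that measurement idiom, for illustration only; it is not part of this commit.

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	// Disable the background GC so the only cycle is the one we force,
	// the same trick BenchmarkPoolSTW uses via SetGCPercent(-1).
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	runtime.GC()

	var mstats runtime.MemStats
	runtime.ReadMemStats(&mstats)
	// PauseNs is a circular buffer; the latest entry is at (NumGC+255)%256.
	fmt.Printf("last STW pause: %d ns\n", mstats.PauseNs[(mstats.NumGC+255)%256])
}

The benchmark below applies the same idiom once per iteration and then reports the average, p50, and p95 of the collected pauses.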

File tree: 1 file changed (+82, -0)

src/sync/pool_test.go

Lines changed: 82 additions & 0 deletions
@@ -10,6 +10,7 @@ package sync_test
 import (
 	"runtime"
 	"runtime/debug"
+	"sort"
 	. "sync"
 	"sync/atomic"
 	"testing"
@@ -253,3 +254,84 @@ func BenchmarkPoolOverflow(b *testing.B) {
 		}
 	})
 }
+
+var globalSink interface{}
+
+func BenchmarkPoolSTW(b *testing.B) {
+	// Take control of GC.
+	defer debug.SetGCPercent(debug.SetGCPercent(-1))
+
+	var mstats runtime.MemStats
+	var pauses []uint64
+
+	var p Pool
+	for i := 0; i < b.N; i++ {
+		// Put a large number of items into a pool.
+		const N = 100000
+		var item interface{} = 42
+		for i := 0; i < N; i++ {
+			p.Put(item)
+		}
+		// Do a GC.
+		runtime.GC()
+		// Record pause time.
+		runtime.ReadMemStats(&mstats)
+		pauses = append(pauses, mstats.PauseNs[(mstats.NumGC+255)%256])
+	}
+
+	// Get pause time stats.
+	sort.Slice(pauses, func(i, j int) bool { return pauses[i] < pauses[j] })
+	var total uint64
+	for _, ns := range pauses {
+		total += ns
+	}
+	// ns/op for this benchmark is average STW time.
+	b.ReportMetric(float64(total)/float64(b.N), "ns/op")
+	b.ReportMetric(float64(pauses[len(pauses)*95/100]), "p95-ns/STW")
+	b.ReportMetric(float64(pauses[len(pauses)*50/100]), "p50-ns/STW")
+}
+
+func BenchmarkPoolExpensiveNew(b *testing.B) {
+	// Populate a pool with items that are expensive to construct
+	// to stress pool cleanup and subsequent reconstruction.
+
+	// Create a ballast so the GC has a non-zero heap size and
+	// runs at reasonable times.
+	globalSink = make([]byte, 8<<20)
+	defer func() { globalSink = nil }()
+
+	// Create a pool that's "expensive" to fill.
+	var p Pool
+	var nNew uint64
+	p.New = func() interface{} {
+		atomic.AddUint64(&nNew, 1)
+		time.Sleep(time.Millisecond)
+		return 42
+	}
+	var mstats1, mstats2 runtime.MemStats
+	runtime.ReadMemStats(&mstats1)
+	b.RunParallel(func(pb *testing.PB) {
+		// Simulate 100X the number of goroutines having items
+		// checked out from the Pool simultaneously.
+		items := make([]interface{}, 100)
+		var sink []byte
+		for pb.Next() {
+			// Stress the pool.
+			for i := range items {
+				items[i] = p.Get()
+				// Simulate doing some work with this
+				// item checked out.
+				sink = make([]byte, 32<<10)
+			}
+			for i, v := range items {
+				p.Put(v)
+				items[i] = nil
			}
+		}
+		_ = sink
+	})
+	runtime.ReadMemStats(&mstats2)
+
+	b.ReportMetric(float64(mstats2.NumGC-mstats1.NumGC)/float64(b.N), "GCs/op")
+	b.ReportMetric(float64(nNew)/float64(b.N), "New/op")
+}
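
The premise of the second benchmark is that sync.Pool's Get falls back to New whenever the pool is empty, and at the time of this commit every GC cleared all pooled items, so a slow New is paid again after each cycle. The following hedged sketch demonstrates that fallback in isolation; the built counter and buffer size are illustrative and not taken from the commit.

package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	var built int
	p := sync.Pool{
		// New runs only when Get finds the pool empty; an expensive New
		// is what BenchmarkPoolExpensiveNew simulates with a 1ms sleep.
		New: func() interface{} {
			built++
			return make([]byte, 64<<10)
		},
	}

	p.Put(p.Get()) // construct one item and cache it
	runtime.GC()   // at the time of this commit, a GC drops all pooled items
	p.Get()        // so this Get pays for New again

	fmt.Println("times New ran:", built)
}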
