// Source file: pool.go
// Package: sync
// Copyright 2013 The Go Authors. All rights reserved.// Use of this source code is governed by a BSD-style// license that can be found in the LICENSE file.package syncimport ()// A Pool is a set of temporary objects that may be individually saved and// retrieved.//// Any item stored in the Pool may be removed automatically at any time without// notification. If the Pool holds the only reference when this happens, the// item might be deallocated.//// A Pool is safe for use by multiple goroutines simultaneously.//// Pool's purpose is to cache allocated but unused items for later reuse,// relieving pressure on the garbage collector. That is, it makes it easy to// build efficient, thread-safe free lists. However, it is not suitable for all// free lists.//// An appropriate use of a Pool is to manage a group of temporary items// silently shared among and potentially reused by concurrent independent// clients of a package. Pool provides a way to amortize allocation overhead// across many clients.//// An example of good use of a Pool is in the fmt package, which maintains a// dynamically-sized store of temporary output buffers. The store scales under// load (when many goroutines are actively printing) and shrinks when// quiescent.//// On the other hand, a free list maintained as part of a short-lived object is// not a suitable use for a Pool, since the overhead does not amortize well in// that scenario. 
It is more efficient to have such objects implement their own// free list.//// A Pool must not be copied after first use.type Pool struct {noCopy noCopylocal unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocallocalSize uintptr // size of the local arrayvictim unsafe.Pointer // local from previous cyclevictimSize uintptr // size of victims array// New optionally specifies a function to generate// a value when Get would otherwise return nil.// It may not be changed concurrently with calls to Get.New func() interface{}}// Local per-P Pool appendix.type poolLocalInternal struct {private interface{} // Can be used only by the respective P.shared poolChain // Local P can pushHead/popHead; any P can popTail.}type poolLocal struct {poolLocalInternal// Prevents false sharing on widespread platforms with// 128 mod (cache line size) = 0 .pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte}// from runtimefunc () uint32var poolRaceHash [128]uint64// poolRaceAddr returns an address to use as the synchronization point// for race detector logic. 
We don't use the actual pointer stored in x// directly, for fear of conflicting with other synchronization on that address.// Instead, we hash the pointer to get an index into poolRaceHash.// See discussion on golang.org/cl/31589.func ( interface{}) unsafe.Pointer {:= uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&))[1]):= uint32((uint64(uint32()) * 0x85ebca6b) >> 16)return unsafe.Pointer(&poolRaceHash[%uint32(len(poolRaceHash))])}// Put adds x to the pool.func ( *Pool) ( interface{}) {if == nil {return}if race.Enabled {if fastrand()%4 == 0 {// Randomly drop x on floor.return}race.ReleaseMerge(poolRaceAddr())race.Disable()}, := .pin()if .private == nil {.private == nil}if != nil {.shared.pushHead()}runtime_procUnpin()if race.Enabled {race.Enable()}}// Get selects an arbitrary item from the Pool, removes it from the// Pool, and returns it to the caller.// Get may choose to ignore the pool and treat it as empty.// Callers should not assume any relation between values passed to Put and// the values returned by Get.//// If Get would otherwise return nil and p.New is non-nil, Get returns// the result of calling p.New.func ( *Pool) () interface{} {if race.Enabled {race.Disable()}, := .pin():= .private.private = nilif == nil {// Try to pop the head of the local shard. We prefer// the head over the tail for temporal locality of// reuse., _ = .shared.popHead()if == nil {= .getSlow()}}runtime_procUnpin()if race.Enabled {race.Enable()if != nil {race.Acquire(poolRaceAddr())}}if == nil && .New != nil {= .New()}return}func ( *Pool) ( int) interface{} {// See the comment in pin regarding ordering of the loads.:= runtime_LoadAcquintptr(&.localSize) // load-acquire:= .local // load-consume// Try to steal one element from other procs.for := 0; < int(); ++ {:= indexLocal(, (++1)%int())if , := .shared.popTail(); != nil {return}}// Try the victim cache. 
We do this after attempting to steal// from all primary caches because we want objects in the// victim cache to age out if at all possible.= atomic.LoadUintptr(&.victimSize)if uintptr() >= {return nil}= .victim:= indexLocal(, )if := .private; != nil {.private = nilreturn}for := 0; < int(); ++ {:= indexLocal(, (+)%int())if , := .shared.popTail(); != nil {return}}// Mark the victim cache as empty for future gets don't bother// with it.atomic.StoreUintptr(&.victimSize, 0)return nil}// pin pins the current goroutine to P, disables preemption and// returns poolLocal pool for the P and the P's id.// Caller must call runtime_procUnpin() when done with the pool.func ( *Pool) () (*poolLocal, int) {:= runtime_procPin()// In pinSlow we store to local and then to localSize, here we load in opposite order.// Since we've disabled preemption, GC cannot happen in between.// Thus here we must observe local at least as large localSize.// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).:= runtime_LoadAcquintptr(&.localSize) // load-acquire:= .local // load-consumeif uintptr() < {return indexLocal(, ),}return .pinSlow()}func ( *Pool) () (*poolLocal, int) {// Retry under the mutex.// Can not lock the mutex while pinned.runtime_procUnpin()allPoolsMu.Lock()defer allPoolsMu.Unlock():= runtime_procPin()// poolCleanup won't be called while we are pinned.:= .localSize:= .localif uintptr() < {return indexLocal(, ),}if .local == nil {allPools = append(allPools, )}// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.:= runtime.GOMAXPROCS(0):= make([]poolLocal, )atomic.StorePointer(&.local, unsafe.Pointer(&[0])) // store-releaseruntime_StoreReluintptr(&.localSize, uintptr()) // store-releasereturn &[],}func () {// This function is called with the world stopped, at the beginning of a garbage collection.// It must not allocate and probably should not call any runtime functions.// Because the world is stopped, no pool user 
can be in a// pinned section (in effect, this has all Ps pinned).// Drop victim caches from all pools.for , := range oldPools {.victim = nil.victimSize = 0}// Move primary cache to victim cache.for , := range allPools {.victim = .local.victimSize = .localSize.local = nil.localSize = 0}// The pools with non-empty primary caches now have non-empty// victim caches and no pools have primary caches.oldPools, allPools = allPools, nil}var (allPoolsMu Mutex// allPools is the set of pools that have non-empty primary// caches. Protected by either 1) allPoolsMu and pinning or 2)// STW.allPools []*Pool// oldPools is the set of pools that may have non-empty victim// caches. Protected by STW.oldPools []*Pool)func () {runtime_registerPoolCleanup(poolCleanup)}func ( unsafe.Pointer, int) *poolLocal {:= unsafe.Pointer(uintptr() + uintptr()*unsafe.Sizeof(poolLocal{}))return (*poolLocal)()}// Implemented in runtime.func ( func())func () intfunc ()// The below are implemented in runtime/internal/atomic and the// compiler also knows to intrinsify the symbol we linkname into this// package.//go:linkname runtime_LoadAcquintptr runtime/internal/atomic.LoadAcquintptrfunc ( *uintptr) uintptr//go:linkname runtime_StoreReluintptr runtime/internal/atomic.StoreReluintptrfunc ( *uintptr, uintptr) uintptr