前言

我们为什么要读源码?因为只有深入到实现原理,才能了解它的优势;理解架构和核心原理能帮助我们快速定位问题、避免重复造轮子、借鉴其设计思想。今天我们就来看下 sync.Pool 的源码。

// Pool is a set of temporary objects that may be individually saved
// and retrieved (Go 1.12 layout, as discussed in this article).
type Pool struct {
	noCopy noCopy // makes `go vet` flag copies of a Pool after first use

	local     unsafe.Pointer // per-P pool array; actual type is [P]poolLocal, indexed by P's id
	localSize uintptr        // size of the local array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}
// poolLocalInternal is the per-P pool slot; the Pool.local array is
// indexed by the P's id to reach one of these.
type poolLocalInternal struct {
	private interface{}   // Can be used only by the P that stored it; accessed without locking.
	shared  []interface{} // Can be consumed by any P (other Ps steal from here).
	Mutex                 // Protects shared.
}
// Put adds x to the pool.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		// A nil entry would make a later Get look like a miss; drop it.
		return
	}
	if race.Enabled {
		if fastrand()%4 == 0 {
			// Randomly drop x on floor.
			return
		}
		race.ReleaseMerge(poolRaceAddr(x))
		race.Disable()
	}
	// pin disables preemption and returns this P's poolLocal.
	l := p.pin()
	if l.private == nil {
		// Fast path: the lock-free per-P private slot is empty.
		l.private = x
		x = nil
	}
	runtime_procUnpin()
	if x != nil {
		// Private slot was occupied: append to the mutex-protected shared slice.
		l.Lock()
		l.shared = append(l.shared, x)
		l.Unlock()
	}
	if race.Enabled {
		race.Enable()
	}
}
// pin pins the current goroutine to its P, disables preemption and
// returns the poolLocal for the P. The caller must call
// runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a newer/larger local, it is fine
	// (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	// Slow path: the local array hasn't been allocated (or GOMAXPROCS grew).
	return p.pinSlow()
}
// pinSlow is the slow path of pin: under allPoolsMu it registers the
// pool (first use) and (re)allocates the per-P local array when pid is
// out of range of the current array.
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned, so unpin first.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	// Another goroutine may have allocated the array while we waited for the lock.
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	if p.local == nil {
		// First use of this pool: register it so poolCleanup can find it.
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid]
}

以上就是PUT的大致流程。

// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller. It first tries this P's private
// slot, then (under the mutex) this P's shared slice, then steals from
// other Ps via getSlow, and finally falls back to p.New.
func (p *Pool) Get() interface{} {
	if race.Enabled {
		race.Disable()
	}
	l := p.pin()    // pin to the current P and locate its poolLocal
	x := l.private  // lock-free private slot, fastest path
	l.private = nil // clear it; ownership moves to the caller
	runtime_procUnpin()
	if x == nil { // private slot was empty
		l.Lock()
		last := len(l.shared) - 1 // pop from the tail of shared
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
		}
		l.Unlock()
		if x == nil {
			x = p.getSlow() // steal from other Ps' shared slices
		}
	}
	if race.Enabled {
		race.Enable()
		if x != nil {
			race.Acquire(poolRaceAddr(x))
		}
	}
	if x == nil && p.New != nil {
		x = p.New() // nothing found in any P's pool: allocate a fresh value
	}
	return x
}
// getSlow is Get's steal path: it walks every other P's shared slice
// once, popping the first available element, and returns nil if all
// are empty.
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding the ordering of these loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		// Start at pid+1 and walk once around the ring of Ps.
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			// Pop from the tail of that P's shared slice.
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}
	return x
}

以上是GET操作

1.14 poolCleanup

我们直接看 1.14 版本的 poolCleanup;上面的 Get、Put 均是 1.12.5 版本的实现。

这个Cleanup的思路很好,引入victim 和local概念,在我看来就是0/1切换思想
思路:Put 将对象放入 local 中;Get 先从 local 拿,local 拿不到时(偷取也失败)再从 victim 拿。
GC 的时候执行 poolCleanup:先清空 victim,再把当前 local 中的(旧)对象整体移入 victim。这样对象能多存活一个 GC 周期,而不是被立刻回收。
func poolCleanup() {
   // This function is called with the world stopped, at the beginning of a garbage collection.
 // It must not allocate and probably should not call any runtime functions.
 // Because the world is stopped, no pool user can be in a // pinned section (in effect, this has all Ps pinned).
 // Drop victim caches from all pools. for _, p := range oldPools {
      p.victim = nil
      p.victimSize = 0
 }
   // Move primary cache to victim cache.
 for _, p := range allPools {
      p.victim = p.local
      p.victimSize = p.localSize
      p.local = nil
      p.localSize = 0
 }
   // The pools with non-empty primary caches now have non-empty
 // victim caches and no pools have primary caches. oldPools, allPools = allPools, nil
}
对比
我看的 1.12.5 版本的 sync.Pool 实现基于 mutex 加锁来保证多 goroutine 安全;最新的 1.14 版本引入了无锁双向链表(poolChain/poolDequeue),移除了 mutex,改善了共享访问的性能。

所以我们在使用 1.12.5 及更早版本时,要注意 GC 引起的 sync.Pool 全部清空所带来的毛刺。另外,sync.Pool 适合的场景是对象被频繁创建且可以复用。
比如 我现在有个推送任务100万人群/次。 结构体是

// Manual is the per-push task structure from the article's example.
// NOTE(review): field semantics are inferred from names — confirm
// against the actual callers before relying on these comments.
type Manual struct {
	core.BaseTask // embedded task base
	core.BaseClass // embedded class base
	ManualFormat *model.ManualFormat // push payload/format — TODO confirm
	ManualAppId  []int // target app ids — TODO confirm
	Cfg          *baseConfig.TomlConfig // service configuration
	IsAllPush    bool // presumably "push to the whole audience" — verify
}

每次推送都要对人群数据做渲染,此时用 sync.Pool 复用对象能显著减少 GC 压力。也要注意引发 GC 的两个条件:第一,距上次 GC 超过 2 分钟会强制触发一次;第二,堆内存增长达到一定阈值时触发一次。

参考资料

https://mp.weixin.qq.com/s/Oc...


xiaowei520
638 声望21 粉丝

持之以恒、


下一篇 »
管理篇-001