-
Notifications
You must be signed in to change notification settings - Fork 11
/
errgroup.go
219 lines (180 loc) · 5.88 KB
/
errgroup.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
// Package neilotoole/errgroup is an extension of the sync/errgroup
// concept, and much of the code herein is descended from
// or directly copied from that sync/errgroup code which
// has this header comment:
//
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup is a drop-in alternative to sync/errgroup but
// limited to N goroutines. In effect, neilotoole/errgroup is
// sync/errgroup but with a worker pool of N goroutines.
package errgroup
import (
"context"
"runtime"
"sync"
"sync/atomic"
)
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid and does not cancel on error.
//
// This Group implementation differs from sync/errgroup in that instead
// of each call to Go spawning a new Go routine, the f passed to Go
// is sent to a queue channel (qCh), and is picked up by one of N
// worker goroutines. The number of goroutines (numG) and the queue
// channel size (qSize) are args to WithContextN. The zero Group and
// the Group returned by WithContext both use default values (the value
// of runtime.NumCPU) for the numG and qSize args. A side-effect of this
// implementation is that the Go method will block while qCh is full: in
// contrast, errgroup.Group's Go method never blocks (it always spawns
// a new goroutine).
type Group struct {
	// cancel cancels the Context derived by WithContextN. It is
	// invoked on the first error and when Wait returns; it is nil
	// for a zero Group.
	cancel func()

	// wg tracks the worker goroutines so that Wait can block
	// until they have all exited.
	wg sync.WaitGroup

	// errOnce ensures that only the first non-nil error is
	// recorded in err.
	errOnce sync.Once
	// err holds the first non-nil error returned by a func passed
	// to Go; it is the value Wait returns.
	err error

	// numG is the maximum number of goroutines that can be started.
	numG int

	// qSize is the capacity of qCh, used for buffering funcs
	// passed to method Go.
	qSize int

	// qCh is the buffer used to hold funcs passed to method Go
	// before they are picked up by worker goroutines. It is
	// lazily created by the first call to Go, and set back to
	// nil by Wait.
	qCh chan func() error

	// qMu protects qCh.
	qMu sync.Mutex

	// gCount tracks the number of worker goroutines; it is
	// manipulated atomically by maybeStartG and the workers.
	gCount int64
}
// WithContext returns a new Group and an associated Context derived from ctx.
// It is equivalent to WithContextN(ctx, 0, 0).
func WithContext(ctx context.Context) (*Group, context.Context) {
	// Zero values for numG and qSize select the defaults
	// (runtime.NumCPU for each).
	g, gctx := WithContextN(ctx, 0, 0)
	return g, gctx
}
// WithContextN returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
//
// Param numG controls the number of worker goroutines. Param qSize
// controls the size of the queue channel that holds functions passed
// to method Go: while the queue channel is full, Go blocks.
// If numG <= 0, the value of runtime.NumCPU is used; if qSize is
// also <= 0, a qSize of runtime.NumCPU is used.
func WithContextN(ctx context.Context, numG, qSize int) (*Group, context.Context) {
	gctx, cancel := context.WithCancel(ctx)
	g := &Group{
		cancel: cancel,
		numG:   numG,
		qSize:  qSize,
	}
	return g, gctx
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
	// Hold qMu for the whole close/wait/reset sequence so that
	// concurrent calls to Go cannot observe a half-torn-down qCh.
	g.qMu.Lock()

	if g.qCh != nil {
		// qCh is typically initialized by the first call to method Go.
		// qCh can be nil if Wait is invoked before the first
		// call to Go, hence this check before we close qCh.
		// Closing qCh signals the worker goroutines to exit once
		// the queue drains (they receive nil from the closed channel).
		close(g.qCh)
	}

	// Wait for the worker goroutines to finish.
	g.wg.Wait()

	// All of the worker goroutines have finished,
	// so it's safe to set qCh to nil. This also allows the Group
	// to be reused: a subsequent Go call re-initializes qCh.
	g.qCh = nil
	g.qMu.Unlock()

	// Cancel the derived Context (if any) now that the group is done,
	// releasing resources associated with it.
	if g.cancel != nil {
		g.cancel()
	}

	// Reading g.err without errOnce is safe here: wg.Wait above
	// establishes a happens-before with every worker's write to err.
	return g.err
}
// Go adds the given function to a queue of functions that are called
// by one of g's worker goroutines.
//
// The first call to return a non-nil error cancels the group; its error will be
// returned by Wait.
//
// Go may block while g's qCh is full.
func (g *Group) Go(f func() error) {
	g.qMu.Lock()

	if g.qCh == nil {
		// First call to Go (or first call after Wait): initialize g.
		// The zero value of numG would mean no worker goroutine
		// would be created, which would be daft.
		// We want the "effective" zero value to be runtime.NumCPU.
		//
		// Note: the checks below use <= 0, matching the documented
		// contract of WithContextN ("If numG <= 0, the value of
		// runtime.NumCPU is used"). A plain == 0 check would let a
		// negative numG cap the pool at a single worker, and a
		// negative qSize would panic in make below.
		if g.numG <= 0 {
			// Benchmarking has shown that the optimal numG and
			// qSize values depend on the particular workload. In
			// the absence of any other deciding factor, we somewhat
			// arbitrarily default to NumCPU, which seems to perform
			// reasonably in benchmarks. Users that care about performance
			// tuning will use the WithContextN func to specify the numG
			// and qSize args.
			g.numG = runtime.NumCPU()
			if g.qSize <= 0 {
				g.qSize = g.numG
			}
		}
		if g.qSize < 0 {
			// A negative buffer size would panic in make; treat it
			// as unspecified and fall back to an unbuffered channel.
			g.qSize = 0
		}

		g.qCh = make(chan func() error, g.qSize)

		// Being that g.Go has been invoked, we'll need at
		// least one goroutine.
		atomic.StoreInt64(&g.gCount, 1)
		g.startG()

		// Unlock before the send: the send can block when qCh is
		// full, and we must not hold qMu while blocked.
		g.qMu.Unlock()
		g.qCh <- f
		return
	}

	g.qCh <- f

	// Check if we can or should start a new goroutine?
	g.maybeStartG()
	g.qMu.Unlock()
}
// maybeStartG might start a new worker goroutine, if
// needed and allowed.
func (g *Group) maybeStartG() {
	if len(g.qCh) == 0 {
		// An empty queue means the existing workers are keeping
		// up; there's no reason to spawn another goroutine.
		return
	}

	// There's at least one queued func. Optimistically claim a
	// worker slot; if the claim stays within the numG cap, spawn
	// the worker. Otherwise release the claim and back out.
	if atomic.AddInt64(&g.gCount, 1) <= int64(g.numG) {
		g.startG()
		return
	}

	atomic.AddInt64(&g.gCount, -1)
}
// startG starts a new worker goroutine.
func (g *Group) startG() {
	g.wg.Add(1)
	go func() {
		// Defers run LIFO: the gCount decrement below executes
		// before wg.Done, so the worker count is already settled
		// when Wait's wg.Wait returns.
		defer g.wg.Done()
		defer atomic.AddInt64(&g.gCount, -1)

		var f func() error

		for {
			// Block until f is received from qCh or
			// the channel is closed.
			f = <-g.qCh
			if f == nil {
				// qCh was closed, time for this goroutine
				// to die.
				// (A receive from a closed channel yields the nil
				// zero value, which serves as the exit sentinel.)
				return
			}

			if err := f(); err != nil {
				// Record only the first error seen by the group,
				// and cancel the derived Context if there is one.
				g.errOnce.Do(func() {
					g.err = err
					if g.cancel != nil {
						g.cancel()
					}
				})
				// This worker exits on error; any funcs still
				// queued in qCh are left for the remaining workers.
				return
			}
		}
	}()
}