Simplify resizing strategies

This commit is contained in:
alitto
2020-06-06 16:37:53 -03:00
parent f8b427ec5a
commit 0a4f0e9f32
7 changed files with 446 additions and 644 deletions
+37 -29
@@ -158,16 +158,24 @@ panicHandler := func(p interface{}) {
pool := pond.New(10, 1000, pond.PanicHandler(panicHandler))
```
- **Strategy**: Configures the strategy used to resize the pool when backpressure is detected. You can create a custom strategy by implementing the `pond.ResizingStrategy` interface or choose one of the 3 presets:
- **Eager**: maximizes responsiveness at the expense of higher resource usage, which can reduce throughput under certain conditions. This strategy is meant for worker pools that will operate at a small percentage of their capacity most of the time and may occasionally receive bursts of tasks.
- **Balanced**: tries to find a balance between responsiveness and throughput. It's suitable for general purpose worker pools or those that will operate close to 50% of their capacity most of the time. This is the default strategy.
- **Eager**: maximizes responsiveness at the expense of higher resource usage, which can reduce throughput under certain conditions. This strategy is meant for worker pools that will operate at a small percentage of their capacity most of the time and may occasionally receive bursts of tasks. This is the default strategy.
- **Balanced**: tries to find a balance between responsiveness and throughput. It's suitable for general purpose worker pools or those that will operate close to 50% of their capacity most of the time.
- **Lazy**: maximizes throughput at the expense of responsiveness. This strategy is meant for worker pools that will operate close to their max. capacity most of the time.
``` go
// Example: create pools with different resizing strategies
eagerPool := pond.New(10, 1000, pond.Strategy(pond.Eager))
balancedPool := pond.New(10, 1000, pond.Strategy(pond.Balanced))
lazyPool := pond.New(10, 1000, pond.Strategy(pond.Lazy))
eagerPool := pond.New(10, 1000, pond.Strategy(pond.Eager()))
balancedPool := pond.New(10, 1000, pond.Strategy(pond.Balanced()))
lazyPool := pond.New(10, 1000, pond.Strategy(pond.Lazy()))
```
### Resizing strategies
The following chart illustrates the behaviour of the different pool resizing strategies as the number of submitted tasks increases. Each line represents the number of worker goroutines in the pool (pool size) and the x-axis reflects the number of submitted tasks (cumulative).
![Pool resizing strategies behaviour](./docs/strategies.svg)
As the name suggests, the "Eager" strategy always spawns an extra worker when there are no idle workers, which causes the pool to grow almost linearly with the number of submitted tasks. At the other end, the "Lazy" strategy creates one worker every N submitted tasks, where N is the maximum number of available CPUs ([GOMAXPROCS](https://golang.org/pkg/runtime/#GOMAXPROCS)). The "Balanced" strategy sits between the previous two, creating a worker every N/2 submitted tasks.
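If none of the presets fit, a custom strategy only needs to implement the simplified `pond.ResizingStrategy` interface introduced by this change (`Resize(runningWorkers, minWorkers, maxWorkers int) bool`). The following is a minimal sketch, not taken from the library's documentation; `halfCapacityResizer` is a hypothetical name for a strategy that only lets the pool grow while it is below half of its maximum size:
``` go
package main

import "github.com/alitto/pond"

// halfCapacityResizer is a hypothetical custom strategy: Resize returns true
// (i.e. "spawn another worker") only while fewer than half of maxWorkers are running.
type halfCapacityResizer struct{}

func (halfCapacityResizer) Resize(runningWorkers, minWorkers, maxWorkers int) bool {
	return runningWorkers < maxWorkers/2
}

func main() {
	pool := pond.New(10, 1000, pond.Strategy(halfCapacityResizer{}))
	defer pool.StopAndWait()

	pool.Submit(func() {
		// do some work here
	})
}
```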
## API Reference
Full API reference is available at https://pkg.go.dev/github.com/alitto/pond
@@ -189,30 +197,30 @@ Here are the results:
goos: linux
goarch: amd64
pkg: github.com/alitto/pond/benchmark
BenchmarkAll/1M-10ms/Pond-Eager-8 2 620347142 ns/op 82768720 B/op 1086686 allocs/op
BenchmarkAll/1M-10ms/Pond-Balanced-8 2 578973910 ns/op 81339088 B/op 1083203 allocs/op
BenchmarkAll/1M-10ms/Pond-Lazy-8 2 613344573 ns/op 84347248 B/op 1084987 allocs/op
BenchmarkAll/1M-10ms/Goroutines-8 2 540765682 ns/op 98457168 B/op 1060433 allocs/op
BenchmarkAll/1M-10ms/GoroutinePool-8 1 1157705614 ns/op 68137088 B/op 1409763 allocs/op
BenchmarkAll/1M-10ms/BufferedPool-8 1 1158068370 ns/op 76426272 B/op 1412739 allocs/op
BenchmarkAll/1M-10ms/Gammazero-8 1 1330312458 ns/op 34524328 B/op 1029692 allocs/op
BenchmarkAll/1M-10ms/AntsPool-8 2 724231628 ns/op 37870404 B/op 1077297 allocs/op
BenchmarkAll/100k-500ms/Pond-Eager-8 2 604180003 ns/op 31523028 B/op 349877 allocs/op
BenchmarkAll/100k-500ms/Pond-Balanced-8 1 1060079592 ns/op 35520416 B/op 398779 allocs/op
BenchmarkAll/100k-500ms/Pond-Lazy-8 1 1053705909 ns/op 35040512 B/op 392696 allocs/op
BenchmarkAll/100k-500ms/Goroutines-8 2 551869174 ns/op 8000016 B/op 100001 allocs/op
BenchmarkAll/100k-500ms/GoroutinePool-8 2 635442074 ns/op 20764560 B/op 299632 allocs/op
BenchmarkAll/100k-500ms/BufferedPool-8 2 641683384 ns/op 21647840 B/op 299661 allocs/op
BenchmarkAll/100k-500ms/Gammazero-8 2 667449574 ns/op 16241864 B/op 249664 allocs/op
BenchmarkAll/100k-500ms/AntsPool-8 2 659853037 ns/op 37300372 B/op 549784 allocs/op
BenchmarkAll/10k-1000ms/Pond-Eager-8 1 1014320653 ns/op 12135080 B/op 39692 allocs/op
BenchmarkAll/10k-1000ms/Pond-Balanced-8 1 1015979207 ns/op 12083704 B/op 39518 allocs/op
BenchmarkAll/10k-1000ms/Pond-Lazy-8 1 1036374161 ns/op 12046632 B/op 39366 allocs/op
BenchmarkAll/10k-1000ms/Goroutines-8 1 1007837894 ns/op 800016 B/op 10001 allocs/op
BenchmarkAll/10k-1000ms/GoroutinePool-8 1 1149536612 ns/op 21393024 B/op 222458 allocs/op
BenchmarkAll/10k-1000ms/BufferedPool-8 1 1127286218 ns/op 20343584 B/op 219359 allocs/op
BenchmarkAll/10k-1000ms/Gammazero-8 1 1023249222 ns/op 2019688 B/op 29374 allocs/op
BenchmarkAll/10k-1000ms/AntsPool-8 1 1016280850 ns/op 4155904 B/op 59487 allocs/op
1M-10ms/Pond-Eager-8 2 620347142 82768720 1086686
1M-10ms/Pond-Balanced-8 2 578973910 81339088 1083203
1M-10ms/Pond-Lazy-8 2 613344573 84347248 1084987
1M-10ms/Goroutines-8 2 540765682 98457168 1060433
1M-10ms/GoroutinePool-8 1 1157705614 68137088 1409763
1M-10ms/BufferedPool-8 1 1158068370 76426272 1412739
1M-10ms/Gammazero-8 1 1330312458 34524328 1029692
1M-10ms/AntsPool-8 2 724231628 37870404 1077297
100k-500ms/Pond-Eager-8 2 604180003 31523028 349877
100k-500ms/Pond-Balanced-8 1 1060079592 35520416 398779
100k-500ms/Pond-Lazy-8 1 1053705909 35040512 392696
100k-500ms/Goroutines-8 2 551869174 8000016 100001
100k-500ms/GoroutinePool-8 2 635442074 20764560 299632
100k-500ms/BufferedPool-8 2 641683384 21647840 299661
100k-500ms/Gammazero-8 2 667449574 16241864 249664
100k-500ms/AntsPool-8 2 659853037 37300372 549784
10k-1000ms/Pond-Eager-8 1 1014320653 12135080 39692
10k-1000ms/Pond-Balanced-8 1 1015979207 12083704 39518
10k-1000ms/Pond-Lazy-8 1 1036374161 12046632 39366
10k-1000ms/Goroutines-8 1 1007837894 800016 10001
10k-1000ms/GoroutinePool-8 1 1149536612 21393024 222458
10k-1000ms/BufferedPool-8 1 1127286218 20343584 219359
10k-1000ms/Gammazero-8 1 1023249222 2019688 29374
10k-1000ms/AntsPool-8 1 1016280850 4155904 59487
PASS
ok github.com/alitto/pond/benchmark 37.331s
```
+200 -156
@@ -2,6 +2,7 @@ package benchmark
import (
"fmt"
"math/rand"
"sync"
"testing"
"time"
@@ -11,193 +12,236 @@ import (
"github.com/panjf2000/ants/v2"
)
type subject struct {
name string
factory poolFactory
}
type poolSubmit func(func())
type poolTeardown func()
type poolFactory func() (poolSubmit, poolTeardown)
type workload struct {
name string
userCount int
taskCount int
taskDuration time.Duration
taskInterval time.Duration
task func()
}
type subject struct {
name string
test poolTest
config poolConfig
}
var maxWorkers = 200000
type poolConfig struct {
minWorkers int
maxWorkers int
maxCapacity int
strategy pond.ResizingStrategy
}
type poolTest func(taskCount int, taskFunc func(), config poolConfig)
var workloads = []workload{
{"1M-10ms", 1000000, 10 * time.Millisecond},
{"100k-500ms", 100000, 500 * time.Millisecond},
{"10k-1000ms", 10000, 1000 * time.Millisecond},
}
var defaultPoolConfig = poolConfig{
maxWorkers: 200000,
}
var workloads = []workload{{
name: "1u-10Mt",
userCount: 1,
taskCount: 1000000,
taskInterval: 0,
}, {
name: "100u-10Kt",
userCount: 100,
taskCount: 10000,
taskInterval: 0,
}, {
name: "1Ku-1Kt",
userCount: 1000,
taskCount: 1000,
taskInterval: 0,
}, {
name: "10Ku-100t",
userCount: 10000,
taskCount: 100,
taskInterval: 0,
}, {
name: "1Mu-1t",
userCount: 1000000,
taskCount: 1,
taskInterval: 0,
}}
var pondSubjects = []subject{
{"Pond-Eager", pondPool, poolConfig{maxWorkers: defaultPoolConfig.maxWorkers, maxCapacity: 1000000, strategy: pond.Eager()}},
{"Pond-Balanced", pondPool, poolConfig{maxWorkers: defaultPoolConfig.maxWorkers, maxCapacity: 1000000, strategy: pond.Balanced()}},
{"Pond-Lazy", pondPool, poolConfig{maxWorkers: defaultPoolConfig.maxWorkers, maxCapacity: 1000000, strategy: pond.Lazy()}},
{
name: "Pond-Eager",
factory: func() (poolSubmit, poolTeardown) {
pool := pond.New(maxWorkers, 1000000, pond.Strategy(pond.Eager()))
return pool.Submit, pool.StopAndWait
},
}, {
name: "Pond-Balanced",
factory: func() (poolSubmit, poolTeardown) {
pool := pond.New(maxWorkers, 1000000, pond.Strategy(pond.Balanced()))
return pool.Submit, pool.StopAndWait
},
}, {
name: "Pond-Lazy",
factory: func() (poolSubmit, poolTeardown) {
pool := pond.New(maxWorkers, 1000000, pond.Strategy(pond.Lazy()))
return pool.Submit, pool.StopAndWait
},
},
}
var otherSubjects = []subject{
{"Goroutines", unboundedGoroutines, defaultPoolConfig},
{"GoroutinePool", goroutinePool, defaultPoolConfig},
{"BufferedPool", bufferedGoroutinePool, defaultPoolConfig},
{"Gammazero", gammazeroWorkerpool, defaultPoolConfig},
{"AntsPool", antsPool, defaultPoolConfig},
{
name: "Goroutines",
factory: func() (poolSubmit, poolTeardown) {
submit := func(taskFunc func()) {
go func() {
taskFunc()
}()
}
return submit, func() {}
},
},
{
name: "GoroutinePool",
factory: func() (poolSubmit, poolTeardown) {
var poolWg sync.WaitGroup
taskChan := make(chan func())
poolWg.Add(maxWorkers)
for i := 0; i < maxWorkers; i++ {
go func() {
for task := range taskChan {
task()
}
poolWg.Done()
}()
}
submit := func(task func()) {
taskChan <- task
}
teardown := func() {
close(taskChan)
poolWg.Wait()
}
return submit, teardown
},
},
{
name: "BufferedPool",
factory: func() (poolSubmit, poolTeardown) {
var poolWg sync.WaitGroup
taskChan := make(chan func(), 1000000)
poolWg.Add(maxWorkers)
for i := 0; i < maxWorkers; i++ {
go func() {
for task := range taskChan {
task()
}
poolWg.Done()
}()
}
submit := func(task func()) {
taskChan <- task
}
teardown := func() {
close(taskChan)
poolWg.Wait()
}
return submit, teardown
},
},
{
name: "Gammazero",
factory: func() (poolSubmit, poolTeardown) {
pool := workerpool.New(maxWorkers)
return pool.Submit, pool.StopWait
},
},
{
name: "AntsPool",
factory: func() (poolSubmit, poolTeardown) {
pool, _ := ants.NewPool(maxWorkers, ants.WithExpiryDuration(10*time.Second))
submit := func(task func()) {
pool.Submit(task)
}
return submit, pool.Release
},
},
}
func BenchmarkPond(b *testing.B) {
runBenchmarks(b, workloads, pondSubjects)
func BenchmarkPondSleep10ms(b *testing.B) {
sleep10ms := func() {
time.Sleep(10 * time.Millisecond)
}
runBenchmarks(b, workloads, pondSubjects, sleep10ms)
}
func BenchmarkAll(b *testing.B) {
allSubjects := make([]subject, 0)
allSubjects = append(allSubjects, pondSubjects...)
allSubjects = append(allSubjects, otherSubjects...)
runBenchmarks(b, workloads, allSubjects)
func BenchmarkPondRandFloat64(b *testing.B) {
randFloat64 := func() {
rand.Float64()
}
runBenchmarks(b, workloads, pondSubjects, randFloat64)
}
func runBenchmarks(b *testing.B, workloads []workload, subjects []subject) {
func BenchmarkAllSleep10ms(b *testing.B) {
subjects := make([]subject, 0)
subjects = append(subjects, pondSubjects...)
subjects = append(subjects, otherSubjects...)
sleep10ms := func() {
time.Sleep(10 * time.Millisecond)
}
runBenchmarks(b, workloads, subjects, sleep10ms)
}
func BenchmarkAllRandFloat64(b *testing.B) {
subjects := make([]subject, 0)
subjects = append(subjects, pondSubjects...)
subjects = append(subjects, otherSubjects...)
randFloat64 := func() {
rand.Float64()
}
runBenchmarks(b, workloads, subjects, randFloat64)
}
func runBenchmarks(b *testing.B, workloads []workload, subjects []subject, task func()) {
for _, workload := range workloads {
taskFunc := func() {
time.Sleep(workload.taskDuration)
}
for _, subject := range subjects {
name := fmt.Sprintf("%s/%s", workload.name, subject.name)
b.Run(name, func(b *testing.B) {
testName := fmt.Sprintf("%s/%s", workload.name, subject.name)
b.Run(testName, func(b *testing.B) {
for i := 0; i < b.N; i++ {
subject.test(workload.taskCount, taskFunc, subject.config)
simulateWorkload(&workload, subject.factory, task)
}
})
}
}
}
func pondPool(taskCount int, taskFunc func(), config poolConfig) {
var wg sync.WaitGroup
pool := pond.New(config.maxWorkers, config.maxCapacity,
pond.MinWorkers(config.minWorkers),
pond.Strategy(config.strategy))
// Submit tasks
wg.Add(taskCount)
for n := 0; n < taskCount; n++ {
pool.Submit(func() {
taskFunc()
wg.Done()
})
}
wg.Wait()
pool.StopAndWait()
}
func simulateWorkload(workload *workload, poolFactoy poolFactory, task func()) {
func unboundedGoroutines(taskCount int, taskFunc func(), config poolConfig) {
var wg sync.WaitGroup
wg.Add(taskCount)
for i := 0; i < taskCount; i++ {
go func() {
taskFunc()
wg.Done()
}()
}
wg.Wait()
}
func goroutinePool(taskCount int, taskFunc func(), config poolConfig) {
// Start worker goroutines
var poolWg sync.WaitGroup
taskChan := make(chan func())
poolWg.Add(config.maxWorkers)
for i := 0; i < config.maxWorkers; i++ {
go func() {
for task := range taskChan {
task()
}
poolWg.Done()
}()
}
// Submit tasks and wait for completion
var wg sync.WaitGroup
wg.Add(taskCount)
for i := 0; i < taskCount; i++ {
taskChan <- func() {
taskFunc()
wg.Done()
}
}
close(taskChan)
wg.Wait()
poolWg.Wait()
}
func bufferedGoroutinePool(taskCount int, taskFunc func(), config poolConfig) {
// Start worker goroutines
var poolWg sync.WaitGroup
taskChan := make(chan func(), taskCount)
poolWg.Add(config.maxWorkers)
for i := 0; i < config.maxWorkers; i++ {
go func() {
for task := range taskChan {
task()
}
poolWg.Done()
}()
}
// Submit tasks and wait for completion
var wg sync.WaitGroup
wg.Add(taskCount)
for i := 0; i < taskCount; i++ {
taskChan <- func() {
taskFunc()
wg.Done()
}
}
close(taskChan)
wg.Wait()
poolWg.Wait()
}
func gammazeroWorkerpool(taskCount int, taskFunc func(), config poolConfig) {
// Create pool
wp := workerpool.New(config.maxWorkers)
defer wp.StopWait()
poolSubmit, poolTeardown := poolFactoy()
// Submit tasks and wait for completion
// Spawn one goroutine per simulated user
var wg sync.WaitGroup
wg.Add(taskCount)
for i := 0; i < taskCount; i++ {
wp.Submit(func() {
taskFunc()
wg.Done()
})
wg.Add(workload.userCount * workload.taskCount)
testFunc := func() {
task()
wg.Done()
}
for i := 0; i < workload.userCount; i++ {
go func() {
// Every user submits taskCount tasks, pausing taskInterval between submissions
for i := 0; i < workload.taskCount; i++ {
poolSubmit(testFunc)
if workload.taskInterval > 0 {
time.Sleep(workload.taskInterval)
}
}
}()
}
wg.Wait()
}
func antsPool(taskCount int, taskFunc func(), config poolConfig) {
// Create pool
pool, _ := ants.NewPool(config.maxWorkers, ants.WithExpiryDuration(10*time.Second))
defer pool.Release()
// Submit tasks and wait for completion
var wg sync.WaitGroup
wg.Add(taskCount)
for i := 0; i < taskCount; i++ {
_ = pool.Submit(func() {
taskFunc()
wg.Done()
})
}
wg.Wait()
// Tear down
poolTeardown()
}
File diff suppressed because one or more lines are too long

+97 -267
@@ -2,7 +2,6 @@ package pond
import (
"fmt"
"math"
"runtime/debug"
"sync"
"sync/atomic"
@@ -22,7 +21,7 @@ func defaultPanicHandler(panic interface{}) {
// ResizingStrategy represents a pool resizing strategy
type ResizingStrategy interface {
Resize(runningWorkers, idleWorkers, minWorkers, maxWorkers, incomingTasks, completedTasks int, delta time.Duration) int
Resize(runningWorkers, minWorkers, maxWorkers int) bool
}
// Option represents an option that can be passed when instantiating a worker pool to customize it
@@ -66,19 +65,13 @@ type WorkerPool struct {
strategy ResizingStrategy
panicHandler func(interface{})
// Atomic counters
workerCount int32
idleWorkerCount int32
completedTaskCount uint64
workerCount int32
idleWorkerCount int32
// Private properties
tasks chan func()
dispatchedTasks chan func()
stopOnce sync.Once
waitGroup sync.WaitGroup
lastResizeTime time.Time
lastResizeCompletedTasks uint64
// Debug information
debug bool
maxWorkerCount int
tasks chan func()
purgerQuit chan struct{}
stopOnce sync.Once
waitGroup sync.WaitGroup
}
// New creates a worker pool that can scale up to the given maximum number of workers (maxWorkers).
@@ -92,9 +85,8 @@ func New(maxWorkers, maxCapacity int, options ...Option) *WorkerPool {
maxWorkers: maxWorkers,
maxCapacity: maxCapacity,
idleTimeout: defaultIdleTimeout,
strategy: Balanced(),
strategy: Eager(),
panicHandler: defaultPanicHandler,
debug: false,
}
// Apply all options
@@ -118,19 +110,21 @@ func New(maxWorkers, maxCapacity int, options ...Option) *WorkerPool {
// Create internal channels
pool.tasks = make(chan func(), pool.maxCapacity)
pool.dispatchedTasks = make(chan func(), pool.maxWorkers)
pool.purgerQuit = make(chan struct{})
// Start dispatcher goroutine
// Start purger goroutine
pool.waitGroup.Add(1)
go func() {
defer pool.waitGroup.Done()
pool.dispatch()
pool.purge()
}()
// Start minWorkers workers
if pool.minWorkers > 0 {
pool.startWorkers(pool.minWorkers, nil)
for i := 0; i < pool.minWorkers; i++ {
pool.startWorker(nil)
}
}
return pool
@@ -147,14 +141,51 @@ func (p *WorkerPool) Idle() int {
}
// Submit sends a task to this worker pool for execution. If the queue is full,
// it will wait until the task can be enqueued
// it will wait until the task is dispatched to a worker goroutine.
func (p *WorkerPool) Submit(task func()) {
p.submit(task, true)
}
// TrySubmit attempts to send a task to this worker pool for execution. If the queue is full,
// it will not wait for a worker to become idle. It returns true if it was able to dispatch
// the task and false otherwise.
func (p *WorkerPool) TrySubmit(task func()) bool {
return p.submit(task, false)
}
func (p *WorkerPool) submit(task func(), waitForIdle bool) bool {
if task == nil {
return
return false
}
// Submit the task to the task channel
runningWorkerCount := p.Running()
// Attempt to dispatch to an idle worker without blocking
if runningWorkerCount > 0 && p.Idle() > 0 {
select {
case p.tasks <- task:
return true
default:
// No idle worker available, continue
}
}
maxWorkersReached := runningWorkerCount >= p.maxWorkers
// Exit if we have reached the max. number of workers and can't wait for an idle worker
if maxWorkersReached && !waitForIdle {
return false
}
// Start a worker as long as we haven't reached the limit
if !maxWorkersReached && p.strategy.Resize(runningWorkerCount, p.minWorkers, p.maxWorkers) {
p.startWorker(task)
return true
}
// Submit the task to the tasks channel and wait for it to be picked up by a worker
p.tasks <- task
return true
}
// SubmitAndWait sends a task to this worker pool for execution and waits for it to complete
@@ -196,8 +227,8 @@ func (p *WorkerPool) SubmitBefore(task func(), deadline time.Duration) {
// Stop causes this pool to stop accepting tasks, without waiting for goroutines to exit
func (p *WorkerPool) Stop() {
p.stopOnce.Do(func() {
// Close the tasks channel to prevent receiving new tasks
close(p.tasks)
// Send the signal to stop the purger goroutine
close(p.purgerQuit)
})
}
@@ -209,245 +240,50 @@ func (p *WorkerPool) StopAndWait() {
p.waitGroup.Wait()
}
// dispatch represents the work done by the dispatcher goroutine
func (p *WorkerPool) dispatch() {
// purge represents the work done by the purger goroutine
func (p *WorkerPool) purge() {
// Declare vars
var (
maxBatchSize = 1000
batch = make([]func(), maxBatchSize)
batchSize = int(math.Max(float64(p.minWorkers), 100))
idleWorkers = 0
dispatchedToIdleWorkers = 0
dispatchedToNewWorkers = 0
dispatchedBlocking = 0
nextTask func() = nil
)
idleTicker := time.NewTicker(p.idleTimeout)
defer idleTicker.Stop()
idleTimer := time.NewTimer(p.idleTimeout)
defer idleTimer.Stop()
// Start dispatching cycle
DispatchCycle:
Purge:
for {
// Reset idle timer
idleTimer.Reset(p.idleTimeout)
select {
// Receive a task
case task, ok := <-p.tasks:
if !ok {
// Received the signal to exit
break DispatchCycle
// Timed out waiting for any activity to happen, attempt to kill an idle worker
case <-idleTicker.C:
if p.Idle() > 0 {
p.tasks <- nil
}
idleWorkers = p.Idle()
// Dispatch tasks to idle workers
nextTask, dispatchedToIdleWorkers = p.dispatchToIdleWorkers(task, idleWorkers)
if nextTask == nil {
continue DispatchCycle
}
// Read up to batchSize tasks without blocking
p.receiveBatch(nextTask, &batch, batchSize)
// Resize the pool
dispatchedToNewWorkers = p.resizePool(batch, dispatchedToIdleWorkers)
dispatchedBlocking = 0
if len(batch) > dispatchedToNewWorkers {
for _, task := range batch[dispatchedToNewWorkers:] {
// Attempt to dispatch the task without blocking
select {
case p.dispatchedTasks <- task:
default:
// Block until a worker accepts this task
p.dispatchedTasks <- task
dispatchedBlocking++
}
}
}
// Adjust batch size
if dispatchedBlocking > 0 {
if batchSize > 1 {
batchSize = 1
}
} else {
batchSize = batchSize * 2
if batchSize > maxBatchSize {
batchSize = maxBatchSize
}
}
// Timed out waiting for any activity to happen, attempt to resize the pool
case <-idleTimer.C:
p.resizePool(batch[:0], 0)
case <-p.purgerQuit:
break Purge
}
}
// Send signal to stop all workers
close(p.dispatchedTasks)
close(p.tasks)
if p.debug {
fmt.Printf("Max workers: %d", p.maxWorkerCount)
}
}
func (p *WorkerPool) dispatchToIdleWorkers(task func(), limit int) (nextTask func(), dispatched int) {
// Dispatch up to limit tasks without blocking
nextTask = task
for i := 0; i < limit; i++ {
// Attempt to dispatch without blocking
select {
case p.dispatchedTasks <- nextTask:
nextTask = nil
dispatched++
default:
// Could not dispatch, return the task
return
}
// Attempt to receive another task
select {
case t, ok := <-p.tasks:
if !ok {
// Nothing else to dispatch
nextTask = nil
return
}
nextTask = t
default:
nextTask = nil
return
}
}
return
}
func (p *WorkerPool) receiveBatch(task func(), batch *[]func(), batchSize int) {
// Reset batch slice
*batch = (*batch)[:0]
*batch = append(*batch, task)
// Read up to batchSize tasks without blocking
for i := 0; i < batchSize-1; i++ {
select {
case t, ok := <-p.tasks:
if !ok {
return
}
if t != nil {
*batch = append(*batch, t)
}
default:
return
}
}
}
func (p *WorkerPool) resizePool(batch []func(), dispatchedToIdleWorkers int) int {
// Time to resize the pool
now := time.Now()
workload := len(batch)
currentCompletedTasks := atomic.LoadUint64(&p.completedTaskCount)
completedTasksDelta := int(currentCompletedTasks - p.lastResizeCompletedTasks)
if completedTasksDelta < 0 {
completedTasksDelta = 0
}
duration := 0 * time.Millisecond
if !p.lastResizeTime.IsZero() {
duration = now.Sub(p.lastResizeTime)
}
poolSizeDelta := p.calculatePoolSizeDelta(p.Running(), p.Idle(),
workload+dispatchedToIdleWorkers, completedTasksDelta, duration)
// Capture values for next resize cycle
p.lastResizeTime = now
p.lastResizeCompletedTasks = currentCompletedTasks
// Start up to poolSizeDelta workers
dispatched := 0
if poolSizeDelta > 0 {
p.startWorkers(poolSizeDelta, batch)
dispatched = workload
if poolSizeDelta < workload {
dispatched = poolSizeDelta
}
} else if poolSizeDelta < 0 {
// Kill poolSizeDelta workers
for i := 0; i < -poolSizeDelta; i++ {
p.dispatchedTasks <- nil
}
}
return dispatched
}
// calculatePoolSizeDelta calculates what's the delta to reach the ideal pool size based on the current size and workload
func (p *WorkerPool) calculatePoolSizeDelta(runningWorkers, idleWorkers,
incomingTasks, completedTasks int, duration time.Duration) int {
delta := p.strategy.Resize(runningWorkers, idleWorkers, p.minWorkers, p.maxWorkers,
incomingTasks, completedTasks, duration)
targetSize := runningWorkers + delta
// Cannot go below minWorkers
if targetSize < p.minWorkers {
targetSize = p.minWorkers
}
// Cannot go above maxWorkers
if targetSize > p.maxWorkers {
targetSize = p.maxWorkers
}
if p.debug {
// Print debugging information
durationSecs := duration.Seconds()
inputRate := float64(incomingTasks) / durationSecs
outputRate := float64(completedTasks) / durationSecs
message := fmt.Sprintf("%d\t%d\t%d\t%d\t\"%f\"\t\"%f\"\t%d\t\"%f\"\n",
runningWorkers, idleWorkers, incomingTasks, completedTasks,
inputRate, outputRate,
delta, durationSecs)
fmt.Printf(message)
}
return targetSize - runningWorkers
}
// startWorkers creates new worker goroutines to run the given tasks
func (p *WorkerPool) startWorkers(count int, firstTasks []func()) {
func (p *WorkerPool) startWorker(firstTask func()) {
// Increment worker count
workerCount := atomic.AddInt32(&p.workerCount, int32(count))
p.incrementWorkerCount()
// Collect debug information
if p.debug && int(workerCount) > p.maxWorkerCount {
p.maxWorkerCount = int(workerCount)
}
// Increment waiting group semaphore
p.waitGroup.Add(count)
// Launch workers
var firstTask func()
for i := 0; i < count; i++ {
firstTask = nil
if i < len(firstTasks) {
firstTask = firstTasks[i]
}
go worker(firstTask, p.dispatchedTasks, &p.idleWorkerCount, &p.completedTaskCount, p.decrementWorkers, p.panicHandler)
}
// Launch worker
go worker(firstTask, p.tasks, &p.idleWorkerCount, p.decrementWorkerCount, p.panicHandler)
}
func (p *WorkerPool) decrementWorkers() {
func (p *WorkerPool) incrementWorkerCount() {
// Increment worker count
atomic.AddInt32(&p.workerCount, 1)
// Increment waiting group semaphore
p.waitGroup.Add(1)
}
func (p *WorkerPool) decrementWorkerCount() {
// Decrement worker count
atomic.AddInt32(&p.workerCount, -1)
@@ -464,32 +300,31 @@ func (p *WorkerPool) Group() *TaskGroup {
}
// worker launches a worker goroutine
func worker(firstTask func(), tasks chan func(), idleWorkerCount *int32, completedTaskCount *uint64, exitHandler func(), panicHandler func(interface{})) {
func worker(firstTask func(), tasks chan func(), idleWorkerCount *int32, exitHandler func(), panicHandler func(interface{})) {
defer func() {
if panic := recover(); panic != nil {
// Handle panic
panicHandler(panic)
// Restart goroutine
go worker(nil, tasks, idleWorkerCount, completedTaskCount, exitHandler, panicHandler)
go worker(nil, tasks, idleWorkerCount, exitHandler, panicHandler)
} else {
// Handle exit
// Handle normal exit
exitHandler()
// Decrement idle count
atomic.AddInt32(idleWorkerCount, -1)
}
}()
// We have received a task, execute it
func() {
// Increment idle count
defer atomic.AddInt32(idleWorkerCount, 1)
if firstTask != nil {
// Increment completed task count
defer atomic.AddUint64(completedTaskCount, 1)
firstTask()
}
}()
if firstTask != nil {
firstTask()
}
// Increment idle count
atomic.AddInt32(idleWorkerCount, 1)
for task := range tasks {
if task == nil {
@@ -501,15 +336,10 @@ func worker(firstTask func(), tasks chan func(), idleWorkerCount *int32, complet
atomic.AddInt32(idleWorkerCount, -1)
// We have received a task, execute it
func() {
// Increment idle count
defer atomic.AddInt32(idleWorkerCount, 1)
task()
// Increment completed task count
defer atomic.AddUint64(completedTaskCount, 1)
task()
}()
// Increment idle count
atomic.AddInt32(idleWorkerCount, 1)
}
}
+49
@@ -164,6 +164,32 @@ func TestSubmitBeforeWithNilTask(t *testing.T) {
assertEqual(t, 0, pool.Running())
}
func TestTrySubmit(t *testing.T) {
pool := pond.New(1, 5)
// Submit a long-running task
var doneCount int32
pool.Submit(func() {
time.Sleep(5 * time.Millisecond)
atomic.AddInt32(&doneCount, 1)
})
// Attempt to submit a task without blocking
dispatched := pool.TrySubmit(func() {
time.Sleep(5 * time.Millisecond)
atomic.AddInt32(&doneCount, 1)
})
// Task was not dispatched because the pool was full
assertEqual(t, false, dispatched)
pool.StopAndWait()
// Only the first task must have executed
assertEqual(t, int32(1), atomic.LoadInt32(&doneCount))
}
func TestRunning(t *testing.T) {
workerCount := 5
@@ -338,3 +364,26 @@ func TestGroupSubmit(t *testing.T) {
assertEqual(t, int32(taskCount), atomic.LoadInt32(&doneCount))
}
func TestPoolWithCustomStrategy(t *testing.T) {
pool := pond.New(3, 3, pond.Strategy(pond.RatedResizer(2)))
// Submit 3 tasks
group := pool.Group()
for i := 0; i < 3; i++ {
group.Submit(func() {
time.Sleep(10 * time.Millisecond)
})
}
// Wait for them to complete
group.Wait()
// 2 workers should have been started
assertEqual(t, 2, pool.Running())
pool.StopAndWait()
assertEqual(t, 0, pool.Running())
}
+31 -165
@@ -1,190 +1,56 @@
package pond
import (
"container/ring"
"math"
"time"
"runtime"
"sync/atomic"
)
var maxProcs = runtime.GOMAXPROCS(0)
// Preset pool resizing strategies
var (
// Eager maximizes responsiveness at the expense of higher resource usage,
// which can reduce throughput under certain conditions.
// This strategy is meant for worker pools that will operate at a small percentage of their capacity
// most of the time and may occasionally receive bursts of tasks.
Eager = func() ResizingStrategy { return DynamicResizer(1, 0.01) }
// most of the time and may occasionally receive bursts of tasks. It's the default strategy.
Eager = func() ResizingStrategy { return RatedResizer(1) }
// Balanced tries to find a balance between responsiveness and throughput.
// It's the default strategy and it's suitable for general purpose worker pools or those
// It's suitable for general purpose worker pools or those
// that will operate close to 50% of their capacity most of the time.
Balanced = func() ResizingStrategy { return DynamicResizer(3, 0.01) }
Balanced = func() ResizingStrategy { return RatedResizer(maxProcs / 2) }
// Lazy maximizes throughput at the expense of responsiveness.
// This strategy is meant for worker pools that will operate close to their max. capacity most of the time.
Lazy = func() ResizingStrategy { return DynamicResizer(5, 0.01) }
Lazy = func() ResizingStrategy { return RatedResizer(maxProcs) }
)
// dynamicResizer implements a configurable dynamic resizing strategy
type dynamicResizer struct {
windowSize int
tolerance float64
incomingTasks *ring.Ring
completedTasks *ring.Ring
duration *ring.Ring
busyWorkers *ring.Ring
// ratedResizer implements a rated resizing strategy
type ratedResizer struct {
rate int
hits int32
}
// DynamicResizer creates a dynamic resizing strategy that gradually increases or decreases
// the size of the pool to match the rate of incoming tasks (input rate) with the rate of
// completed tasks (output rate).
// windowSize: determines how many cycles to consider when calculating input and output rates.
// tolerance: defines a percentage (between 0 and 1)
func DynamicResizer(windowSize int, tolerance float64) ResizingStrategy {
// RatedResizer creates a resizing strategy which can be configured
// to create workers at a specific rate when the pool has no idle workers.
// rate: determines the number of tasks to receive before creating an extra worker.
// A value of 3 can be interpreted as: "Create a new worker every 3 tasks".
func RatedResizer(rate int) ResizingStrategy {
if windowSize < 1 {
windowSize = 1
}
if tolerance < 0 {
tolerance = 0
if rate < 1 {
rate = 1
}
dynamicResizer := &dynamicResizer{
windowSize: windowSize,
tolerance: tolerance,
}
dynamicResizer.reset()
return dynamicResizer
}
func (r *dynamicResizer) reset() {
// Create rings
r.incomingTasks = ring.New(r.windowSize)
r.completedTasks = ring.New(r.windowSize)
r.duration = ring.New(r.windowSize)
r.busyWorkers = ring.New(r.windowSize)
// Initialize with 0s
for i := 0; i < r.windowSize; i++ {
r.incomingTasks.Value = 0
r.completedTasks.Value = 0
r.duration.Value = 0 * time.Second
r.busyWorkers.Value = 0
r.incomingTasks = r.incomingTasks.Next()
r.completedTasks = r.completedTasks.Next()
r.duration = r.duration.Next()
r.busyWorkers = r.busyWorkers.Next()
return &ratedResizer{
rate: rate,
}
}
func (r *dynamicResizer) totalIncomingTasks() int {
var valueSum int = 0
r.incomingTasks.Do(func(value interface{}) {
valueSum += value.(int)
})
return valueSum
}
func (r *dynamicResizer) totalCompletedTasks() int {
var valueSum int = 0
r.completedTasks.Do(func(value interface{}) {
valueSum += value.(int)
})
return valueSum
}
func (r *dynamicResizer) totalDuration() time.Duration {
var valueSum time.Duration = 0
r.duration.Do(func(value interface{}) {
valueSum += value.(time.Duration)
})
return valueSum
}
func (r *dynamicResizer) avgBusyWorkers() float64 {
var valueSum int = 0
r.busyWorkers.Do(func(value interface{}) {
valueSum += value.(int)
})
return float64(valueSum) / float64(r.windowSize)
}
func (r *dynamicResizer) push(incomingTasks, completedTasks, busyWorkers int, duration time.Duration) {
r.incomingTasks.Value = incomingTasks
r.completedTasks.Value = completedTasks
r.duration.Value = duration
r.busyWorkers.Value = busyWorkers
r.incomingTasks = r.incomingTasks.Next()
r.completedTasks = r.completedTasks.Next()
r.duration = r.duration.Next()
r.busyWorkers = r.busyWorkers.Next()
}
func (r *dynamicResizer) Resize(runningWorkers, idleWorkers, minWorkers, maxWorkers, incomingTasks, completedTasks int, duration time.Duration) int {
r.push(incomingTasks, completedTasks, runningWorkers-idleWorkers, duration)
windowIncomingTasks := r.totalIncomingTasks()
windowCompletedTasks := r.totalCompletedTasks()
windowSecs := r.totalDuration().Seconds()
windowInputRate := float64(windowIncomingTasks) / windowSecs
windowOutputRate := float64(windowCompletedTasks) / windowSecs
if runningWorkers == 0 || windowCompletedTasks == 0 {
// No workers yet, create as many workers as incomingTasks-idleWorkers
delta := incomingTasks - idleWorkers
if delta < 0 {
delta = 0
}
return r.fitDelta(delta, runningWorkers, minWorkers, maxWorkers)
}
// Calculate max throughput
avgBusyWorkers := r.avgBusyWorkers()
if avgBusyWorkers < 1 {
avgBusyWorkers = 1
}
windowWorkerRate := windowOutputRate / avgBusyWorkers
if windowWorkerRate < 1 {
windowWorkerRate = 1
}
maxOutputRate := windowWorkerRate * float64(runningWorkers)
deltaRate := windowInputRate - maxOutputRate
// No changes, do not resize
if deltaRate == 0 {
return 0
}
// If delta % is below the defined tolerance, do not resize
if r.tolerance > 0 {
deltaPercentage := math.Abs(deltaRate / windowInputRate)
if deltaPercentage < r.tolerance {
return 0
}
}
if deltaRate > 0 {
ratio := windowSecs / float64(r.windowSize)
delta := int(ratio * (deltaRate / windowWorkerRate))
if delta < 0 {
delta = 0
}
if deltaRate > 0 && delta < 1 {
delta = 1
}
return r.fitDelta(delta, runningWorkers, minWorkers, maxWorkers)
} else if deltaRate < 0 && idleWorkers > 0 {
// Need to shrink the pool
return r.fitDelta(-1, runningWorkers, minWorkers, maxWorkers)
}
return 0
}
func (r *dynamicResizer) fitDelta(delta, current, min, max int) int {
if current+delta < min {
delta = -(current - min)
}
if current+delta > max {
delta = max - current
}
return delta
func (r *ratedResizer) Resize(runningWorkers, minWorkers, maxWorkers int) bool {
if r.rate == 1 {
return true
}
hits := int(atomic.AddInt32(&r.hits, 1))
return hits%r.rate == 1
}
+31 -27
@@ -2,39 +2,43 @@ package pond
import (
"testing"
"time"
)
func TestResize(t *testing.T) {
func TestRatedResizer(t *testing.T) {
resizer := DynamicResizer(3, 0.1)
resizer := RatedResizer(3)
// First resize should grow the pool proportionally
assertEqual(t, 10, resizer.Resize(0, 0, 1, 100, 10, 0, 1*time.Second))
// Now the input rate grows but below the tolerance (10%)
assertEqual(t, -1, resizer.Resize(10, 10, 1, 100, 1, 10, 1*time.Second))
// Now the input rate grows more
assertEqual(t, 90, resizer.Resize(10, 10, 1, 100, 100000, 11, 1*time.Second))
// Now there's no new tasks for 3 cycles
assertEqual(t, -1, resizer.Resize(10, 10, 1, 100, 0, 100011, 1*time.Second))
assertEqual(t, -1, resizer.Resize(10, 10, 1, 100, 0, 100011, 1*time.Second))
assertEqual(t, 0, resizer.Resize(1, 1, 1, 100, 0, 100011, 10*time.Second))
assertEqual(t, true, resizer.Resize(0, 0, 10))
assertEqual(t, false, resizer.Resize(1, 0, 10))
assertEqual(t, false, resizer.Resize(2, 0, 10))
assertEqual(t, true, resizer.Resize(3, 0, 10))
}
func TestEagerPool(t *testing.T) {
pool := New(100, 1000, Strategy(Eager()))
pool.debug = true
func TestRatedResizerWithRate1(t *testing.T) {
for i := 0; i < 100; i++ {
pool.Submit(func() {
time.Sleep(1 * time.Millisecond)
})
}
resizer := RatedResizer(1)
pool.StopAndWait()
assertEqual(t, 100, pool.maxWorkerCount)
assertEqual(t, true, resizer.Resize(0, 0, 10))
assertEqual(t, true, resizer.Resize(1, 0, 10))
assertEqual(t, true, resizer.Resize(2, 0, 10))
}
func TestRatedResizerWithInvalidRate(t *testing.T) {
resizer := RatedResizer(0)
assertEqual(t, true, resizer.Resize(0, 0, 10))
assertEqual(t, true, resizer.Resize(1, 0, 10))
assertEqual(t, true, resizer.Resize(2, 0, 10))
}
func TestPresetRatedResizers(t *testing.T) {
eager := Eager()
balanced := Balanced()
lazy := Lazy()
assertEqual(t, true, eager.Resize(0, 0, 10))
assertEqual(t, true, balanced.Resize(0, 0, 10))
assertEqual(t, true, lazy.Resize(0, 0, 10))
}