From da4965aefb8966c6121e0f585423d7ace0b6565d Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Wed, 13 Nov 2019 18:28:42 +0000 Subject: [PATCH 01/21] Queue: Add generic graceful queues with settings --- custom/conf/app.ini.sample | 13 ++ .../doc/advanced/config-cheat-sheet.en-us.md | 8 + modules/queue/queue.go | 128 ++++++++++++ modules/queue/queue_batch.go | 78 +++++++ modules/queue/queue_batch_test.go | 46 +++++ modules/queue/queue_channel.go | 75 +++++++ modules/queue/queue_channel_test.go | 38 ++++ modules/queue/queue_disk.go | 158 +++++++++++++++ modules/queue/queue_disk_channel.go | 160 +++++++++++++++ modules/queue/queue_disk_channel_test.go | 105 ++++++++++ modules/queue/queue_disk_test.go | 99 +++++++++ modules/queue/queue_redis.go | 190 ++++++++++++++++++ modules/queue/queue_test.go | 42 ++++ modules/queue/queue_wrapped.go | 183 +++++++++++++++++ modules/setting/queue.go | 143 +++++++++++++ 15 files changed, 1466 insertions(+) create mode 100644 modules/queue/queue.go create mode 100644 modules/queue/queue_batch.go create mode 100644 modules/queue/queue_batch_test.go create mode 100644 modules/queue/queue_channel.go create mode 100644 modules/queue/queue_channel_test.go create mode 100644 modules/queue/queue_disk.go create mode 100644 modules/queue/queue_disk_channel.go create mode 100644 modules/queue/queue_disk_channel_test.go create mode 100644 modules/queue/queue_disk_test.go create mode 100644 modules/queue/queue_redis.go create mode 100644 modules/queue/queue_test.go create mode 100644 modules/queue/queue_wrapped.go create mode 100644 modules/setting/queue.go diff --git a/custom/conf/app.ini.sample b/custom/conf/app.ini.sample index 1617d649732a1..014ce397fa174 100644 --- a/custom/conf/app.ini.sample +++ b/custom/conf/app.ini.sample @@ -371,6 +371,19 @@ REPO_INDEXER_INCLUDE = ; A comma separated list of glob patterns to exclude from the index; ; default is empty REPO_INDEXER_EXCLUDE = +[queue] +; General queue queue type, currently support: persistable-channel, channel, level, redis, dummy +; default to persistable-channel +TYPE = persistable-channel +; data-dir for storing persistable queues and level queues, individual queues will be named by their type +DATADIR = queues/ +; Default queue length before a channel queue will block +LENGTH = 20 +; Batch size to send for batched queues +BATCH_LENGTH = 20 +; Connection string for redis queues this will store the redis connection string. +CONN_STR = "addrs=127.0.0.1:6379 db=0" + [admin] ; Disallow regular (non-admin) users from creating organizations. DISABLE_REGULAR_ORG_CREATION = false diff --git a/docs/content/doc/advanced/config-cheat-sheet.en-us.md b/docs/content/doc/advanced/config-cheat-sheet.en-us.md index c059fe55b5ba2..16b4e3bee5862 100644 --- a/docs/content/doc/advanced/config-cheat-sheet.en-us.md +++ b/docs/content/doc/advanced/config-cheat-sheet.en-us.md @@ -234,6 +234,14 @@ relation to port exhaustion. - `MAX_FILE_SIZE`: **1048576**: Maximum size in bytes of files to be indexed. - `STARTUP_TIMEOUT`: **30s**: If the indexer takes longer than this timeout to start - fail. (This timeout will be added to the hammer time above for child processes - as bleve will not start until the previous parent is shutdown.) Set to zero to never timeout. +## Queue (`queue`) + +- `TYPE`: **persistable-channel**: General queue type, currently support: `persistable-channel`, `batched-channel`, `channel`, `level`, `redis`, `dummy` +- `DATADIR`: **queues/**: Base DataDir for storing persistent and level queues. 
+- `LENGTH`: **20**: Maximal queue size before channel queues block
+- `BATCH_LENGTH`: **20**: Batch data before passing to the handler
+- `CONN_STR`: **addrs=127.0.0.1:6379 db=0**: Connection string for the redis queue type.
+
 ## Admin (`admin`)
 
 - `DEFAULT_EMAIL_NOTIFICATIONS`: **enabled**: Default configuration for email notifications for users (user configurable). Options: enabled, onmention, disabled
diff --git a/modules/queue/queue.go b/modules/queue/queue.go
new file mode 100644
index 0000000000000..1220db5c03bbc
--- /dev/null
+++ b/modules/queue/queue.go
@@ -0,0 +1,128 @@
+// Copyright 2019 The Gitea Authors. All rights reserved.
+// Use of this source code is governed by a MIT-style
+// license that can be found in the LICENSE file.
+
+package queue
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"reflect"
+)
+
+// ErrInvalidConfiguration is returned when there is invalid configuration for a queue
+type ErrInvalidConfiguration struct {
+	cfg interface{}
+	err error
+}
+
+func (err ErrInvalidConfiguration) Error() string {
+	if err.err != nil {
+		return fmt.Sprintf("Invalid Configuration Argument: %v: Error: %v", err.cfg, err.err)
+	}
+	return fmt.Sprintf("Invalid Configuration Argument: %v", err.cfg)
+}
+
+// IsErrInvalidConfiguration checks if an error is an ErrInvalidConfiguration
+func IsErrInvalidConfiguration(err error) bool {
+	_, ok := err.(ErrInvalidConfiguration)
+	return ok
+}
+
+// Type is a type of Queue
+type Type string
+
+// Data defines a type of queueable data
+type Data interface{}
+
+// HandlerFunc is a function that takes a variable number of data items and processes them
+type HandlerFunc func(...Data)
+
+// NewQueueFunc is a function that creates a queue
+type NewQueueFunc func(handler HandlerFunc, config interface{}, exemplar interface{}) (Queue, error)
+
+// Shutdownable represents a queue that can be shut down
+type Shutdownable interface {
+	Shutdown()
+	Terminate()
+}
+
+// Queue defines an interface of a queue
+type Queue interface {
+	Run(atShutdown, atTerminate func(context.Context, func()))
+	Push(Data) error
+}
+
+// DummyQueueType is the type for the dummy queue
+const DummyQueueType Type = "dummy"
+
+// NewDummyQueue creates a new DummyQueue
+func NewDummyQueue(handler HandlerFunc, opts, exemplar interface{}) (Queue, error) {
+	return &DummyQueue{}, nil
+}
+
+// DummyQueue represents an empty queue
+type DummyQueue struct {
+}
+
+// Run starts to run the queue
+func (b *DummyQueue) Run(_, _ func(context.Context, func())) {}
+
+// Push pushes data to the queue
+func (b *DummyQueue) Push(Data) error {
+	return nil
+}
+
+func toConfig(exemplar, cfg interface{}) (interface{}, error) {
+	if reflect.TypeOf(cfg).AssignableTo(reflect.TypeOf(exemplar)) {
+		return cfg, nil
+	}
+
+	configBytes, ok := cfg.([]byte)
+	if !ok {
+		configStr, ok := cfg.(string)
+		if !ok {
+			return nil, ErrInvalidConfiguration{cfg: cfg}
+		}
+		configBytes = []byte(configStr)
+	}
+	newVal := reflect.New(reflect.TypeOf(exemplar))
+	if err := json.Unmarshal(configBytes, newVal.Interface()); err != nil {
+		return nil, ErrInvalidConfiguration{cfg: cfg, err: err}
+	}
+	return newVal.Elem().Interface(), nil
+}
+
+var queuesMap = map[Type]NewQueueFunc{DummyQueueType: NewDummyQueue}
+
+// RegisteredTypes provides the list of registered types of queues
+func RegisteredTypes() []Type {
+	types := make([]Type, len(queuesMap))
+	i := 0
+	for key := range queuesMap {
+		types[i] = key
+		i++
+	}
+	return types
+}
+
+// RegisteredTypesAsString provides the list of registered types of
queues +func RegisteredTypesAsString() []string { + types := make([]string, len(queuesMap)) + i := 0 + for key := range queuesMap { + types[i] = string(key) + i++ + } + return types +} + +// CreateQueue takes a queue Type and HandlerFunc some options and possibly an exemplar and returns a Queue or an error +func CreateQueue(queueType Type, handlerFunc HandlerFunc, opts, exemplar interface{}) (Queue, error) { + newFn, ok := queuesMap[queueType] + if !ok { + return nil, fmt.Errorf("Unsupported queue type: %v", queueType) + } + return newFn(handlerFunc, opts, exemplar) +} diff --git a/modules/queue/queue_batch.go b/modules/queue/queue_batch.go new file mode 100644 index 0000000000000..07166441e6df0 --- /dev/null +++ b/modules/queue/queue_batch.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "time" + + "code.gitea.io/gitea/modules/log" +) + +// BatchedChannelQueueType is the type for batched channel queue +const BatchedChannelQueueType Type = "batched-channel" + +// BatchedChannelQueueConfiguration is the configuration for a BatchedChannelQueue +type BatchedChannelQueueConfiguration struct { + QueueLength int + BatchLength int +} + +// BatchedChannelQueue implements +type BatchedChannelQueue struct { + *ChannelQueue + batchLength int +} + +// NewBatchedChannelQueue create a memory channel queue +func NewBatchedChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(BatchedChannelQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(BatchedChannelQueueConfiguration) + return &BatchedChannelQueue{ + &ChannelQueue{ + queue: make(chan Data, config.QueueLength), + handle: handle, + exemplar: exemplar, + }, + config.BatchLength, + }, nil +} + +// Run starts to run the queue +func (c *BatchedChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + atShutdown(context.Background(), func() { + log.Warn("BatchedChannelQueue is not shutdownable!") + }) + atTerminate(context.Background(), func() { + log.Warn("BatchedChannelQueue is not terminatable!") + }) + go func() { + delay := time.Millisecond * 300 + var datas = make([]Data, 0, c.batchLength) + for { + select { + case data := <-c.queue: + datas = append(datas, data) + if len(datas) >= c.batchLength { + c.handle(datas...) + datas = make([]Data, 0, c.batchLength) + } + case <-time.After(delay): + delay = time.Millisecond * 100 + if len(datas) > 0 { + c.handle(datas...) + datas = make([]Data, 0, c.batchLength) + } + } + } + }() +} + +func init() { + queuesMap[BatchedChannelQueueType] = NewBatchedChannelQueue +} diff --git a/modules/queue/queue_batch_test.go b/modules/queue/queue_batch_test.go new file mode 100644 index 0000000000000..08d3641da123f --- /dev/null +++ b/modules/queue/queue_batch_test.go @@ -0,0 +1,46 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
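+// Queue implementations register themselves in queuesMap from an init
+// function, which is how CreateQueue in queue.go looks them up by Type. A
+// third-party type could hook in the same way (a sketch only; the "custom"
+// name and the reuse of NewChannelQueue are illustrative):
+//
+//	const CustomQueueType Type = "custom"
+//
+//	func init() {
+//		queuesMap[CustomQueueType] = func(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) {
+//			return NewChannelQueue(handle, cfg, exemplar)
+//		}
+//	}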
+ +package queue + +import "testing" + +import "github.com/stretchr/testify/assert" + +import "context" + +func TestBatchedChannelQueue(t *testing.T) { + handleChan := make(chan *testData) + handle := func(data ...Data) { + assert.True(t, len(data) == 2) + for _, datum := range data { + testDatum := datum.(*testData) + handleChan <- testDatum + } + } + + nilFn := func(_ context.Context, _ func()) {} + + queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2}, &testData{}) + assert.NoError(t, err) + + go queue.Run(nilFn, nilFn) + + test1 := testData{"A", 1} + test2 := testData{"B", 2} + + queue.Push(&test1) + go queue.Push(&test2) + + result1 := <-handleChan + assert.Equal(t, test1.TestString, result1.TestString) + assert.Equal(t, test1.TestInt, result1.TestInt) + + result2 := <-handleChan + assert.Equal(t, test2.TestString, result2.TestString) + assert.Equal(t, test2.TestInt, result2.TestInt) + + err = queue.Push(test1) + assert.Error(t, err) +} diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go new file mode 100644 index 0000000000000..e0cba2db01ddb --- /dev/null +++ b/modules/queue/queue_channel.go @@ -0,0 +1,75 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "fmt" + "reflect" + + "code.gitea.io/gitea/modules/log" +) + +// ChannelQueueType is the type for channel queue +const ChannelQueueType Type = "channel" + +// ChannelQueueConfiguration is the configuration for a ChannelQueue +type ChannelQueueConfiguration struct { + QueueLength int +} + +// ChannelQueue implements +type ChannelQueue struct { + queue chan Data + handle HandlerFunc + exemplar interface{} +} + +// NewChannelQueue create a memory channel queue +func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(ChannelQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(ChannelQueueConfiguration) + return &ChannelQueue{ + queue: make(chan Data, config.QueueLength), + handle: handle, + exemplar: exemplar, + }, nil +} + +// Run starts to run the queue +func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + atShutdown(context.Background(), func() { + log.Warn("ChannelQueue is not shutdownable!") + }) + atTerminate(context.Background(), func() { + log.Warn("ChannelQueue is not terminatable!") + }) + go func() { + for data := range c.queue { + c.handle(data) + } + }() +} + +// Push will push the indexer data to queue +func (c *ChannelQueue) Push(data Data) error { + if c.exemplar != nil { + // Assert data is of same type as r.exemplar + t := reflect.TypeOf(data) + exemplarType := reflect.TypeOf(c.exemplar) + if !t.AssignableTo(exemplarType) || data == nil { + return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name) + } + } + c.queue <- data + return nil +} + +func init() { + queuesMap[ChannelQueueType] = NewChannelQueue +} diff --git a/modules/queue/queue_channel_test.go b/modules/queue/queue_channel_test.go new file mode 100644 index 0000000000000..77f4a8fe8f59a --- /dev/null +++ b/modules/queue/queue_channel_test.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
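+// The exemplar given to NewChannelQueue acts as a runtime type guard: Push
+// rejects any Data whose type is not assignable to the exemplar's type, so a
+// queue created with &testData{} only accepts *testData values. The check in
+// ChannelQueue.Push above is essentially:
+//
+//	if !reflect.TypeOf(data).AssignableTo(reflect.TypeOf(exemplar)) {
+//		return fmt.Errorf("unable to assign data: %v to same type as exemplar: %v", data, exemplar)
+//	}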
+ +package queue + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestChannelQueue(t *testing.T) { + handleChan := make(chan *testData) + handle := func(data ...Data) { + for _, datum := range data { + testDatum := datum.(*testData) + handleChan <- testDatum + } + } + + nilFn := func(_ context.Context, _ func()) {} + + queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20}, &testData{}) + assert.NoError(t, err) + + go queue.Run(nilFn, nilFn) + + test1 := testData{"A", 1} + go queue.Push(&test1) + result1 := <-handleChan + assert.Equal(t, test1.TestString, result1.TestString) + assert.Equal(t, test1.TestInt, result1.TestInt) + + err = queue.Push(test1) + assert.Error(t, err) +} diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go new file mode 100644 index 0000000000000..dafff5c21c8e6 --- /dev/null +++ b/modules/queue/queue_disk.go @@ -0,0 +1,158 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "time" + + "code.gitea.io/gitea/modules/log" + + "gitea.com/lunny/levelqueue" +) + +// LevelQueueType is the type for level queue +const LevelQueueType Type = "level" + +// LevelQueueConfiguration is the configuration for a LevelQueue +type LevelQueueConfiguration struct { + DataDir string + BatchLength int +} + +// LevelQueue implements a disk library queue +type LevelQueue struct { + handle HandlerFunc + queue *levelqueue.Queue + batchLength int + closed chan struct{} + exemplar interface{} +} + +// NewLevelQueue creates a ledis local queue +func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(LevelQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(LevelQueueConfiguration) + + queue, err := levelqueue.Open(config.DataDir) + if err != nil { + return nil, err + } + + return &LevelQueue{ + handle: handle, + queue: queue, + batchLength: config.BatchLength, + exemplar: exemplar, + closed: make(chan struct{}), + }, nil +} + +// Run starts to run the queue +func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + atShutdown(context.Background(), l.Shutdown) + atTerminate(context.Background(), l.Terminate) + var i int + var datas = make([]Data, 0, l.batchLength) + for { + select { + case <-l.closed: + if len(datas) > 0 { + log.Trace("Handling: %d data, %v", len(datas), datas) + l.handle(datas...) + } + return + default: + } + i++ + if len(datas) > l.batchLength || (len(datas) > 0 && i > 3) { + log.Trace("Handling: %d data, %v", len(datas), datas) + l.handle(datas...) 
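+			// batch handed off: reset the buffer and the poll counter before gathering more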
+ datas = make([]Data, 0, l.batchLength) + i = 0 + continue + } + + bs, err := l.queue.RPop() + if err != nil { + if err != levelqueue.ErrNotFound { + log.Error("RPop: %v", err) + } + time.Sleep(time.Millisecond * 100) + continue + } + + if len(bs) == 0 { + time.Sleep(time.Millisecond * 100) + continue + } + + var data Data + if l.exemplar != nil { + t := reflect.TypeOf(l.exemplar) + n := reflect.New(t) + ne := n.Elem() + err = json.Unmarshal(bs, ne.Addr().Interface()) + data = ne.Interface().(Data) + } else { + err = json.Unmarshal(bs, &data) + } + if err != nil { + log.Error("Unmarshal: %v", err) + time.Sleep(time.Millisecond * 10) + continue + } + + log.Trace("LevelQueue: task found: %#v", data) + + datas = append(datas, data) + } +} + +// Push will push the indexer data to queue +func (l *LevelQueue) Push(data Data) error { + if l.exemplar != nil { + // Assert data is of same type as r.exemplar + value := reflect.ValueOf(data) + t := value.Type() + exemplarType := reflect.ValueOf(l.exemplar).Type() + if !t.AssignableTo(exemplarType) || data == nil { + return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, l.exemplar, l.name) + } + } + bs, err := json.Marshal(data) + if err != nil { + return err + } + return l.queue.LPush(bs) +} + +// Shutdown this queue and stop processing +func (l *LevelQueue) Shutdown() { + select { + case <-l.closed: + default: + close(l.closed) + } +} + +// Terminate this queue and close the queue +func (l *LevelQueue) Terminate() { + l.Shutdown() + if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" { + log.Error("Error whilst closing internal queue: %v", err) + } + +} + +func init() { + queuesMap[LevelQueueType] = NewLevelQueue +} diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go new file mode 100644 index 0000000000000..b13f1b9603def --- /dev/null +++ b/modules/queue/queue_disk_channel.go @@ -0,0 +1,160 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
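+// Rough lifecycle (a sketch; exampleData and the graceful callbacks stand in
+// for whatever the caller provides):
+//
+//	q, _ := NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{
+//		DataDir:     "queues/example",
+//		BatchLength: 20,
+//		QueueLength: 20,
+//	}, &exampleData{})
+//	go q.Run(atShutdown, atTerminate) // drains the level queue, then serves the channel
+//	_ = q.Push(&exampleData{})        // goes to the channel while running, to the level queue once shut down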
+ +package queue + +import ( + "context" + "time" +) + +// PersistableChannelQueueType is the type for persistable queue +const PersistableChannelQueueType Type = "persistable-channel" + +// PersistableChannelQueueConfiguration is the configuration for a PersistableChannelQueue +type PersistableChannelQueueConfiguration struct { + DataDir string + BatchLength int + QueueLength int + Timeout time.Duration + MaxAttempts int +} + +// PersistableChannelQueue wraps a channel queue and level queue together +type PersistableChannelQueue struct { + *BatchedChannelQueue + delayedStarter + closed chan struct{} +} + +// NewPersistableChannelQueue creates a wrapped batched channel queue with persistable level queue backend when shutting down +// This differs from a wrapped queue in that the persistent queue is only used to persist at shutdown/terminate +func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(PersistableChannelQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(PersistableChannelQueueConfiguration) + + batchChannelQueue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{ + QueueLength: config.QueueLength, + BatchLength: config.BatchLength, + }, exemplar) + if err != nil { + return nil, err + } + + levelCfg := LevelQueueConfiguration{ + DataDir: config.DataDir, + BatchLength: config.BatchLength, + } + + levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar) + if err == nil { + return &PersistableChannelQueue{ + BatchedChannelQueue: batchChannelQueue.(*BatchedChannelQueue), + delayedStarter: delayedStarter{ + internal: levelQueue.(*LevelQueue), + }, + closed: make(chan struct{}), + }, nil + } + if IsErrInvalidConfiguration(err) { + // Retrying ain't gonna make this any better... + return nil, ErrInvalidConfiguration{cfg: cfg} + } + + return &PersistableChannelQueue{ + BatchedChannelQueue: batchChannelQueue.(*BatchedChannelQueue), + delayedStarter: delayedStarter{ + cfg: levelCfg, + underlying: LevelQueueType, + timeout: config.Timeout, + maxAttempts: config.MaxAttempts, + }, + closed: make(chan struct{}), + }, nil +} + +// Push will push the indexer data to queue +func (p *PersistableChannelQueue) Push(data Data) error { + select { + case <-p.closed: + return p.internal.Push(data) + default: + return p.BatchedChannelQueue.Push(data) + } +} + +// Run starts to run the queue +func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + p.lock.Lock() + if p.internal == nil { + p.setInternal(atShutdown, p.handle, p.exemplar) + } else { + p.lock.Unlock() + } + atShutdown(context.Background(), p.Shutdown) + atTerminate(context.Background(), p.Terminate) + + // Just run the level queue - we shut it down later + go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {}) + delay := time.Millisecond * 300 + var datas = make([]Data, 0, p.batchLength) +loop: + for { + select { + case data := <-p.queue: + datas = append(datas, data) + if len(datas) >= p.batchLength { + p.handle(datas...) + datas = make([]Data, 0, p.batchLength) + } + case <-time.After(delay): + delay = time.Millisecond * 100 + if len(datas) > 0 { + p.handle(datas...) + datas = make([]Data, 0, p.batchLength) + } + case <-p.closed: + if len(datas) > 0 { + p.handle(datas...) 
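+				// final in-memory batch done; the goroutine below pushes anything left in the channel to the level queue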
+ } + break loop + } + } + go func() { + for data := range p.queue { + _ = p.internal.Push(data) + } + }() +} + +// Shutdown processing this queue +func (p *PersistableChannelQueue) Shutdown() { + select { + case <-p.closed: + default: + close(p.closed) + p.lock.Lock() + defer p.lock.Unlock() + if p.internal != nil { + p.internal.(*LevelQueue).Shutdown() + } + } +} + +// Terminate this queue and close the queue +func (p *PersistableChannelQueue) Terminate() { + p.Shutdown() + p.lock.Lock() + defer p.lock.Unlock() + if p.internal != nil { + p.internal.(*LevelQueue).Terminate() + } +} + +func init() { + queuesMap[PersistableChannelQueueType] = NewPersistableChannelQueue +} diff --git a/modules/queue/queue_disk_channel_test.go b/modules/queue/queue_disk_channel_test.go new file mode 100644 index 0000000000000..66c90f3bc3f35 --- /dev/null +++ b/modules/queue/queue_disk_channel_test.go @@ -0,0 +1,105 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestPersistableChannelQueue(t *testing.T) { + handleChan := make(chan *testData) + handle := func(data ...Data) { + assert.True(t, len(data) == 2) + for _, datum := range data { + testDatum := datum.(*testData) + handleChan <- testDatum + } + } + + var queueShutdown func() + var queueTerminate func() + + tmpDir, err := ioutil.TempDir("", "persistable-channel-queue-test-data") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) + + queue, err := NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{ + DataDir: tmpDir, + BatchLength: 2, + QueueLength: 20, + }, &testData{}) + assert.NoError(t, err) + + go queue.Run(func(_ context.Context, shutdown func()) { + queueShutdown = shutdown + }, func(_ context.Context, terminate func()) { + queueTerminate = terminate + }) + + test1 := testData{"A", 1} + test2 := testData{"B", 2} + + err = queue.Push(&test1) + assert.NoError(t, err) + go func() { + err = queue.Push(&test2) + assert.NoError(t, err) + }() + + result1 := <-handleChan + assert.Equal(t, test1.TestString, result1.TestString) + assert.Equal(t, test1.TestInt, result1.TestInt) + + result2 := <-handleChan + assert.Equal(t, test2.TestString, result2.TestString) + assert.Equal(t, test2.TestInt, result2.TestInt) + + err = queue.Push(test1) + assert.Error(t, err) + + queueShutdown() + time.Sleep(200 * time.Millisecond) + err = queue.Push(&test1) + assert.NoError(t, err) + err = queue.Push(&test2) + assert.NoError(t, err) + select { + case <-handleChan: + assert.Fail(t, "Handler processing should have stopped") + default: + } + queueTerminate() + + // Reopen queue + queue, err = NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{ + DataDir: tmpDir, + BatchLength: 2, + QueueLength: 20, + }, &testData{}) + assert.NoError(t, err) + + go queue.Run(func(_ context.Context, shutdown func()) { + queueShutdown = shutdown + }, func(_ context.Context, terminate func()) { + queueTerminate = terminate + }) + + result3 := <-handleChan + assert.Equal(t, test1.TestString, result3.TestString) + assert.Equal(t, test1.TestInt, result3.TestInt) + + result4 := <-handleChan + assert.Equal(t, test2.TestString, result4.TestString) + assert.Equal(t, test2.TestInt, result4.TestInt) + queueShutdown() + queueTerminate() + +} diff --git a/modules/queue/queue_disk_test.go 
b/modules/queue/queue_disk_test.go new file mode 100644 index 0000000000000..9bc689b5f0607 --- /dev/null +++ b/modules/queue/queue_disk_test.go @@ -0,0 +1,99 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestLevelQueue(t *testing.T) { + handleChan := make(chan *testData) + handle := func(data ...Data) { + assert.True(t, len(data) == 2) + for _, datum := range data { + testDatum := datum.(*testData) + handleChan <- testDatum + } + } + + var queueShutdown func() + var queueTerminate func() + + queue, err := NewLevelQueue(handle, LevelQueueConfiguration{ + DataDir: "level-queue-test-data", + BatchLength: 2, + }, &testData{}) + assert.NoError(t, err) + + go queue.Run(func(_ context.Context, shutdown func()) { + queueShutdown = shutdown + }, func(_ context.Context, terminate func()) { + queueTerminate = terminate + }) + + test1 := testData{"A", 1} + test2 := testData{"B", 2} + + err = queue.Push(&test1) + assert.NoError(t, err) + go func() { + err = queue.Push(&test2) + assert.NoError(t, err) + }() + + result1 := <-handleChan + assert.Equal(t, test1.TestString, result1.TestString) + assert.Equal(t, test1.TestInt, result1.TestInt) + + result2 := <-handleChan + assert.Equal(t, test2.TestString, result2.TestString) + assert.Equal(t, test2.TestInt, result2.TestInt) + + err = queue.Push(test1) + assert.Error(t, err) + + queueShutdown() + time.Sleep(200 * time.Millisecond) + err = queue.Push(&test1) + assert.NoError(t, err) + err = queue.Push(&test2) + assert.NoError(t, err) + select { + case <-handleChan: + assert.Fail(t, "Handler processing should have stopped") + default: + } + queueTerminate() + + // Reopen queue + queue, err = NewLevelQueue(handle, LevelQueueConfiguration{ + DataDir: "level-queue-test-data", + BatchLength: 2, + }, &testData{}) + assert.NoError(t, err) + + go queue.Run(func(_ context.Context, shutdown func()) { + queueShutdown = shutdown + }, func(_ context.Context, terminate func()) { + queueTerminate = terminate + }) + + result3 := <-handleChan + assert.Equal(t, test1.TestString, result3.TestString) + assert.Equal(t, test1.TestInt, result3.TestInt) + + result4 := <-handleChan + assert.Equal(t, test2.TestString, result4.TestString) + assert.Equal(t, test2.TestInt, result4.TestInt) + queueShutdown() + queueTerminate() + + os.RemoveAll("level-queue-test-data") +} diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go new file mode 100644 index 0000000000000..b785f0073f79c --- /dev/null +++ b/modules/queue/queue_redis.go @@ -0,0 +1,190 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
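+// The redis queue takes its settings from the generic queue configuration: a
+// connection string such as "addrs=127.0.0.1:6379 db=0" is parsed by
+// ParseQueueConnStr in modules/setting/queue.go into the Addresses and
+// DBIndex fields of RedisQueueConfiguration below, and a comma-separated
+// Addresses list selects a cluster client rather than a single-host client.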
+ +package queue + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "reflect" + "strings" + "time" + + "code.gitea.io/gitea/modules/log" + + "github.com/go-redis/redis" +) + +// RedisQueueType is the type for redis queue +const RedisQueueType Type = "redis" + +type redisClient interface { + RPush(key string, args ...interface{}) *redis.IntCmd + LPop(key string) *redis.StringCmd + Ping() *redis.StatusCmd + Close() error +} + +// RedisQueue redis queue +type RedisQueue struct { + client redisClient + queueName string + handle HandlerFunc + batchLength int + closed chan struct{} + exemplar interface{} +} + +// RedisQueueConfiguration is the configuration for the redis queue +type RedisQueueConfiguration struct { + Addresses string + Password string + DBIndex int + BatchLength int + QueueName string +} + +// NewRedisQueue creates single redis or cluster redis queue +func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(RedisQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(RedisQueueConfiguration) + + dbs := strings.Split(config.Addresses, ",") + var queue = RedisQueue{ + queueName: config.QueueName, + handle: handle, + batchLength: config.BatchLength, + exemplar: exemplar, + closed: make(chan struct{}), + } + if len(dbs) == 0 { + return nil, errors.New("no redis host found") + } else if len(dbs) == 1 { + queue.client = redis.NewClient(&redis.Options{ + Addr: strings.TrimSpace(dbs[0]), // use default Addr + Password: config.Password, // no password set + DB: config.DBIndex, // use default DB + }) + } else { + queue.client = redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: dbs, + }) + } + if err := queue.client.Ping().Err(); err != nil { + return nil, err + } + return &queue, nil +} + +// Run runs the redis queue +func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + atShutdown(context.Background(), r.Shutdown) + atTerminate(context.Background(), r.Terminate) + var i int + var datas = make([]Data, 0, r.batchLength) + for { + select { + case <-r.closed: + if len(datas) > 0 { + log.Trace("Handling: %d data, %v", len(datas), datas) + r.handle(datas...) + } + return + default: + } + bs, err := r.client.LPop(r.queueName).Bytes() + if err != nil && err != redis.Nil { + log.Error("LPop failed: %v", err) + time.Sleep(time.Millisecond * 100) + continue + } + + i++ + if len(datas) > r.batchLength || (len(datas) > 0 && i > 3) { + log.Trace("Handling: %d data, %v", len(datas), datas) + r.handle(datas...) + datas = make([]Data, 0, r.batchLength) + i = 0 + } + + if len(bs) == 0 { + time.Sleep(time.Millisecond * 100) + continue + } + + var data Data + if r.exemplar != nil { + t := reflect.TypeOf(r.exemplar) + n := reflect.New(t) + ne := n.Elem() + err = json.Unmarshal(bs, ne.Addr().Interface()) + data = ne.Interface().(Data) + } else { + err = json.Unmarshal(bs, &data) + } + if err != nil { + log.Error("Unmarshal: %v", err) + time.Sleep(time.Millisecond * 100) + continue + } + + log.Trace("RedisQueue: task found: %#v", data) + + datas = append(datas, data) + select { + case <-r.closed: + if len(datas) > 0 { + log.Trace("Handling: %d data, %v", len(datas), datas) + r.handle(datas...) 
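+				// drain the in-memory batch on shutdown; anything not yet popped stays in the redis list for the next run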
+ } + return + default: + } + time.Sleep(time.Millisecond * 100) + } +} + +// Push implements Queue +func (r *RedisQueue) Push(data Data) error { + if r.exemplar != nil { + // Assert data is of same type as r.exemplar + value := reflect.ValueOf(data) + t := value.Type() + exemplarType := reflect.ValueOf(r.exemplar).Type() + if !t.AssignableTo(exemplarType) || data == nil { + return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, r.exemplar, r.name) + } + } + bs, err := json.Marshal(data) + if err != nil { + return err + } + return r.client.RPush(r.queueName, bs).Err() +} + +// Shutdown processing from this queue +func (r *RedisQueue) Shutdown() { + select { + case <-r.closed: + default: + close(r.closed) + } +} + +// Terminate this queue and close the queue +func (r *RedisQueue) Terminate() { + r.Shutdown() + if err := r.client.Close(); err != nil { + log.Error("Error whilst closing internal redis client: %v", err) + } +} + +func init() { + queuesMap[RedisQueueType] = NewRedisQueue +} diff --git a/modules/queue/queue_test.go b/modules/queue/queue_test.go new file mode 100644 index 0000000000000..e41643da211c6 --- /dev/null +++ b/modules/queue/queue_test.go @@ -0,0 +1,42 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import "testing" + +import "github.com/stretchr/testify/assert" + +import "encoding/json" + +type testData struct { + TestString string + TestInt int +} + +func TestToConfig(t *testing.T) { + cfg := testData{ + TestString: "Config", + TestInt: 10, + } + exemplar := testData{} + + cfg2I, err := toConfig(exemplar, cfg) + assert.NoError(t, err) + cfg2, ok := (cfg2I).(testData) + assert.True(t, ok) + assert.NotEqual(t, cfg2, exemplar) + assert.Equal(t, &cfg, &cfg2) + + cfgString, err := json.Marshal(cfg) + assert.NoError(t, err) + + cfg3I, err := toConfig(exemplar, cfgString) + assert.NoError(t, err) + cfg3, ok := (cfg3I).(testData) + assert.True(t, ok) + assert.Equal(t, cfg.TestString, cfg3.TestString) + assert.Equal(t, cfg.TestInt, cfg3.TestInt) + assert.NotEqual(t, cfg3, exemplar) +} diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go new file mode 100644 index 0000000000000..f99675a9f913c --- /dev/null +++ b/modules/queue/queue_wrapped.go @@ -0,0 +1,183 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
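+// A WrappedQueue buffers pushes in its own channel while the creation of the
+// underlying queue is retried (for example a redis server that is not yet
+// reachable); once created, the buffered Data is replayed into it. A sketch
+// of the intended construction (the values here are illustrative):
+//
+//	q, _ := NewWrappedQueue(handle, WrappedQueueConfiguration{
+//		Underlying:  RedisQueueType,
+//		Timeout:     30 * time.Second,
+//		MaxAttempts: 10,
+//		Config:      redisCfg, // configuration for the underlying queue
+//		QueueLength: 20,
+//	}, exemplar)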
+ +package queue + +import ( + "context" + "fmt" + "reflect" + "sync" + "time" + + "code.gitea.io/gitea/modules/log" +) + +// WrappedQueueType is the type for a wrapped delayed starting queue +const WrappedQueueType Type = "wrapped" + +// WrappedQueueConfiguration is the configuration for a WrappedQueue +type WrappedQueueConfiguration struct { + Underlying Type + Timeout time.Duration + MaxAttempts int + Config interface{} + QueueLength int +} + +type delayedStarter struct { + lock sync.Mutex + internal Queue + underlying Type + cfg interface{} + timeout time.Duration + maxAttempts int +} + +func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), handle HandlerFunc, exemplar interface{}) { + var ctx context.Context + var cancel context.CancelFunc + if q.timeout > 0 { + ctx, cancel = context.WithTimeout(context.Background(), q.timeout) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + + defer cancel() + // Ensure we also stop at shutdown + atShutdown(ctx, func() { + cancel() + }) + + i := 1 + for q.internal == nil { + select { + case <-ctx.Done(): + q.lock.Unlock() + log.Fatal("Timedout creating queue %v with cfg %v ", q.underlying, q.cfg) + default: + queue, err := CreateQueue(q.underlying, handle, q.cfg, exemplar) + if err == nil { + q.internal = queue + q.lock.Unlock() + break + } + if err.Error() != "resource temporarily unavailable" { + log.Warn("[Attempt: %d] Failed to create queue: %v cfg: %v error: %v", i, q.underlying, q.cfg, err) + } + i++ + if q.maxAttempts > 0 && i > q.maxAttempts { + q.lock.Unlock() + log.Fatal("Unable to create queue %v with cfg %v by max attempts: error: %v", q.underlying, q.cfg, err) + } + sleepTime := 100 * time.Millisecond + if q.timeout > 0 && q.maxAttempts > 0 { + sleepTime = (q.timeout - 200*time.Millisecond) / time.Duration(q.maxAttempts) + } + time.Sleep(sleepTime) + } + } +} + +// WrappedQueue wraps a delayed starting queue +type WrappedQueue struct { + delayedStarter + handle HandlerFunc + exemplar interface{} + channel chan Data +} + +// NewWrappedQueue will attempt to create a queue of the provided type, +// but if there is a problem creating this queue it will instead create +// a WrappedQueue with delayed the startup of the queue instead and a +// channel which will be redirected to the queue +func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { + configInterface, err := toConfig(WrappedQueueConfiguration{}, cfg) + if err != nil { + return nil, err + } + config := configInterface.(WrappedQueueConfiguration) + + queue, err := CreateQueue(config.Underlying, handle, config.Config, exemplar) + if err == nil { + // Just return the queue there is no need to wrap + return queue, nil + } + if IsErrInvalidConfiguration(err) { + // Retrying ain't gonna make this any better... 
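+		// an invalid configuration will fail identically on every attempt, so surface the error instead of wrapping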
+ return nil, ErrInvalidConfiguration{cfg: cfg} + } + + return &WrappedQueue{ + handle: handle, + channel: make(chan Data, config.QueueLength), + exemplar: exemplar, + delayedStarter: delayedStarter{ + cfg: config.Config, + underlying: config.Underlying, + timeout: config.Timeout, + maxAttempts: config.MaxAttempts, + }, + }, nil +} + +// Push will push the data to the internal channel checking it against the exemplar +func (q *WrappedQueue) Push(data Data) error { + if q.exemplar != nil { + // Assert data is of same type as r.exemplar + value := reflect.ValueOf(data) + t := value.Type() + exemplarType := reflect.ValueOf(q.exemplar).Type() + if !t.AssignableTo(exemplarType) || data == nil { + return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in %s", data, q.exemplar, q.name) + } + } + q.channel <- data + return nil +} + +// Run starts to run the queue and attempts to create the internal queue +func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func())) { + q.lock.Lock() + if q.internal == nil { + q.setInternal(atShutdown, q.handle, q.exemplar) + go func() { + for data := range q.channel { + _ = q.internal.Push(data) + } + }() + } else { + q.lock.Unlock() + } + + q.internal.Run(atShutdown, atTerminate) +} + +// Shutdown this queue and stop processing +func (q *WrappedQueue) Shutdown() { + q.lock.Lock() + defer q.lock.Unlock() + if q.internal == nil { + return + } + if shutdownable, ok := q.internal.(Shutdownable); ok { + shutdownable.Shutdown() + } +} + +// Terminate this queue and close the queue +func (q *WrappedQueue) Terminate() { + q.lock.Lock() + defer q.lock.Unlock() + if q.internal == nil { + return + } + if shutdownable, ok := q.internal.(Shutdownable); ok { + shutdownable.Terminate() + } +} + +func init() { + queuesMap[WrappedQueueType] = NewWrappedQueue +} diff --git a/modules/setting/queue.go b/modules/setting/queue.go new file mode 100644 index 0000000000000..4c80c79079e13 --- /dev/null +++ b/modules/setting/queue.go @@ -0,0 +1,143 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
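+// Settings are read from the [queue] ini section, and any named queue may
+// override them in a [queue.<name>] section, for example (values are
+// illustrative):
+//
+//	[queue]
+//	TYPE = persistable-channel
+//	DATADIR = queues/
+//
+//	[queue.issue_indexer]
+//	TYPE = level
+//	LENGTH = 100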
+ +package setting + +import ( + "encoding/json" + "path" + "strconv" + "strings" + "time" + + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/queue" +) + +type queueSettings struct { + DataDir string + Length int + BatchLength int + ConnectionString string + Type string + Addresses string + Password string + DBIndex int + WrapIfNecessary bool + MaxAttempts int + Timeout time.Duration + Workers int +} + +// Queue settings +var Queue = queueSettings{} + +// CreateQueue for name with provided handler and exemplar +func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) queue.Queue { + q := getQueueSettings(name) + opts := make(map[string]interface{}) + opts["QueueLength"] = q.Length + opts["BatchLength"] = q.BatchLength + opts["DataDir"] = q.DataDir + opts["Addresses"] = q.Addresses + opts["Password"] = q.Password + opts["DBIndex"] = q.DBIndex + opts["QueueName"] = name + + cfg, err := json.Marshal(opts) + if err != nil { + log.Error("Unable to marshall generic options: %v Error: %v", opts, err) + log.Error("Unable to create queue for %s", name, err) + return nil + } + + returnable, err := queue.CreateQueue(queue.Type(q.Type), handle, cfg, exemplar) + if q.WrapIfNecessary && err != nil { + log.Warn("Unable to create queue for %s: %v", name, err) + log.Warn("Attempting to create wrapped queue") + returnable, err = queue.CreateQueue(queue.WrappedQueueType, handle, queue.WrappedQueueConfiguration{ + Underlying: queue.Type(q.Type), + Timeout: q.Timeout, + MaxAttempts: q.MaxAttempts, + Config: cfg, + QueueLength: q.Length, + }, exemplar) + } + if err != nil { + log.Error("Unable to create queue for %s: %v", name, err) + return nil + } + return returnable +} + +func getQueueSettings(name string) queueSettings { + q := queueSettings{} + sec := Cfg.Section("queue." + name) + // DataDir is not directly inheritable + q.DataDir = path.Join(Queue.DataDir, name) + for _, key := range sec.Keys() { + switch key.Name() { + case "DATADIR": + q.DataDir = key.MustString(q.DataDir) + } + } + if !path.IsAbs(q.DataDir) { + q.DataDir = path.Join(AppDataPath, q.DataDir) + } + sec.Key("DATADIR").SetValue(q.DataDir) + // The rest are... 
+ q.Length = sec.Key("LENGTH").MustInt(Queue.Length) + q.BatchLength = sec.Key("BATCH_LENGTH").MustInt(Queue.BatchLength) + q.ConnectionString = sec.Key("CONN_STR").MustString(Queue.ConnectionString) + validTypes := queue.RegisteredTypesAsString() + q.Type = sec.Key("TYPE").In(Queue.Type, validTypes) + q.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(Queue.WrapIfNecessary) + q.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(Queue.MaxAttempts) + q.Timeout = sec.Key("TIMEOUT").MustDuration(Queue.Timeout) + q.Workers = sec.Key("WORKER").MustInt(Queue.Workers) + + q.Addresses, q.Password, q.DBIndex, _ = ParseQueueConnStr(q.ConnectionString) + return q +} + +func newQueueService() { + sec := Cfg.Section("queue") + Queue.DataDir = sec.Key("DATADIR").MustString("queues/") + if !path.IsAbs(Queue.DataDir) { + Queue.DataDir = path.Join(AppDataPath, Queue.DataDir) + } + Queue.Length = sec.Key("LENGTH").MustInt(20) + Queue.BatchLength = sec.Key("BATCH_LENGTH").MustInt(20) + Queue.ConnectionString = sec.Key("CONN_STR").MustString(path.Join(AppDataPath, "")) + validTypes := queue.RegisteredTypesAsString() + Queue.Type = sec.Key("TYPE").In(string(queue.PersistableChannelQueueType), validTypes) + Queue.Addresses, Queue.Password, Queue.DBIndex, _ = ParseQueueConnStr(Queue.ConnectionString) + Queue.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(true) + Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10) + Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second) + Queue.Workers = sec.Key("WORKER").MustInt(1) +} + +// ParseQueueConnStr parses a queue connection string +func ParseQueueConnStr(connStr string) (addrs, password string, dbIdx int, err error) { + fields := strings.Fields(connStr) + for _, f := range fields { + items := strings.SplitN(f, "=", 2) + if len(items) < 2 { + continue + } + switch strings.ToLower(items[0]) { + case "addrs": + addrs = items[1] + case "password": + password = items[1] + case "db": + dbIdx, err = strconv.Atoi(items[1]) + if err != nil { + return + } + } + } + return +} From d06e882ca1275a0b4f4bc7ccd33b7888d73a7b7a Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Wed, 20 Nov 2019 21:31:39 +0000 Subject: [PATCH 02/21] Queue & Setting: Add worker pool implementation --- modules/queue/queue_batch.go | 40 +++++++++++++----------- modules/queue/queue_batch_test.go | 2 +- modules/queue/queue_channel.go | 15 ++++++--- modules/queue/queue_channel_test.go | 2 +- modules/queue/queue_disk.go | 17 ++++++++++ modules/queue/queue_disk_channel.go | 18 +++++++++++ modules/queue/queue_disk_channel_test.go | 2 ++ modules/queue/queue_disk_test.go | 2 ++ modules/queue/queue_redis.go | 16 ++++++++++ modules/setting/queue.go | 3 ++ 10 files changed, 92 insertions(+), 25 deletions(-) diff --git a/modules/queue/queue_batch.go b/modules/queue/queue_batch.go index 07166441e6df0..2731ac5e23c9a 100644 --- a/modules/queue/queue_batch.go +++ b/modules/queue/queue_batch.go @@ -18,6 +18,7 @@ const BatchedChannelQueueType Type = "batched-channel" type BatchedChannelQueueConfiguration struct { QueueLength int BatchLength int + Workers int } // BatchedChannelQueue implements @@ -38,6 +39,7 @@ func NewBatchedChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queu queue: make(chan Data, config.QueueLength), handle: handle, exemplar: exemplar, + workers: config.Workers, }, config.BatchLength, }, nil @@ -51,26 +53,28 @@ func (c *BatchedChannelQueue) Run(atShutdown, atTerminate func(context.Context, atTerminate(context.Background(), func() { 
log.Warn("BatchedChannelQueue is not terminatable!") }) - go func() { - delay := time.Millisecond * 300 - var datas = make([]Data, 0, c.batchLength) - for { - select { - case data := <-c.queue: - datas = append(datas, data) - if len(datas) >= c.batchLength { - c.handle(datas...) - datas = make([]Data, 0, c.batchLength) - } - case <-time.After(delay): - delay = time.Millisecond * 100 - if len(datas) > 0 { - c.handle(datas...) - datas = make([]Data, 0, c.batchLength) + for i := 0; i < c.workers; i++ { + go func() { + delay := time.Millisecond * 300 + var datas = make([]Data, 0, c.batchLength) + for { + select { + case data := <-c.queue: + datas = append(datas, data) + if len(datas) >= c.batchLength { + c.handle(datas...) + datas = make([]Data, 0, c.batchLength) + } + case <-time.After(delay): + delay = time.Millisecond * 100 + if len(datas) > 0 { + c.handle(datas...) + datas = make([]Data, 0, c.batchLength) + } } } - } - }() + }() + } } func init() { diff --git a/modules/queue/queue_batch_test.go b/modules/queue/queue_batch_test.go index 08d3641da123f..13a85a0aadafd 100644 --- a/modules/queue/queue_batch_test.go +++ b/modules/queue/queue_batch_test.go @@ -22,7 +22,7 @@ func TestBatchedChannelQueue(t *testing.T) { nilFn := func(_ context.Context, _ func()) {} - queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2}, &testData{}) + queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2, Workers: 1}, &testData{}) assert.NoError(t, err) go queue.Run(nilFn, nilFn) diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go index e0cba2db01ddb..9d0ab11d21c04 100644 --- a/modules/queue/queue_channel.go +++ b/modules/queue/queue_channel.go @@ -18,6 +18,7 @@ const ChannelQueueType Type = "channel" // ChannelQueueConfiguration is the configuration for a ChannelQueue type ChannelQueueConfiguration struct { QueueLength int + Workers int } // ChannelQueue implements @@ -25,6 +26,7 @@ type ChannelQueue struct { queue chan Data handle HandlerFunc exemplar interface{} + workers int } // NewChannelQueue create a memory channel queue @@ -38,6 +40,7 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro queue: make(chan Data, config.QueueLength), handle: handle, exemplar: exemplar, + workers: config.Workers, }, nil } @@ -49,11 +52,13 @@ func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func()) atTerminate(context.Background(), func() { log.Warn("ChannelQueue is not terminatable!") }) - go func() { - for data := range c.queue { - c.handle(data) - } - }() + for i := 0; i < c.workers; i++ { + go func() { + for data := range c.queue { + c.handle(data) + } + }() + } } // Push will push the indexer data to queue diff --git a/modules/queue/queue_channel_test.go b/modules/queue/queue_channel_test.go index 77f4a8fe8f59a..9e72bed85d7aa 100644 --- a/modules/queue/queue_channel_test.go +++ b/modules/queue/queue_channel_test.go @@ -22,7 +22,7 @@ func TestChannelQueue(t *testing.T) { nilFn := func(_ context.Context, _ func()) {} - queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20}, &testData{}) + queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20, Workers: 1}, &testData{}) assert.NoError(t, err) go queue.Run(nilFn, nilFn) diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index dafff5c21c8e6..799bc98046fb4 100644 --- a/modules/queue/queue_disk.go +++ 
b/modules/queue/queue_disk.go @@ -9,6 +9,7 @@ import ( "encoding/json" "fmt" "reflect" + "sync" "time" "code.gitea.io/gitea/modules/log" @@ -23,6 +24,7 @@ const LevelQueueType Type = "level" type LevelQueueConfiguration struct { DataDir string BatchLength int + Workers int } // LevelQueue implements a disk library queue @@ -32,6 +34,7 @@ type LevelQueue struct { batchLength int closed chan struct{} exemplar interface{} + workers int } // NewLevelQueue creates a ledis local queue @@ -53,6 +56,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) batchLength: config.BatchLength, exemplar: exemplar, closed: make(chan struct{}), + workers: config.Workers, }, nil } @@ -60,6 +64,19 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { atShutdown(context.Background(), l.Shutdown) atTerminate(context.Background(), l.Terminate) + + wg := sync.WaitGroup{} + for i := 0; i < l.workers; i++ { + wg.Add(1) + go func() { + l.worker() + wg.Done() + }() + } + wg.Wait() +} + +func (l *LevelQueue) worker() { var i int var datas = make([]Data, 0, l.batchLength) for { diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index b13f1b9603def..428e104fb5ac9 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -6,6 +6,7 @@ package queue import ( "context" + "sync" "time" ) @@ -19,6 +20,7 @@ type PersistableChannelQueueConfiguration struct { QueueLength int Timeout time.Duration MaxAttempts int + Workers int } // PersistableChannelQueue wraps a channel queue and level queue together @@ -40,14 +42,17 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( batchChannelQueue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{ QueueLength: config.QueueLength, BatchLength: config.BatchLength, + Workers: config.Workers, }, exemplar) if err != nil { return nil, err } + // the level backend only needs one worker to catch up with the previously dropped work levelCfg := LevelQueueConfiguration{ DataDir: config.DataDir, BatchLength: config.BatchLength, + Workers: 1, } levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar) @@ -100,6 +105,19 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte // Just run the level queue - we shut it down later go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {}) + + wg := sync.WaitGroup{} + for i := 0; i < p.workers; i++ { + wg.Add(1) + go func() { + p.worker() + wg.Done() + }() + } + wg.Wait() +} + +func (p *PersistableChannelQueue) worker() { delay := time.Millisecond * 300 var datas = make([]Data, 0, p.batchLength) loop: diff --git a/modules/queue/queue_disk_channel_test.go b/modules/queue/queue_disk_channel_test.go index 66c90f3bc3f35..5f6f614bd8c8b 100644 --- a/modules/queue/queue_disk_channel_test.go +++ b/modules/queue/queue_disk_channel_test.go @@ -35,6 +35,7 @@ func TestPersistableChannelQueue(t *testing.T) { DataDir: tmpDir, BatchLength: 2, QueueLength: 20, + Workers: 1, }, &testData{}) assert.NoError(t, err) @@ -83,6 +84,7 @@ func TestPersistableChannelQueue(t *testing.T) { DataDir: tmpDir, BatchLength: 2, QueueLength: 20, + Workers: 1, }, &testData{}) assert.NoError(t, err) diff --git a/modules/queue/queue_disk_test.go b/modules/queue/queue_disk_test.go index 9bc689b5f0607..7033fc6a34a46 100644 --- a/modules/queue/queue_disk_test.go +++ 
b/modules/queue/queue_disk_test.go @@ -29,6 +29,7 @@ func TestLevelQueue(t *testing.T) { queue, err := NewLevelQueue(handle, LevelQueueConfiguration{ DataDir: "level-queue-test-data", BatchLength: 2, + Workers: 1, }, &testData{}) assert.NoError(t, err) @@ -76,6 +77,7 @@ func TestLevelQueue(t *testing.T) { queue, err = NewLevelQueue(handle, LevelQueueConfiguration{ DataDir: "level-queue-test-data", BatchLength: 2, + Workers: 1, }, &testData{}) assert.NoError(t, err) diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index b785f0073f79c..80ce67233c3cf 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -11,6 +11,7 @@ import ( "fmt" "reflect" "strings" + "sync" "time" "code.gitea.io/gitea/modules/log" @@ -36,6 +37,7 @@ type RedisQueue struct { batchLength int closed chan struct{} exemplar interface{} + workers int } // RedisQueueConfiguration is the configuration for the redis queue @@ -45,6 +47,7 @@ type RedisQueueConfiguration struct { DBIndex int BatchLength int QueueName string + Workers int } // NewRedisQueue creates single redis or cluster redis queue @@ -62,6 +65,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) batchLength: config.BatchLength, exemplar: exemplar, closed: make(chan struct{}), + workers: config.Workers, } if len(dbs) == 0 { return nil, errors.New("no redis host found") @@ -86,6 +90,18 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) { atShutdown(context.Background(), r.Shutdown) atTerminate(context.Background(), r.Terminate) + wg := sync.WaitGroup{} + for i := 0; i < r.workers; i++ { + wg.Add(1) + go func() { + r.worker() + wg.Done() + }() + } + wg.Wait() +} + +func (r *RedisQueue) worker() { var i int var datas = make([]Data, 0, r.batchLength) for { diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 4c80c79079e13..4f7da32ce9163 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -44,6 +44,7 @@ func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) qu opts["Password"] = q.Password opts["DBIndex"] = q.DBIndex opts["QueueName"] = name + opts["Workers"] = q.Workers cfg, err := json.Marshal(opts) if err != nil { @@ -117,6 +118,8 @@ func newQueueService() { Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10) Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second) Queue.Workers = sec.Key("WORKER").MustInt(1) + + Cfg.Section("queue.notification").Key("WORKER").MustInt(5) } // ParseQueueConnStr parses a queue connection string From 7122ce74e7abdc405dce48bf7d9ea5ce7ef27056 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 7 Dec 2019 16:41:53 +0000 Subject: [PATCH 03/21] Queue: Add worker settings --- modules/setting/queue.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 4f7da32ce9163..1a33e232c3348 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -119,7 +119,16 @@ func newQueueService() { Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second) Queue.Workers = sec.Key("WORKER").MustInt(1) - Cfg.Section("queue.notification").Key("WORKER").MustInt(5) + hasWorkers := false + for _, key := range Cfg.Section("queue.notification").Keys() { + if key.Name() == "WORKERS" { + hasWorkers = true + break + } + } + if !hasWorkers { + 
Cfg.Section("queue.notification").Key("WORKERS").SetValue("5") + } } // ParseQueueConnStr parses a queue connection string From e52520fee5cf0de3a1f665a9562d3bae758e6643 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 7 Dec 2019 16:44:37 +0000 Subject: [PATCH 04/21] Queue: Make resizing worker pools --- .../doc/advanced/config-cheat-sheet.en-us.md | 2 +- modules/queue/queue_batch.go | 82 ------ modules/queue/queue_batch_test.go | 46 ---- modules/queue/queue_channel.go | 40 ++- modules/queue/queue_channel_test.go | 53 +++- modules/queue/queue_disk.go | 148 ++++++----- modules/queue/queue_disk_channel.go | 98 +++---- modules/queue/queue_disk_test.go | 20 +- modules/queue/queue_redis.go | 156 ++++++------ modules/queue/workerpool.go | 239 ++++++++++++++++++ modules/setting/queue.go | 16 +- 11 files changed, 540 insertions(+), 360 deletions(-) delete mode 100644 modules/queue/queue_batch.go delete mode 100644 modules/queue/queue_batch_test.go create mode 100644 modules/queue/workerpool.go diff --git a/docs/content/doc/advanced/config-cheat-sheet.en-us.md b/docs/content/doc/advanced/config-cheat-sheet.en-us.md index 16b4e3bee5862..14ad0be98233d 100644 --- a/docs/content/doc/advanced/config-cheat-sheet.en-us.md +++ b/docs/content/doc/advanced/config-cheat-sheet.en-us.md @@ -236,7 +236,7 @@ relation to port exhaustion. ## Queue (`queue`) -- `TYPE`: **persistable-channel**: General queue type, currently support: `persistable-channel`, `batched-channel`, `channel`, `level`, `redis`, `dummy` +- `TYPE`: **persistable-channel**: General queue type, currently support: `persistable-channel`, `channel`, `level`, `redis`, `dummy` - `DATADIR`: **queues/**: Base DataDir for storing persistent and level queues. - `LENGTH`: **20**: Maximal queue size before channel queues block - `BATCH_LENGTH`: **20**: Batch data before passing to the handler diff --git a/modules/queue/queue_batch.go b/modules/queue/queue_batch.go deleted file mode 100644 index 2731ac5e23c9a..0000000000000 --- a/modules/queue/queue_batch.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -package queue - -import ( - "context" - "time" - - "code.gitea.io/gitea/modules/log" -) - -// BatchedChannelQueueType is the type for batched channel queue -const BatchedChannelQueueType Type = "batched-channel" - -// BatchedChannelQueueConfiguration is the configuration for a BatchedChannelQueue -type BatchedChannelQueueConfiguration struct { - QueueLength int - BatchLength int - Workers int -} - -// BatchedChannelQueue implements -type BatchedChannelQueue struct { - *ChannelQueue - batchLength int -} - -// NewBatchedChannelQueue create a memory channel queue -func NewBatchedChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { - configInterface, err := toConfig(BatchedChannelQueueConfiguration{}, cfg) - if err != nil { - return nil, err - } - config := configInterface.(BatchedChannelQueueConfiguration) - return &BatchedChannelQueue{ - &ChannelQueue{ - queue: make(chan Data, config.QueueLength), - handle: handle, - exemplar: exemplar, - workers: config.Workers, - }, - config.BatchLength, - }, nil -} - -// Run starts to run the queue -func (c *BatchedChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { - atShutdown(context.Background(), func() { - log.Warn("BatchedChannelQueue is not shutdownable!") - }) - atTerminate(context.Background(), func() { - log.Warn("BatchedChannelQueue is not terminatable!") - }) - for i := 0; i < c.workers; i++ { - go func() { - delay := time.Millisecond * 300 - var datas = make([]Data, 0, c.batchLength) - for { - select { - case data := <-c.queue: - datas = append(datas, data) - if len(datas) >= c.batchLength { - c.handle(datas...) - datas = make([]Data, 0, c.batchLength) - } - case <-time.After(delay): - delay = time.Millisecond * 100 - if len(datas) > 0 { - c.handle(datas...) - datas = make([]Data, 0, c.batchLength) - } - } - } - }() - } -} - -func init() { - queuesMap[BatchedChannelQueueType] = NewBatchedChannelQueue -} diff --git a/modules/queue/queue_batch_test.go b/modules/queue/queue_batch_test.go deleted file mode 100644 index 13a85a0aadafd..0000000000000 --- a/modules/queue/queue_batch_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -package queue - -import "testing" - -import "github.com/stretchr/testify/assert" - -import "context" - -func TestBatchedChannelQueue(t *testing.T) { - handleChan := make(chan *testData) - handle := func(data ...Data) { - assert.True(t, len(data) == 2) - for _, datum := range data { - testDatum := datum.(*testData) - handleChan <- testDatum - } - } - - nilFn := func(_ context.Context, _ func()) {} - - queue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{QueueLength: 20, BatchLength: 2, Workers: 1}, &testData{}) - assert.NoError(t, err) - - go queue.Run(nilFn, nilFn) - - test1 := testData{"A", 1} - test2 := testData{"B", 2} - - queue.Push(&test1) - go queue.Push(&test2) - - result1 := <-handleChan - assert.Equal(t, test1.TestString, result1.TestString) - assert.Equal(t, test1.TestInt, result1.TestInt) - - result2 := <-handleChan - assert.Equal(t, test2.TestString, result2.TestString) - assert.Equal(t, test2.TestInt, result2.TestInt) - - err = queue.Push(test1) - assert.Error(t, err) -} diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go index 9d0ab11d21c04..ebcf22ef7932c 100644 --- a/modules/queue/queue_channel.go +++ b/modules/queue/queue_channel.go @@ -8,6 +8,7 @@ import ( "context" "fmt" "reflect" + "time" "code.gitea.io/gitea/modules/log" ) @@ -17,14 +18,17 @@ const ChannelQueueType Type = "channel" // ChannelQueueConfiguration is the configuration for a ChannelQueue type ChannelQueueConfiguration struct { - QueueLength int - Workers int + QueueLength int + BatchLength int + Workers int + BlockTimeout time.Duration + BoostTimeout time.Duration + BoostWorkers int } // ChannelQueue implements type ChannelQueue struct { - queue chan Data - handle HandlerFunc + pool *WorkerPool exemplar interface{} workers int } @@ -36,9 +40,23 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro return nil, err } config := configInterface.(ChannelQueueConfiguration) + if config.BatchLength == 0 { + config.BatchLength = 1 + } + dataChan := make(chan Data, config.QueueLength) + + ctx, cancel := context.WithCancel(context.Background()) return &ChannelQueue{ - queue: make(chan Data, config.QueueLength), - handle: handle, + pool: &WorkerPool{ + baseCtx: ctx, + cancel: cancel, + batchLength: config.BatchLength, + handle: handle, + dataChan: dataChan, + blockTimeout: config.BlockTimeout, + boostTimeout: config.BoostTimeout, + boostWorkers: config.BoostWorkers, + }, exemplar: exemplar, workers: config.Workers, }, nil @@ -52,13 +70,7 @@ func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func()) atTerminate(context.Background(), func() { log.Warn("ChannelQueue is not terminatable!") }) - for i := 0; i < c.workers; i++ { - go func() { - for data := range c.queue { - c.handle(data) - } - }() - } + c.pool.addWorkers(c.pool.baseCtx, c.workers) } // Push will push the indexer data to queue @@ -71,7 +83,7 @@ func (c *ChannelQueue) Push(data Data) error { return fmt.Errorf("Unable to assign data: %v to same type as exemplar: %v in queue: %s", data, c.exemplar, c.name) } } - c.queue <- data + c.pool.Push(data) return nil } diff --git a/modules/queue/queue_channel_test.go b/modules/queue/queue_channel_test.go index 9e72bed85d7aa..c04407aa243f1 100644 --- a/modules/queue/queue_channel_test.go +++ b/modules/queue/queue_channel_test.go @@ -7,6 +7,7 @@ package queue import ( "context" "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -22,7 +23,14 @@ func TestChannelQueue(t *testing.T) { nilFn := func(_ 
context.Context, _ func()) {} - queue, err := NewChannelQueue(handle, ChannelQueueConfiguration{QueueLength: 20, Workers: 1}, &testData{}) + queue, err := NewChannelQueue(handle, + ChannelQueueConfiguration{ + QueueLength: 20, + Workers: 1, + BlockTimeout: 1 * time.Second, + BoostTimeout: 5 * time.Minute, + BoostWorkers: 5, + }, &testData{}) assert.NoError(t, err) go queue.Run(nilFn, nilFn) @@ -36,3 +44,46 @@ func TestChannelQueue(t *testing.T) { err = queue.Push(test1) assert.Error(t, err) } + +func TestChannelQueue_Batch(t *testing.T) { + handleChan := make(chan *testData) + handle := func(data ...Data) { + assert.True(t, len(data) == 2) + for _, datum := range data { + testDatum := datum.(*testData) + handleChan <- testDatum + } + } + + nilFn := func(_ context.Context, _ func()) {} + + queue, err := NewChannelQueue(handle, + ChannelQueueConfiguration{ + QueueLength: 20, + BatchLength: 2, + Workers: 1, + BlockTimeout: 1 * time.Second, + BoostTimeout: 5 * time.Minute, + BoostWorkers: 5, + }, &testData{}) + assert.NoError(t, err) + + go queue.Run(nilFn, nilFn) + + test1 := testData{"A", 1} + test2 := testData{"B", 2} + + queue.Push(&test1) + go queue.Push(&test2) + + result1 := <-handleChan + assert.Equal(t, test1.TestString, result1.TestString) + assert.Equal(t, test1.TestInt, result1.TestInt) + + result2 := <-handleChan + assert.Equal(t, test2.TestString, result2.TestString) + assert.Equal(t, test2.TestInt, result2.TestInt) + + err = queue.Push(test1) + assert.Error(t, err) +} diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index 799bc98046fb4..50e49f3a29ef8 100644 --- a/modules/queue/queue_disk.go +++ b/modules/queue/queue_disk.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "reflect" - "sync" "time" "code.gitea.io/gitea/modules/log" @@ -22,19 +21,23 @@ const LevelQueueType Type = "level" // LevelQueueConfiguration is the configuration for a LevelQueue type LevelQueueConfiguration struct { - DataDir string - BatchLength int - Workers int + DataDir string + QueueLength int + BatchLength int + Workers int + BlockTimeout time.Duration + BoostTimeout time.Duration + BoostWorkers int } // LevelQueue implements a disk library queue type LevelQueue struct { - handle HandlerFunc - queue *levelqueue.Queue - batchLength int - closed chan struct{} - exemplar interface{} - workers int + pool *WorkerPool + queue *levelqueue.Queue + closed chan struct{} + terminated chan struct{} + exemplar interface{} + workers int } // NewLevelQueue creates a ledis local queue @@ -50,13 +53,25 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) return nil, err } + dataChan := make(chan Data, config.QueueLength) + ctx, cancel := context.WithCancel(context.Background()) + return &LevelQueue{ - handle: handle, - queue: queue, - batchLength: config.BatchLength, - exemplar: exemplar, - closed: make(chan struct{}), - workers: config.Workers, + pool: &WorkerPool{ + baseCtx: ctx, + cancel: cancel, + batchLength: config.BatchLength, + handle: handle, + dataChan: dataChan, + blockTimeout: config.BlockTimeout, + boostTimeout: config.BoostTimeout, + boostWorkers: config.BoostWorkers, + }, + queue: queue, + exemplar: exemplar, + closed: make(chan struct{}), + terminated: make(chan struct{}), + workers: config.Workers, }, nil } @@ -65,72 +80,66 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) atShutdown(context.Background(), l.Shutdown) atTerminate(context.Background(), l.Terminate) - wg := sync.WaitGroup{} - for i := 0; i < l.workers; i++ 
{ - wg.Add(1) - go func() { - l.worker() - wg.Done() - }() - } - wg.Wait() + go l.pool.addWorkers(l.pool.baseCtx, l.workers) + + go l.readToChan() + + log.Trace("Waiting til closed") + <-l.closed + + log.Trace("Waiting til done") + l.pool.Wait() + // FIXME: graceful: Needs HammerContext + log.Trace("Waiting til cleaned") + + l.pool.CleanUp(context.TODO()) + log.Trace("cleaned") + } -func (l *LevelQueue) worker() { - var i int - var datas = make([]Data, 0, l.batchLength) +func (l *LevelQueue) readToChan() { for { select { case <-l.closed: - if len(datas) > 0 { - log.Trace("Handling: %d data, %v", len(datas), datas) - l.handle(datas...) - } + // tell the pool to shutdown. + l.pool.cancel() return default: - } - i++ - if len(datas) > l.batchLength || (len(datas) > 0 && i > 3) { - log.Trace("Handling: %d data, %v", len(datas), datas) - l.handle(datas...) - datas = make([]Data, 0, l.batchLength) - i = 0 - continue - } + bs, err := l.queue.RPop() + if err != nil { + if err != levelqueue.ErrNotFound { + log.Error("RPop: %v", err) + } + time.Sleep(time.Millisecond * 100) + continue + } - bs, err := l.queue.RPop() - if err != nil { - if err != levelqueue.ErrNotFound { - log.Error("RPop: %v", err) + if len(bs) == 0 { + time.Sleep(time.Millisecond * 100) + continue } - time.Sleep(time.Millisecond * 100) - continue - } - if len(bs) == 0 { - time.Sleep(time.Millisecond * 100) - continue - } + var data Data + if l.exemplar != nil { + t := reflect.TypeOf(l.exemplar) + n := reflect.New(t) + ne := n.Elem() + err = json.Unmarshal(bs, ne.Addr().Interface()) + data = ne.Interface().(Data) + } else { + err = json.Unmarshal(bs, &data) + } + if err != nil { + log.Error("LevelQueue failed to unmarshal: %v", err) + time.Sleep(time.Millisecond * 10) + continue + } - var data Data - if l.exemplar != nil { - t := reflect.TypeOf(l.exemplar) - n := reflect.New(t) - ne := n.Elem() - err = json.Unmarshal(bs, ne.Addr().Interface()) - data = ne.Interface().(Data) - } else { - err = json.Unmarshal(bs, &data) - } - if err != nil { - log.Error("Unmarshal: %v", err) + log.Trace("LevelQueue: task found: %#v", data) + l.pool.Push(data) time.Sleep(time.Millisecond * 10) - continue - } - log.Trace("LevelQueue: task found: %#v", data) - - datas = append(datas, data) + } } } @@ -163,6 +172,7 @@ func (l *LevelQueue) Shutdown() { // Terminate this queue and close the queue func (l *LevelQueue) Terminate() { + log.Trace("Terminating") l.Shutdown() if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" { log.Error("Error whilst closing internal queue: %v", err) diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index 428e104fb5ac9..f3278271527cf 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -6,8 +6,9 @@ package queue import ( "context" - "sync" "time" + + "code.gitea.io/gitea/modules/log" ) // PersistableChannelQueueType is the type for persistable queue @@ -15,17 +16,20 @@ const PersistableChannelQueueType Type = "persistable-channel" // PersistableChannelQueueConfiguration is the configuration for a PersistableChannelQueue type PersistableChannelQueueConfiguration struct { - DataDir string - BatchLength int - QueueLength int - Timeout time.Duration - MaxAttempts int - Workers int + DataDir string + BatchLength int + QueueLength int + Timeout time.Duration + MaxAttempts int + Workers int + BlockTimeout time.Duration + BoostTimeout time.Duration + BoostWorkers int } // PersistableChannelQueue wraps a channel queue and level queue 
together
 type PersistableChannelQueue struct {
-	*BatchedChannelQueue
+	*ChannelQueue
 	delayedStarter
 	closed chan struct{}
 }
@@ -39,26 +43,33 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 	}
 	config := configInterface.(PersistableChannelQueueConfiguration)
 
-	batchChannelQueue, err := NewBatchedChannelQueue(handle, BatchedChannelQueueConfiguration{
-		QueueLength: config.QueueLength,
-		BatchLength: config.BatchLength,
-		Workers:     config.Workers,
+	channelQueue, err := NewChannelQueue(handle, ChannelQueueConfiguration{
+		QueueLength:  config.QueueLength,
+		BatchLength:  config.BatchLength,
+		Workers:      config.Workers,
+		BlockTimeout: config.BlockTimeout,
+		BoostTimeout: config.BoostTimeout,
+		BoostWorkers: config.BoostWorkers,
 	}, exemplar)
 	if err != nil {
 		return nil, err
 	}
 
-	// the level backend only needs one worker to catch up with the previously dropped work
+	// the level backend only needs temporary workers to catch up with the previously dropped work
 	levelCfg := LevelQueueConfiguration{
-		DataDir:     config.DataDir,
-		BatchLength: config.BatchLength,
-		Workers:     1,
+		DataDir:      config.DataDir,
+		QueueLength:  config.QueueLength,
+		BatchLength:  config.BatchLength,
+		Workers:      1,
+		BlockTimeout: 1 * time.Second,
+		BoostTimeout: 5 * time.Minute,
+		BoostWorkers: 5,
 	}
 
 	levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar)
 	if err == nil {
 		return &PersistableChannelQueue{
-			BatchedChannelQueue: batchChannelQueue.(*BatchedChannelQueue),
+			ChannelQueue: channelQueue.(*ChannelQueue),
 			delayedStarter: delayedStarter{
 				internal: levelQueue.(*LevelQueue),
 			},
@@ -71,7 +82,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (
 	}
 
 	return &PersistableChannelQueue{
-		BatchedChannelQueue: batchChannelQueue.(*BatchedChannelQueue),
+		ChannelQueue: channelQueue.(*ChannelQueue),
 		delayedStarter: delayedStarter{
 			cfg:         levelCfg,
 			underlying:  LevelQueueType,
@@ -88,7 +99,7 @@ func (p *PersistableChannelQueue) Push(data Data) error {
 	case <-p.closed:
 		return p.internal.Push(data)
 	default:
-		return p.BatchedChannelQueue.Push(data)
+		return p.ChannelQueue.Push(data)
 	}
 }
 
@@ -96,7 +107,7 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) {
 	p.lock.Lock()
 	if p.internal == nil {
-		p.setInternal(atShutdown, p.handle, p.exemplar)
+		p.setInternal(atShutdown, p.ChannelQueue.pool.handle, p.exemplar)
 	} else {
 		p.lock.Unlock()
 	}
@@ -106,44 +117,16 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte
 	// Just run the level queue - we shut it down later
 	go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {})
 
-	wg := sync.WaitGroup{}
-	for i := 0; i < p.workers; i++ {
-		wg.Add(1)
-		go func() {
-			p.worker()
-			wg.Done()
-		}()
-	}
-	wg.Wait()
-}
-
-func (p *PersistableChannelQueue) worker() {
-	delay := time.Millisecond * 300
-	var datas = make([]Data, 0, p.batchLength)
-loop:
-	for {
-		select {
-		case data := <-p.queue:
-			datas = append(datas, data)
-			if len(datas) >= p.batchLength {
-				p.handle(datas...)
-				datas = make([]Data, 0, p.batchLength)
-			}
-		case <-time.After(delay):
-			delay = time.Millisecond * 100
-			if len(datas) > 0 {
-				p.handle(datas...)
-				datas = make([]Data, 0, p.batchLength)
-			}
-		case <-p.closed:
-			if len(datas) > 0 {
-				p.handle(datas...)
- } - break loop - } - } + <-p.closed + p.ChannelQueue.pool.cancel() + p.internal.(*LevelQueue).pool.cancel() + p.ChannelQueue.pool.Wait() + p.internal.(*LevelQueue).pool.Wait() + // Redirect all remaining data in the chan to the internal channel go func() { - for data := range p.queue { + for data := range p.ChannelQueue.pool.dataChan { _ = p.internal.Push(data) } }() @@ -154,17 +137,18 @@ func (p *PersistableChannelQueue) Shutdown() { select { case <-p.closed: default: - close(p.closed) p.lock.Lock() defer p.lock.Unlock() if p.internal != nil { p.internal.(*LevelQueue).Shutdown() } + close(p.closed) } } // Terminate this queue and close the queue func (p *PersistableChannelQueue) Terminate() { + log.Trace("Terminating") p.Shutdown() p.lock.Lock() defer p.lock.Unlock() diff --git a/modules/queue/queue_disk_test.go b/modules/queue/queue_disk_test.go index 7033fc6a34a46..b9c6f278ef573 100644 --- a/modules/queue/queue_disk_test.go +++ b/modules/queue/queue_disk_test.go @@ -27,9 +27,13 @@ func TestLevelQueue(t *testing.T) { var queueTerminate func() queue, err := NewLevelQueue(handle, LevelQueueConfiguration{ - DataDir: "level-queue-test-data", - BatchLength: 2, - Workers: 1, + DataDir: "level-queue-test-data", + BatchLength: 2, + Workers: 1, + QueueLength: 20, + BlockTimeout: 1 * time.Second, + BoostTimeout: 5 * time.Minute, + BoostWorkers: 5, }, &testData{}) assert.NoError(t, err) @@ -75,9 +79,13 @@ func TestLevelQueue(t *testing.T) { // Reopen queue queue, err = NewLevelQueue(handle, LevelQueueConfiguration{ - DataDir: "level-queue-test-data", - BatchLength: 2, - Workers: 1, + DataDir: "level-queue-test-data", + BatchLength: 2, + Workers: 1, + QueueLength: 20, + BlockTimeout: 1 * time.Second, + BoostTimeout: 5 * time.Minute, + BoostWorkers: 5, }, &testData{}) assert.NoError(t, err) diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index 80ce67233c3cf..acc6feeb95ed4 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -11,7 +11,6 @@ import ( "fmt" "reflect" "strings" - "sync" "time" "code.gitea.io/gitea/modules/log" @@ -31,23 +30,26 @@ type redisClient interface { // RedisQueue redis queue type RedisQueue struct { - client redisClient - queueName string - handle HandlerFunc - batchLength int - closed chan struct{} - exemplar interface{} - workers int + pool *WorkerPool + client redisClient + queueName string + closed chan struct{} + exemplar interface{} + workers int } // RedisQueueConfiguration is the configuration for the redis queue type RedisQueueConfiguration struct { - Addresses string - Password string - DBIndex int - BatchLength int - QueueName string - Workers int + Addresses string + Password string + DBIndex int + BatchLength int + QueueLength int + QueueName string + Workers int + BlockTimeout time.Duration + BoostTimeout time.Duration + BoostWorkers int } // NewRedisQueue creates single redis or cluster redis queue @@ -59,13 +61,25 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) config := configInterface.(RedisQueueConfiguration) dbs := strings.Split(config.Addresses, ",") + + dataChan := make(chan Data, config.QueueLength) + ctx, cancel := context.WithCancel(context.Background()) + var queue = RedisQueue{ - queueName: config.QueueName, - handle: handle, - batchLength: config.BatchLength, - exemplar: exemplar, - closed: make(chan struct{}), - workers: config.Workers, + pool: &WorkerPool{ + baseCtx: ctx, + cancel: cancel, + batchLength: config.BatchLength, + handle: handle, + dataChan: 
dataChan, + blockTimeout: config.BlockTimeout, + boostTimeout: config.BoostTimeout, + boostWorkers: config.BoostWorkers, + }, + queueName: config.QueueName, + exemplar: exemplar, + closed: make(chan struct{}), + workers: config.Workers, } if len(dbs) == 0 { return nil, errors.New("no redis host found") @@ -90,79 +104,57 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) { atShutdown(context.Background(), r.Shutdown) atTerminate(context.Background(), r.Terminate) - wg := sync.WaitGroup{} - for i := 0; i < r.workers; i++ { - wg.Add(1) - go func() { - r.worker() - wg.Done() - }() - } - wg.Wait() + + go r.pool.addWorkers(r.pool.baseCtx, r.workers) + + go r.readToChan() + + <-r.closed + r.pool.Wait() + // FIXME: graceful: Needs HammerContext + r.pool.CleanUp(context.TODO()) } -func (r *RedisQueue) worker() { - var i int - var datas = make([]Data, 0, r.batchLength) +func (r *RedisQueue) readToChan() { for { select { case <-r.closed: - if len(datas) > 0 { - log.Trace("Handling: %d data, %v", len(datas), datas) - r.handle(datas...) - } + // tell the pool to shutdown + r.pool.cancel() return default: - } - bs, err := r.client.LPop(r.queueName).Bytes() - if err != nil && err != redis.Nil { - log.Error("LPop failed: %v", err) - time.Sleep(time.Millisecond * 100) - continue - } - - i++ - if len(datas) > r.batchLength || (len(datas) > 0 && i > 3) { - log.Trace("Handling: %d data, %v", len(datas), datas) - r.handle(datas...) - datas = make([]Data, 0, r.batchLength) - i = 0 - } - - if len(bs) == 0 { - time.Sleep(time.Millisecond * 100) - continue - } - - var data Data - if r.exemplar != nil { - t := reflect.TypeOf(r.exemplar) - n := reflect.New(t) - ne := n.Elem() - err = json.Unmarshal(bs, ne.Addr().Interface()) - data = ne.Interface().(Data) - } else { - err = json.Unmarshal(bs, &data) - } - if err != nil { - log.Error("Unmarshal: %v", err) - time.Sleep(time.Millisecond * 100) - continue - } + bs, err := r.client.LPop(r.queueName).Bytes() + if err != nil && err != redis.Nil { + log.Error("LPop failed: %v", err) + time.Sleep(time.Millisecond * 100) + continue + } - log.Trace("RedisQueue: task found: %#v", data) + if len(bs) == 0 { + time.Sleep(time.Millisecond * 100) + continue + } - datas = append(datas, data) - select { - case <-r.closed: - if len(datas) > 0 { - log.Trace("Handling: %d data, %v", len(datas), datas) - r.handle(datas...) + var data Data + if r.exemplar != nil { + t := reflect.TypeOf(r.exemplar) + n := reflect.New(t) + ne := n.Elem() + err = json.Unmarshal(bs, ne.Addr().Interface()) + data = ne.Interface().(Data) + } else { + err = json.Unmarshal(bs, &data) } - return - default: + if err != nil { + log.Error("Unmarshal: %v", err) + time.Sleep(time.Millisecond * 100) + continue + } + + log.Trace("RedisQueue: task found: %#v", data) + r.pool.Push(data) + time.Sleep(time.Millisecond * 10) } - time.Sleep(time.Millisecond * 100) } } diff --git a/modules/queue/workerpool.go b/modules/queue/workerpool.go new file mode 100644 index 0000000000000..02e053a427be9 --- /dev/null +++ b/modules/queue/workerpool.go @@ -0,0 +1,239 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
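+
+// Overview of the pool introduced in this file (summary comment, not in the
+// original patch): Push normally just sends to dataChan. When both
+// blockTimeout and boostTimeout are set, a Push that stays blocked for
+// blockTimeout doubles that timeout and adds boostWorkers temporary workers;
+// once boostTimeout lapses they are cancelled and the timeout is halved again.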
+ +package queue + +import ( + "context" + "sync" + "time" + + "code.gitea.io/gitea/modules/log" +) + +// WorkerPool takes +type WorkerPool struct { + lock sync.Mutex + baseCtx context.Context + cancel context.CancelFunc + cond *sync.Cond + numberOfWorkers int + batchLength int + handle HandlerFunc + dataChan chan Data + blockTimeout time.Duration + boostTimeout time.Duration + boostWorkers int +} + +// Push pushes the data to the internal channel +func (p *WorkerPool) Push(data Data) { + p.lock.Lock() + if p.blockTimeout > 0 && p.boostTimeout > 0 { + p.lock.Unlock() + p.pushBoost(data) + } else { + p.lock.Unlock() + p.dataChan <- data + } +} + +func (p *WorkerPool) pushBoost(data Data) { + select { + case p.dataChan <- data: + default: + p.lock.Lock() + if p.blockTimeout <= 0 { + p.lock.Unlock() + p.dataChan <- data + return + } + ourTimeout := p.blockTimeout + timer := time.NewTimer(p.blockTimeout) + p.lock.Unlock() + select { + case p.dataChan <- data: + if timer.Stop() { + select { + case <-timer.C: + default: + } + } + case <-timer.C: + p.lock.Lock() + if p.blockTimeout > ourTimeout { + p.lock.Unlock() + p.dataChan <- data + return + } + p.blockTimeout *= 2 + log.Warn("Worker Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) + ctx, cancel := context.WithCancel(p.baseCtx) + go func() { + <-time.After(p.boostTimeout) + cancel() + p.lock.Lock() + p.blockTimeout /= 2 + p.lock.Unlock() + }() + p.addWorkers(ctx, p.boostWorkers) + p.lock.Unlock() + p.dataChan <- data + } + } +} + +// NumberOfWorkers returns the number of current workers in the pool +func (p *WorkerPool) NumberOfWorkers() int { + p.lock.Lock() + defer p.lock.Unlock() + return p.numberOfWorkers +} + +// AddWorkers adds workers to the pool +func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc { + var ctx context.Context + var cancel context.CancelFunc + if timeout > 0 { + ctx, cancel = context.WithTimeout(p.baseCtx, timeout) + } else { + ctx, cancel = context.WithCancel(p.baseCtx) + } + + p.addWorkers(ctx, number) + return cancel +} + +// addWorkers adds workers to the pool +func (p *WorkerPool) addWorkers(ctx context.Context, number int) { + for i := 0; i < number; i++ { + p.lock.Lock() + if p.cond == nil { + p.cond = sync.NewCond(&p.lock) + } + p.numberOfWorkers++ + p.lock.Unlock() + go func() { + p.doWork(ctx) + + p.lock.Lock() + p.numberOfWorkers-- + if p.numberOfWorkers <= 0 { + // numberOfWorkers can't go negative but... 
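+				// clamp it to zero anyway and wake anything blocked in Wait()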
+ p.numberOfWorkers = 0 + p.cond.Broadcast() + } + p.lock.Unlock() + }() + } +} + +// Wait for WorkerPool to finish +func (p *WorkerPool) Wait() { + p.lock.Lock() + defer p.lock.Unlock() + if p.cond == nil { + p.cond = sync.NewCond(&p.lock) + } + if p.numberOfWorkers <= 0 { + return + } + p.cond.Wait() +} + +// CleanUp will drain the remaining contents of the channel +// This should be called after AddWorkers context is closed +func (p *WorkerPool) CleanUp(ctx context.Context) { + log.Trace("CleanUp") + close(p.dataChan) + for data := range p.dataChan { + p.handle(data) + select { + case <-ctx.Done(): + log.Warn("Cleanup context closed before finishing clean-up") + return + default: + } + } + log.Trace("CleanUp done") +} + +func (p *WorkerPool) doWork(ctx context.Context) { + delay := time.Millisecond * 300 + var data = make([]Data, 0, p.batchLength) + for { + select { + case <-ctx.Done(): + if len(data) > 0 { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + } + log.Trace("Worker shutting down") + return + case datum, ok := <-p.dataChan: + if !ok { + // the dataChan has been closed - we should finish up: + if len(data) > 0 { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + } + log.Trace("Worker shutting down") + return + } + data = append(data, datum) + if len(data) >= p.batchLength { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + data = make([]Data, 0, p.batchLength) + } + default: + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + if timer.Stop() { + select { + case <-timer.C: + default: + } + } + if len(data) > 0 { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + } + log.Trace("Worker shutting down") + return + case datum, ok := <-p.dataChan: + if timer.Stop() { + select { + case <-timer.C: + default: + } + } + if !ok { + // the dataChan has been closed - we should finish up: + if len(data) > 0 { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + } + log.Trace("Worker shutting down") + return + } + data = append(data, datum) + if len(data) >= p.batchLength { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) + data = make([]Data, 0, p.batchLength) + } + case <-timer.C: + delay = time.Millisecond * 100 + if len(data) > 0 { + log.Trace("Handling: %d data, %v", len(data), data) + p.handle(data...) 
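+					// the batch was flushed on timeout; start a fresh buffer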
+ data = make([]Data, 0, p.batchLength) + } + + } + } + } +} diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 1a33e232c3348..b619c9855a722 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -28,6 +28,9 @@ type queueSettings struct { MaxAttempts int Timeout time.Duration Workers int + BlockTimeout time.Duration + BoostTimeout time.Duration + BoostWorkers int } // Queue settings @@ -45,6 +48,9 @@ func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) qu opts["DBIndex"] = q.DBIndex opts["QueueName"] = name opts["Workers"] = q.Workers + opts["BlockTimeout"] = q.BlockTimeout + opts["BoostTimeout"] = q.BoostTimeout + opts["BoostWorkers"] = q.BoostWorkers cfg, err := json.Marshal(opts) if err != nil { @@ -96,7 +102,10 @@ func getQueueSettings(name string) queueSettings { q.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(Queue.WrapIfNecessary) q.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(Queue.MaxAttempts) q.Timeout = sec.Key("TIMEOUT").MustDuration(Queue.Timeout) - q.Workers = sec.Key("WORKER").MustInt(Queue.Workers) + q.Workers = sec.Key("WORKERS").MustInt(Queue.Workers) + q.BlockTimeout = sec.Key("BLOCK_TIMEOUT").MustDuration(Queue.BlockTimeout) + q.BoostTimeout = sec.Key("BOOST_TIMEOUT").MustDuration(Queue.BoostTimeout) + q.BoostWorkers = sec.Key("BOOST_WORKERS").MustInt(Queue.BoostWorkers) q.Addresses, q.Password, q.DBIndex, _ = ParseQueueConnStr(q.ConnectionString) return q @@ -117,7 +126,10 @@ func newQueueService() { Queue.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(true) Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10) Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second) - Queue.Workers = sec.Key("WORKER").MustInt(1) + Queue.Workers = sec.Key("WORKERS").MustInt(1) + Queue.BlockTimeout = sec.Key("BLOCK_TIMEOUT").MustDuration(1 * time.Second) + Queue.BoostTimeout = sec.Key("BOOST_TIMEOUT").MustDuration(5 * time.Minute) + Queue.BoostWorkers = sec.Key("BOOST_WORKERS").MustInt(5) hasWorkers := false for _, key := range Cfg.Section("queue.notification").Keys() { From d04773feb9e124a4e3e766105b19688c60bf79cc Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 7 Dec 2019 16:46:36 +0000 Subject: [PATCH 05/21] Queue: Add name variable to queues --- modules/queue/queue_channel.go | 7 +++++-- modules/queue/queue_disk.go | 23 +++++++++++++---------- modules/queue/queue_disk_channel.go | 13 ++++++++++++- modules/queue/queue_redis.go | 13 +++++++++---- modules/queue/queue_wrapped.go | 11 ++++++++--- modules/setting/queue.go | 6 +++++- 6 files changed, 52 insertions(+), 21 deletions(-) diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go index ebcf22ef7932c..90ec52347def0 100644 --- a/modules/queue/queue_channel.go +++ b/modules/queue/queue_channel.go @@ -24,6 +24,7 @@ type ChannelQueueConfiguration struct { BlockTimeout time.Duration BoostTimeout time.Duration BoostWorkers int + Name string } // ChannelQueue implements @@ -31,6 +32,7 @@ type ChannelQueue struct { pool *WorkerPool exemplar interface{} workers int + name string } // NewChannelQueue create a memory channel queue @@ -59,16 +61,17 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro }, exemplar: exemplar, workers: config.Workers, + name: config.Name, }, nil } // Run starts to run the queue func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func())) { atShutdown(context.Background(), func() { - log.Warn("ChannelQueue is not 
shutdownable!") + log.Warn("ChannelQueue: %s is not shutdownable!", c.name) }) atTerminate(context.Background(), func() { - log.Warn("ChannelQueue is not terminatable!") + log.Warn("ChannelQueue: %s is not terminatable!", c.name) }) c.pool.addWorkers(c.pool.baseCtx, c.workers) } diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index 50e49f3a29ef8..cb95b96119024 100644 --- a/modules/queue/queue_disk.go +++ b/modules/queue/queue_disk.go @@ -28,6 +28,7 @@ type LevelQueueConfiguration struct { BlockTimeout time.Duration BoostTimeout time.Duration BoostWorkers int + Name string } // LevelQueue implements a disk library queue @@ -38,6 +39,7 @@ type LevelQueue struct { terminated chan struct{} exemplar interface{} workers int + name string } // NewLevelQueue creates a ledis local queue @@ -72,6 +74,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) closed: make(chan struct{}), terminated: make(chan struct{}), workers: config.Workers, + name: config.Name, }, nil } @@ -84,16 +87,16 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) go l.readToChan() - log.Trace("Waiting til closed") + log.Trace("%s Waiting til closed", l.name) <-l.closed - log.Trace("Waiting til done") + log.Trace("%s Waiting til done", l.name) l.pool.Wait() // FIXME: graceful: Needs HammerContext - log.Trace("Waiting til cleaned") + log.Trace("%s Waiting til cleaned", l.name) l.pool.CleanUp(context.TODO()) - log.Trace("cleaned") + log.Trace("%s cleaned", l.name) } @@ -108,7 +111,7 @@ func (l *LevelQueue) readToChan() { bs, err := l.queue.RPop() if err != nil { if err != levelqueue.ErrNotFound { - log.Error("RPop: %v", err) + log.Error("%s RPop: %v", l.name, err) } time.Sleep(time.Millisecond * 100) continue @@ -130,12 +133,12 @@ func (l *LevelQueue) readToChan() { err = json.Unmarshal(bs, &data) } if err != nil { - log.Error("LevelQueue failed to unmarshal: %v", err) + log.Error("LevelQueue: %s failed to unmarshal: %v", l.name, err) time.Sleep(time.Millisecond * 10) continue } - log.Trace("LevelQueue: task found: %#v", data) + log.Trace("LevelQueue %s: task found: %#v", l.name, data) l.pool.Push(data) time.Sleep(time.Millisecond * 10) @@ -163,6 +166,7 @@ func (l *LevelQueue) Push(data Data) error { // Shutdown this queue and stop processing func (l *LevelQueue) Shutdown() { + log.Trace("Shutdown: %s", l.name) select { case <-l.closed: default: @@ -172,12 +176,11 @@ func (l *LevelQueue) Shutdown() { // Terminate this queue and close the queue func (l *LevelQueue) Terminate() { - log.Trace("Terminating") + log.Trace("Terminating: %s", l.name) l.Shutdown() if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" { - log.Error("Error whilst closing internal queue: %v", err) + log.Error("Error whilst closing internal queue in %s: %v", l.name, err) } - } func init() { diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index f3278271527cf..fc186b3bb985d 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -16,6 +16,7 @@ const PersistableChannelQueueType Type = "persistable-channel" // PersistableChannelQueueConfiguration is the configuration for a PersistableChannelQueue type PersistableChannelQueueConfiguration struct { + Name string DataDir string BatchLength int QueueLength int @@ -50,6 +51,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( BlockTimeout: config.BlockTimeout, BoostTimeout: config.BoostTimeout, 
BoostWorkers: config.BoostWorkers, + Name: config.Name + "-channel", }, exemplar) if err != nil { return nil, err @@ -64,6 +66,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( BlockTimeout: 1 * time.Second, BoostTimeout: 5 * time.Minute, BoostWorkers: 5, + Name: config.Name + "-level", } levelQueue, err := NewLevelQueue(handle, levelCfg, exemplar) @@ -72,6 +75,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( ChannelQueue: channelQueue.(*ChannelQueue), delayedStarter: delayedStarter{ internal: levelQueue.(*LevelQueue), + name: config.Name, }, closed: make(chan struct{}), }, nil @@ -88,11 +92,17 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( underlying: LevelQueueType, timeout: config.Timeout, maxAttempts: config.MaxAttempts, + name: config.Name, }, closed: make(chan struct{}), }, nil } +// Name returns the name of this queue +func (p *PersistableChannelQueue) Name() string { + return p.delayedStarter.name +} + // Push will push the indexer data to queue func (p *PersistableChannelQueue) Push(data Data) error { select { @@ -134,6 +144,7 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte // Shutdown processing this queue func (p *PersistableChannelQueue) Shutdown() { + log.Trace("Shutdown: %s", p.delayedStarter.name) select { case <-p.closed: default: @@ -148,7 +159,7 @@ func (p *PersistableChannelQueue) Shutdown() { // Terminate this queue and close the queue func (p *PersistableChannelQueue) Terminate() { - log.Trace("Terminating") + log.Trace("Terminating: %s", p.delayedStarter.name) p.Shutdown() p.lock.Lock() defer p.lock.Unlock() diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index acc6feeb95ed4..ebcba683cb157 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -36,6 +36,7 @@ type RedisQueue struct { closed chan struct{} exemplar interface{} workers int + name string } // RedisQueueConfiguration is the configuration for the redis queue @@ -50,6 +51,7 @@ type RedisQueueConfiguration struct { BlockTimeout time.Duration BoostTimeout time.Duration BoostWorkers int + Name string } // NewRedisQueue creates single redis or cluster redis queue @@ -80,6 +82,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) exemplar: exemplar, closed: make(chan struct{}), workers: config.Workers, + name: config.Name, } if len(dbs) == 0 { return nil, errors.New("no redis host found") @@ -125,7 +128,7 @@ func (r *RedisQueue) readToChan() { default: bs, err := r.client.LPop(r.queueName).Bytes() if err != nil && err != redis.Nil { - log.Error("LPop failed: %v", err) + log.Error("RedisQueue: %s LPop failed: %v", r.name, err) time.Sleep(time.Millisecond * 100) continue } @@ -146,12 +149,12 @@ func (r *RedisQueue) readToChan() { err = json.Unmarshal(bs, &data) } if err != nil { - log.Error("Unmarshal: %v", err) + log.Error("RedisQueue: %s Unmarshal: %v", r.name, err) time.Sleep(time.Millisecond * 100) continue } - log.Trace("RedisQueue: task found: %#v", data) + log.Trace("RedisQueue: %s task found: %#v", r.name, data) r.pool.Push(data) time.Sleep(time.Millisecond * 10) } @@ -178,6 +181,7 @@ func (r *RedisQueue) Push(data Data) error { // Shutdown processing from this queue func (r *RedisQueue) Shutdown() { + log.Trace("Shutdown: %s", r.name) select { case <-r.closed: default: @@ -187,9 +191,10 @@ func (r *RedisQueue) Shutdown() { // Terminate this queue and close the queue func (r 
*RedisQueue) Terminate() { + log.Trace("Terminating: %s", r.name) r.Shutdown() if err := r.client.Close(); err != nil { - log.Error("Error whilst closing internal redis client: %v", err) + log.Error("Error whilst closing internal redis client in %s: %v", r.name, err) } } diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go index f99675a9f913c..229332734802a 100644 --- a/modules/queue/queue_wrapped.go +++ b/modules/queue/queue_wrapped.go @@ -24,6 +24,7 @@ type WrappedQueueConfiguration struct { MaxAttempts int Config interface{} QueueLength int + Name string } type delayedStarter struct { @@ -33,6 +34,7 @@ type delayedStarter struct { cfg interface{} timeout time.Duration maxAttempts int + name string } func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), handle HandlerFunc, exemplar interface{}) { @@ -55,7 +57,7 @@ func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), h select { case <-ctx.Done(): q.lock.Unlock() - log.Fatal("Timedout creating queue %v with cfg %v ", q.underlying, q.cfg) + log.Fatal("Timedout creating queue %v with cfg %v in %s", q.underlying, q.cfg, q.name) default: queue, err := CreateQueue(q.underlying, handle, q.cfg, exemplar) if err == nil { @@ -64,12 +66,12 @@ func (q *delayedStarter) setInternal(atShutdown func(context.Context, func()), h break } if err.Error() != "resource temporarily unavailable" { - log.Warn("[Attempt: %d] Failed to create queue: %v cfg: %v error: %v", i, q.underlying, q.cfg, err) + log.Warn("[Attempt: %d] Failed to create queue: %v for %s cfg: %v error: %v", i, q.underlying, q.name, q.cfg, err) } i++ if q.maxAttempts > 0 && i > q.maxAttempts { q.lock.Unlock() - log.Fatal("Unable to create queue %v with cfg %v by max attempts: error: %v", q.underlying, q.cfg, err) + log.Fatal("Unable to create queue %v for %s with cfg %v by max attempts: error: %v", q.underlying, q.name, q.cfg, err) } sleepTime := 100 * time.Millisecond if q.timeout > 0 && q.maxAttempts > 0 { @@ -118,6 +120,7 @@ func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro underlying: config.Underlying, timeout: config.Timeout, maxAttempts: config.MaxAttempts, + name: config.Name, }, }, nil } @@ -156,6 +159,7 @@ func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func()) // Shutdown this queue and stop processing func (q *WrappedQueue) Shutdown() { + log.Trace("Shutdown: %s", q.name) q.lock.Lock() defer q.lock.Unlock() if q.internal == nil { @@ -168,6 +172,7 @@ func (q *WrappedQueue) Shutdown() { // Terminate this queue and close the queue func (q *WrappedQueue) Terminate() { + log.Trace("Terminating: %s", q.name) q.lock.Lock() defer q.lock.Unlock() if q.internal == nil { diff --git a/modules/setting/queue.go b/modules/setting/queue.go index b619c9855a722..0066d5a9467a3 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -23,6 +23,7 @@ type queueSettings struct { Type string Addresses string Password string + QueueName string DBIndex int WrapIfNecessary bool MaxAttempts int @@ -40,13 +41,14 @@ var Queue = queueSettings{} func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) queue.Queue { q := getQueueSettings(name) opts := make(map[string]interface{}) + opts["Name"] = name opts["QueueLength"] = q.Length opts["BatchLength"] = q.BatchLength opts["DataDir"] = q.DataDir opts["Addresses"] = q.Addresses opts["Password"] = q.Password opts["DBIndex"] = q.DBIndex - opts["QueueName"] = name + opts["QueueName"] = q.QueueName 
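	// QueueName now comes from the QUEUE_NAME setting (with the [queue]
	// fallback) instead of being hard-coded to the section name, so the
	// underlying redis list key is configurable per queue.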
opts["Workers"] = q.Workers opts["BlockTimeout"] = q.BlockTimeout opts["BoostTimeout"] = q.BoostTimeout @@ -106,6 +108,7 @@ func getQueueSettings(name string) queueSettings { q.BlockTimeout = sec.Key("BLOCK_TIMEOUT").MustDuration(Queue.BlockTimeout) q.BoostTimeout = sec.Key("BOOST_TIMEOUT").MustDuration(Queue.BoostTimeout) q.BoostWorkers = sec.Key("BOOST_WORKERS").MustInt(Queue.BoostWorkers) + q.QueueName = sec.Key("QUEUE_NAME").MustString(Queue.QueueName) q.Addresses, q.Password, q.DBIndex, _ = ParseQueueConnStr(q.ConnectionString) return q @@ -130,6 +133,7 @@ func newQueueService() { Queue.BlockTimeout = sec.Key("BLOCK_TIMEOUT").MustDuration(1 * time.Second) Queue.BoostTimeout = sec.Key("BOOST_TIMEOUT").MustDuration(5 * time.Minute) Queue.BoostWorkers = sec.Key("BOOST_WORKERS").MustInt(5) + Queue.QueueName = sec.Key("QUEUE_NAME").MustString(Queue.QueueName) hasWorkers := false for _, key := range Cfg.Section("queue.notification").Keys() { From ae2f408fcddad7d9e358e458f72770e72cabfe1b Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 7 Dec 2019 16:48:21 +0000 Subject: [PATCH 06/21] Queue: Add monitoring --- modules/queue/manager.go | 211 ++++++++++++++++++++++++++++ modules/queue/queue.go | 5 + modules/queue/queue_channel.go | 15 +- modules/queue/queue_disk.go | 21 ++- modules/queue/queue_disk_channel.go | 10 +- modules/queue/queue_redis.go | 15 +- modules/queue/queue_wrapped.go | 11 +- modules/queue/workerpool.go | 30 +++- options/locale/locale_en-US.ini | 28 ++++ routers/admin/admin.go | 59 ++++++++ routers/routes/routes.go | 11 +- templates/admin/monitor.tmpl | 28 ++++ templates/admin/queue.tmpl | 117 +++++++++++++++ 13 files changed, 541 insertions(+), 20 deletions(-) create mode 100644 modules/queue/manager.go create mode 100644 templates/admin/queue.tmpl diff --git a/modules/queue/manager.go b/modules/queue/manager.go new file mode 100644 index 0000000000000..100780c706286 --- /dev/null +++ b/modules/queue/manager.go @@ -0,0 +1,211 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +package queue + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "sort" + "sync" + "time" +) + +var manager *Manager + +// Manager is a queue manager +type Manager struct { + mutex sync.Mutex + + counter int64 + Queues map[int64]*Description +} + +// Description represents a working queue inheriting from Gitea. +type Description struct { + mutex sync.Mutex + QID int64 + Queue Queue + Type Type + Name string + Configuration interface{} + ExemplarType string + addWorkers func(number int, timeout time.Duration) context.CancelFunc + numberOfWorkers func() int + counter int64 + PoolWorkers map[int64]*PoolWorkers +} + +// DescriptionList implements the sort.Interface +type DescriptionList []*Description + +// PoolWorkers represents a working queue inheriting from Gitea. 
+type PoolWorkers struct { + PID int64 + Workers int + Start time.Time + Timeout time.Time + HasTimeout bool + Cancel context.CancelFunc +} + +// PoolWorkersList implements the sort.Interface +type PoolWorkersList []*PoolWorkers + +func init() { + _ = GetManager() +} + +// GetManager returns a Manager and initializes one as singleton if there's none yet +func GetManager() *Manager { + if manager == nil { + manager = &Manager{ + Queues: make(map[int64]*Description), + } + } + return manager +} + +// Add adds a queue to this manager +func (m *Manager) Add(queue Queue, + t Type, + configuration, + exemplar interface{}, + addWorkers func(number int, timeout time.Duration) context.CancelFunc, + numberOfWorkers func() int) int64 { + + cfg, _ := json.Marshal(configuration) + desc := &Description{ + Queue: queue, + Type: t, + Configuration: string(cfg), + ExemplarType: reflect.TypeOf(exemplar).String(), + PoolWorkers: make(map[int64]*PoolWorkers), + addWorkers: addWorkers, + numberOfWorkers: numberOfWorkers, + } + m.mutex.Lock() + m.counter++ + desc.QID = m.counter + desc.Name = fmt.Sprintf("queue-%d", desc.QID) + if named, ok := queue.(Named); ok { + desc.Name = named.Name() + } + m.Queues[desc.QID] = desc + m.mutex.Unlock() + return desc.QID +} + +// Remove a queue from the Manager +func (m *Manager) Remove(qid int64) { + m.mutex.Lock() + delete(m.Queues, qid) + m.mutex.Unlock() +} + +// GetDescription by qid +func (m *Manager) GetDescription(qid int64) *Description { + m.mutex.Lock() + defer m.mutex.Unlock() + return m.Queues[qid] +} + +// Descriptions returns the queue descriptions +func (m *Manager) Descriptions() []*Description { + m.mutex.Lock() + descs := make([]*Description, 0, len(m.Queues)) + for _, desc := range m.Queues { + descs = append(descs, desc) + } + m.mutex.Unlock() + sort.Sort(DescriptionList(descs)) + return descs +} + +// Workers returns the poolworkers +func (q *Description) Workers() []*PoolWorkers { + q.mutex.Lock() + workers := make([]*PoolWorkers, 0, len(q.PoolWorkers)) + for _, worker := range q.PoolWorkers { + workers = append(workers, worker) + } + q.mutex.Unlock() + sort.Sort(PoolWorkersList(workers)) + return workers +} + +// RegisterWorkers registers workers to this queue +func (q *Description) RegisterWorkers(number int, start time.Time, hasTimeout bool, timeout time.Time, cancel context.CancelFunc) int64 { + q.mutex.Lock() + defer q.mutex.Unlock() + q.counter++ + q.PoolWorkers[q.counter] = &PoolWorkers{ + PID: q.counter, + Workers: number, + Start: start, + Timeout: timeout, + HasTimeout: hasTimeout, + Cancel: cancel, + } + return q.counter +} + +// CancelWorkers cancels pooled workers with pid +func (q *Description) CancelWorkers(pid int64) { + q.mutex.Lock() + pw, ok := q.PoolWorkers[pid] + q.mutex.Unlock() + if !ok { + return + } + pw.Cancel() +} + +// RemoveWorkers deletes pooled workers with pid +func (q *Description) RemoveWorkers(pid int64) { + q.mutex.Lock() + delete(q.PoolWorkers, pid) + q.mutex.Unlock() +} + +// AddWorkers adds workers to the queue if it has registered an add worker function +func (q *Description) AddWorkers(number int, timeout time.Duration) { + if q.addWorkers != nil { + _ = q.addWorkers(number, timeout) + } +} + +// NumberOfWorkers returns the number of workers in the queue +func (q *Description) NumberOfWorkers() int { + if q.numberOfWorkers != nil { + return q.numberOfWorkers() + } + return -1 +} + +func (l DescriptionList) Len() int { + return len(l) +} + +func (l DescriptionList) Less(i, j int) bool { + return l[i].Name < 
l[j].Name +} + +func (l DescriptionList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l PoolWorkersList) Len() int { + return len(l) +} + +func (l PoolWorkersList) Less(i, j int) bool { + return l[i].Start.Before(l[j].Start) +} + +func (l PoolWorkersList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/modules/queue/queue.go b/modules/queue/queue.go index 1220db5c03bbc..464e16dab130d 100644 --- a/modules/queue/queue.go +++ b/modules/queue/queue.go @@ -48,6 +48,11 @@ type Shutdownable interface { Terminate() } +// Named represents a queue with a name +type Named interface { + Name() string +} + // Queue defines an interface to save an issue indexer queue type Queue interface { Run(atShutdown, atTerminate func(context.Context, func())) diff --git a/modules/queue/queue_channel.go b/modules/queue/queue_channel.go index 90ec52347def0..265a5c88f10e1 100644 --- a/modules/queue/queue_channel.go +++ b/modules/queue/queue_channel.go @@ -48,7 +48,7 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro dataChan := make(chan Data, config.QueueLength) ctx, cancel := context.WithCancel(context.Background()) - return &ChannelQueue{ + queue := &ChannelQueue{ pool: &WorkerPool{ baseCtx: ctx, cancel: cancel, @@ -62,7 +62,9 @@ func NewChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro exemplar: exemplar, workers: config.Workers, name: config.Name, - }, nil + } + queue.pool.qid = GetManager().Add(queue, ChannelQueueType, config, exemplar, queue.pool.AddWorkers, queue.pool.NumberOfWorkers) + return queue, nil } // Run starts to run the queue @@ -73,7 +75,9 @@ func (c *ChannelQueue) Run(atShutdown, atTerminate func(context.Context, func()) atTerminate(context.Background(), func() { log.Warn("ChannelQueue: %s is not terminatable!", c.name) }) - c.pool.addWorkers(c.pool.baseCtx, c.workers) + go func() { + _ = c.pool.AddWorkers(c.workers, 0) + }() } // Push will push the indexer data to queue @@ -90,6 +94,11 @@ func (c *ChannelQueue) Push(data Data) error { return nil } +// Name returns the name of this queue +func (c *ChannelQueue) Name() string { + return c.name +} + func init() { queuesMap[ChannelQueueType] = NewChannelQueue } diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index cb95b96119024..f18f3c5f8edc0 100644 --- a/modules/queue/queue_disk.go +++ b/modules/queue/queue_disk.go @@ -50,7 +50,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) } config := configInterface.(LevelQueueConfiguration) - queue, err := levelqueue.Open(config.DataDir) + internal, err := levelqueue.Open(config.DataDir) if err != nil { return nil, err } @@ -58,7 +58,7 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) dataChan := make(chan Data, config.QueueLength) ctx, cancel := context.WithCancel(context.Background()) - return &LevelQueue{ + queue := &LevelQueue{ pool: &WorkerPool{ baseCtx: ctx, cancel: cancel, @@ -69,13 +69,15 @@ func NewLevelQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) boostTimeout: config.BoostTimeout, boostWorkers: config.BoostWorkers, }, - queue: queue, + queue: internal, exemplar: exemplar, closed: make(chan struct{}), terminated: make(chan struct{}), workers: config.Workers, name: config.Name, - }, nil + } + queue.pool.qid = GetManager().Add(queue, LevelQueueType, config, exemplar, queue.pool.AddWorkers, queue.pool.NumberOfWorkers) + return queue, nil } // Run starts to run the queue @@ -83,7 +85,9 @@ func (l *LevelQueue) 
Run(atShutdown, atTerminate func(context.Context, func())) atShutdown(context.Background(), l.Shutdown) atTerminate(context.Background(), l.Terminate) - go l.pool.addWorkers(l.pool.baseCtx, l.workers) + go func() { + _ = l.pool.AddWorkers(l.workers, 0) + }() go l.readToChan() @@ -140,7 +144,7 @@ func (l *LevelQueue) readToChan() { log.Trace("LevelQueue %s: task found: %#v", l.name, data) l.pool.Push(data) - time.Sleep(time.Millisecond * 10) + time.Sleep(time.Millisecond * 100) } } @@ -183,6 +187,11 @@ func (l *LevelQueue) Terminate() { } } +// Name returns the name of this queue +func (l *LevelQueue) Name() string { + return l.name +} + func init() { queuesMap[LevelQueueType] = NewLevelQueue } diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index fc186b3bb985d..3bf39b9fa5940 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -85,7 +85,7 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( return nil, ErrInvalidConfiguration{cfg: cfg} } - return &PersistableChannelQueue{ + queue := &PersistableChannelQueue{ ChannelQueue: channelQueue.(*ChannelQueue), delayedStarter: delayedStarter{ cfg: levelCfg, @@ -95,7 +95,9 @@ func NewPersistableChannelQueue(handle HandlerFunc, cfg, exemplar interface{}) ( name: config.Name, }, closed: make(chan struct{}), - }, nil + } + _ = GetManager().Add(queue, PersistableChannelQueueType, config, exemplar, nil, nil) + return queue, nil } // Name returns the name of this queue @@ -127,7 +129,9 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte // Just run the level queue - we shut it down later go p.internal.Run(func(_ context.Context, _ func()) {}, func(_ context.Context, _ func()) {}) - go p.ChannelQueue.pool.addWorkers(p.ChannelQueue.pool.baseCtx, p.workers) + go func() { + _ = p.ChannelQueue.pool.AddWorkers(p.workers, 0) + }() <-p.closed p.ChannelQueue.pool.cancel() diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index ebcba683cb157..88794428a8577 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -67,7 +67,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) dataChan := make(chan Data, config.QueueLength) ctx, cancel := context.WithCancel(context.Background()) - var queue = RedisQueue{ + var queue = &RedisQueue{ pool: &WorkerPool{ baseCtx: ctx, cancel: cancel, @@ -100,7 +100,9 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) if err := queue.client.Ping().Err(); err != nil { return nil, err } - return &queue, nil + queue.pool.qid = GetManager().Add(queue, RedisQueueType, config, exemplar, queue.pool.AddWorkers, queue.pool.NumberOfWorkers) + + return queue, nil } // Run runs the redis queue @@ -108,7 +110,9 @@ func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) atShutdown(context.Background(), r.Shutdown) atTerminate(context.Background(), r.Terminate) - go r.pool.addWorkers(r.pool.baseCtx, r.workers) + go func() { + _ = r.pool.AddWorkers(r.workers, 0) + }() go r.readToChan() @@ -198,6 +202,11 @@ func (r *RedisQueue) Terminate() { } } +// Name returns the name of this queue +func (r *RedisQueue) Name() string { + return r.name +} + func init() { queuesMap[RedisQueueType] = NewRedisQueue } diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go index 229332734802a..57f19f63127d4 100644 --- a/modules/queue/queue_wrapped.go +++ 
b/modules/queue/queue_wrapped.go @@ -111,7 +111,7 @@ func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro return nil, ErrInvalidConfiguration{cfg: cfg} } - return &WrappedQueue{ + queue = &WrappedQueue{ handle: handle, channel: make(chan Data, config.QueueLength), exemplar: exemplar, @@ -122,7 +122,14 @@ func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, erro maxAttempts: config.MaxAttempts, name: config.Name, }, - }, nil + } + _ = GetManager().Add(queue, WrappedQueueType, config, exemplar, nil, nil) + return queue, nil +} + +// Name returns the name of the queue +func (q *WrappedQueue) Name() string { + return q.name + "-wrapper" } // Push will push the data to the internal channel checking it against the exemplar diff --git a/modules/queue/workerpool.go b/modules/queue/workerpool.go index 02e053a427be9..bf3a15c00ed3b 100644 --- a/modules/queue/workerpool.go +++ b/modules/queue/workerpool.go @@ -18,6 +18,7 @@ type WorkerPool struct { baseCtx context.Context cancel context.CancelFunc cond *sync.Cond + qid int64 numberOfWorkers int batchLength int handle HandlerFunc @@ -68,8 +69,21 @@ func (p *WorkerPool) pushBoost(data Data) { return } p.blockTimeout *= 2 - log.Warn("Worker Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) ctx, cancel := context.WithCancel(p.baseCtx) + desc := GetManager().GetDescription(p.qid) + if desc != nil { + log.Warn("Worker Channel for %v blocked for %v - adding %d temporary workers for %s, block timeout now %v", desc.Name, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) + + start := time.Now() + pid := desc.RegisterWorkers(p.boostWorkers, start, false, start, cancel) + go func() { + <-ctx.Done() + desc.RemoveWorkers(pid) + cancel() + }() + } else { + log.Warn("Worker Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) + } go func() { <-time.After(p.boostTimeout) cancel() @@ -95,12 +109,26 @@ func (p *WorkerPool) NumberOfWorkers() int { func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.CancelFunc { var ctx context.Context var cancel context.CancelFunc + start := time.Now() + end := start + hasTimeout := false if timeout > 0 { ctx, cancel = context.WithTimeout(p.baseCtx, timeout) + end = start.Add(timeout) + hasTimeout = true } else { ctx, cancel = context.WithCancel(p.baseCtx) } + desc := GetManager().GetDescription(p.qid) + if desc != nil { + pid := desc.RegisterWorkers(number, start, hasTimeout, end, cancel) + go func() { + <-ctx.Done() + desc.RemoveWorkers(pid) + cancel() + }() + } p.addWorkers(ctx, number) return cancel } diff --git a/options/locale/locale_en-US.ini b/options/locale/locale_en-US.ini index c9416e727a2b8..54a2b630f5b15 100644 --- a/options/locale/locale_en-US.ini +++ b/options/locale/locale_en-US.ini @@ -2021,6 +2021,34 @@ monitor.execute_time = Execution Time monitor.process.cancel = Cancel process monitor.process.cancel_desc = Cancelling a process may cause data loss monitor.process.cancel_notices = Cancel: %s? 
+monitor.queues = Queues
+monitor.queue = Queue: %s
+monitor.queue.name = Name
+monitor.queue.type = Type
+monitor.queue.exemplar = Exemplar Type
+monitor.queue.numberworkers = Number of Workers
+monitor.queue.review = Review Config
+monitor.queue.review_add = Review/Add Workers
+monitor.queue.configuration = Initial Configuration
+monitor.queue.nopool.title = No Worker Pool
+monitor.queue.nopool.desc = This queue wraps other queues and does not itself have a worker pool.
+monitor.queue.wrapped.desc = A wrapped queue wraps a slow-starting queue, buffering queued requests in a channel. It does not have a worker pool itself.
+monitor.queue.persistable-channel.desc = A persistable-channel wraps two queues: a channel queue that has its own worker pool and a level queue for persisted requests from previous shutdowns. It does not have a worker pool itself.
+monitor.queue.pool.timeout = Timeout
+monitor.queue.pool.addworkers.title = Add Workers
+monitor.queue.pool.addworkers.submit = Add Workers
+monitor.queue.pool.addworkers.desc = Add Workers to this pool with or without a timeout. If you set a timeout, these workers will be removed from the pool after the timeout has lapsed.
+monitor.queue.pool.addworkers.numberworkers.placeholder = Number of Workers
+monitor.queue.pool.addworkers.timeout.placeholder = Set to 0 for no timeout
+monitor.queue.pool.addworkers.mustnumbergreaterzero = Number of Workers to add must be greater than zero
+monitor.queue.pool.addworkers.musttimeoutduration = Timeout must be a golang duration, e.g. 5m, or 0 for no timeout
+monitor.queue.pool.added = Worker Group Added
+monitor.queue.pool.workers.title = Active Worker Groups
+monitor.queue.pool.workers.none = No worker groups.
+monitor.queue.pool.cancel = Shutdown Worker Group
+monitor.queue.pool.cancelling = Worker Group shutting down
+monitor.queue.pool.cancel_notices = Shutdown this group of %s workers?
+monitor.queue.pool.cancel_desc = Leaving a queue without any worker groups may cause requests to block indefinitely.
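For context on the wrapped and persistable-channel descriptions above, here is a minimal sketch (not part of this patch) of how a consumer wires a handler into the generic queue package. The myData payload type, the queue name "example" and the inline JSON configuration are illustrative assumptions modelled on the level queue configuration in the tests; CreateQueue, HandlerFunc, Push and Run are used as elsewhere in this series:

```go
package main

import (
	"context"

	"code.gitea.io/gitea/modules/queue"
)

// myData is a hypothetical payload type; it is passed as the exemplar so
// that items can be cast (or unmarshalled) back to the concrete type.
type myData struct {
	ID int64
}

func main() {
	// A HandlerFunc receives batches of Data; each item is cast back to
	// the exemplar type before processing.
	handler := func(data ...queue.Data) {
		for _, datum := range data {
			if item, ok := datum.(*myData); ok {
				_ = item // process the item here
			}
		}
	}

	// Configuration may be supplied as a JSON string; the field names here
	// are assumptions, and unknown fields are simply ignored on unmarshal.
	q, err := queue.CreateQueue(queue.ChannelQueueType, handler,
		`{"QueueLength":20,"BatchLength":20,"Workers":1,"Name":"example"}`, &myData{})
	if err != nil {
		panic(err)
	}

	// Run is handed the graceful atShutdown/atTerminate registration functions.
	go q.Run(func(context.Context, func()) {}, func(context.Context, func()) {})

	_ = q.Push(&myData{ID: 1})
}
```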
notices.system_notice_list = System Notices notices.view_detail_header = View Notice Details diff --git a/routers/admin/admin.go b/routers/admin/admin.go index ccedcaf8a62e9..7fc57edf312af 100644 --- a/routers/admin/admin.go +++ b/routers/admin/admin.go @@ -22,6 +22,7 @@ import ( "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/process" + "code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/timeutil" "code.gitea.io/gitea/services/mailer" @@ -35,6 +36,7 @@ const ( tplDashboard base.TplName = "admin/dashboard" tplConfig base.TplName = "admin/config" tplMonitor base.TplName = "admin/monitor" + tplQueue base.TplName = "admin/queue" ) var ( @@ -355,6 +357,7 @@ func Monitor(ctx *context.Context) { ctx.Data["PageIsAdminMonitor"] = true ctx.Data["Processes"] = process.GetManager().Processes() ctx.Data["Entries"] = cron.ListTasks() + ctx.Data["Queues"] = queue.GetManager().Descriptions() ctx.HTML(200, tplMonitor) } @@ -366,3 +369,59 @@ func MonitorCancel(ctx *context.Context) { "redirect": ctx.Repo.RepoLink + "/admin/monitor", }) } + +// Queue shows details for a specific queue +func Queue(ctx *context.Context) { + qid := ctx.ParamsInt64("qid") + desc := queue.GetManager().GetDescription(qid) + if desc == nil { + ctx.Status(404) + return + } + ctx.Data["Title"] = ctx.Tr("admin.monitor.queue", desc.Name) + ctx.Data["PageIsAdmin"] = true + ctx.Data["PageIsAdminMonitor"] = true + ctx.Data["Queue"] = desc + ctx.HTML(200, tplQueue) +} + +// WorkerCancel cancels a worker group +func WorkerCancel(ctx *context.Context) { + qid := ctx.ParamsInt64("qid") + desc := queue.GetManager().GetDescription(qid) + if desc == nil { + ctx.Status(404) + return + } + pid := ctx.ParamsInt64("pid") + desc.CancelWorkers(pid) + ctx.Flash.Info(ctx.Tr("admin.monitor.queue.pool.cancelling")) + ctx.JSON(200, map[string]interface{}{ + "redirect": setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid), + }) +} + +// AddWorkers adds workers to a worker group +func AddWorkers(ctx *context.Context) { + qid := ctx.ParamsInt64("qid") + desc := queue.GetManager().GetDescription(qid) + if desc == nil { + ctx.Status(404) + return + } + number := ctx.QueryInt("number") + if number < 1 { + ctx.Flash.Error(ctx.Tr("admin.monitor.queue.pool.addworkers.mustnumbergreaterzero")) + ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid)) + return + } + timeout, err := time.ParseDuration(ctx.Query("timeout")) + if err != nil { + ctx.Flash.Error(ctx.Tr("admin.monitor.queue.pool.addworkers.musttimeoutduration")) + ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid)) + return + } + desc.AddWorkers(number, timeout) + ctx.Flash.Success(ctx.Tr("admin.monitor.queue.pool.added")) + ctx.Redirect(setting.AppSubURL + fmt.Sprintf("/admin/monitor/queue/%d", qid)) +} diff --git a/routers/routes/routes.go b/routers/routes/routes.go index cb4fadbcdb384..c02843b35a73d 100644 --- a/routers/routes/routes.go +++ b/routers/routes/routes.go @@ -429,8 +429,15 @@ func RegisterRoutes(m *macaron.Macaron) { m.Get("", adminReq, admin.Dashboard) m.Get("/config", admin.Config) m.Post("/config/test_mail", admin.SendTestMail) - m.Get("/monitor", admin.Monitor) - m.Post("/monitor/cancel/:pid", admin.MonitorCancel) + m.Group("/monitor", func() { + m.Get("", admin.Monitor) + m.Post("/cancel/:pid", admin.MonitorCancel) + m.Group("/queue/:qid", func() { + m.Get("", admin.Queue) + m.Post("/add", admin.AddWorkers) + 
m.Post("/cancel/:pid", admin.WorkerCancel) + }) + }) m.Group("/users", func() { m.Get("", admin.Users) diff --git a/templates/admin/monitor.tmpl b/templates/admin/monitor.tmpl index 38402fece2be9..0f9c2150b647e 100644 --- a/templates/admin/monitor.tmpl +++ b/templates/admin/monitor.tmpl @@ -31,6 +31,34 @@ +

+ {{.i18n.Tr "admin.monitor.queues"}} +

+
+ + + + + + + + + + + + {{range .Queues}} + + + + + + + {{end}} + +
{{.i18n.Tr "admin.monitor.queue.name"}}{{.i18n.Tr "admin.monitor.queue.type"}}{{.i18n.Tr "admin.monitor.queue.exemplar"}}{{.i18n.Tr "admin.monitor.queue.numberworkers"}}
{{.Name}}{{.Type}}{{.ExemplarType}}{{$sum := .NumberOfWorkers}}{{if lt $sum 0}}-{{else}}{{$sum}}{{end}}{{if lt $sum 0}}{{$.i18n.Tr "admin.monitor.queue.review"}}{{else}}{{$.i18n.Tr "admin.monitor.queue.review_add"}}{{end}} +
+
+

{{.i18n.Tr "admin.monitor.process"}}

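The queue.tmpl added below exposes this machinery through an admin form that ultimately calls AddWorkers on the queue's worker pool. A rough sketch of that call, assuming only the WorkerPool.AddWorkers(number int, timeout time.Duration) context.CancelFunc signature introduced in this series (the helper itself is hypothetical):

```go
package queue

import "time"

// addWorkerGroup is a hypothetical helper illustrating the AddWorkers
// semantics behind the admin form: with timeout > 0 the worker group is
// removed automatically once the timeout lapses; with 0 the workers stay
// until the returned cancel function is called.
func addWorkerGroup(p *WorkerPool, number int, timeout time.Duration) func() {
	cancel := p.AddWorkers(number, timeout)
	// cancel may also be called to shut the group down before any timeout.
	return cancel
}
```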
diff --git a/templates/admin/queue.tmpl b/templates/admin/queue.tmpl new file mode 100644 index 0000000000000..ab8422824361e --- /dev/null +++ b/templates/admin/queue.tmpl @@ -0,0 +1,117 @@ +{{template "base/head" .}} +
+ {{template "admin/navbar" .}} +
+ {{template "base/alert" .}} +

+ {{.i18n.Tr "admin.monitor.queue" .Queue.Name}} +

+
+ + + + + + + + + + + + + + + + + +
{{.i18n.Tr "admin.monitor.queue.name"}}{{.i18n.Tr "admin.monitor.queue.type"}}{{.i18n.Tr "admin.monitor.queue.exemplar"}}{{.i18n.Tr "admin.monitor.queue.numberworkers"}}
{{.Queue.Name}}{{.Queue.Type}}{{.Queue.ExemplarType}}{{$sum := .Queue.NumberOfWorkers}}{{if lt $sum 0}}-{{else}}{{$sum}}{{end}}
+
+ {{if lt $sum 0 }} +

+ {{.i18n.Tr "admin.monitor.queue.nopool.title"}} +

+
+ {{if eq .Queue.Type "wrapped" }} +

{{.i18n.Tr "admin.monitor.queue.wrapped.desc"}}

+ {{else if eq .Queue.Type "persistable-channel"}} +

{{.i18n.Tr "admin.monitor.queue.persistable-channel.desc"}}

+ {{else}} +

{{.i18n.Tr "admin.monitor.queue.nopool.desc"}}

+ {{end}} +
+ {{else}} +

+ {{.i18n.Tr "admin.monitor.queue.pool.addworkers.title"}} +

+
+

{{.i18n.Tr "admin.monitor.queue.pool.addworkers.desc"}}

+
+ {{$.CsrfTokenHtml}} +
+
+
+ + +
+
+ + +
+
+ +
+
+
+

+ {{.i18n.Tr "admin.monitor.queue.pool.workers.title"}} +

+
+ + + + + + + + + + + {{range .Queue.Workers}} + + + + + + + {{else}} + + + {{end}} + +
{{.i18n.Tr "admin.monitor.queue.numberworkers"}}{{.i18n.Tr "admin.monitor.start"}}{{.i18n.Tr "admin.monitor.queue.pool.timeout"}}
{{.Workers}}{{DateFmtLong .Start}}{{if .HasTimeout}}{{DateFmtLong .Timeout}}{{else}}-{{end}} + +
{{.i18n.Tr "admin.monitor.queue.pool.workers.none" }} +
+
+ {{end}} +

+ {{.i18n.Tr "admin.monitor.queue.configuration"}} +

+
+
{{.Queue.Configuration | JsonPrettyPrint}}
+		
+
+
+ + +{{template "base/footer" .}} From 2ce9c673c1537204efa6cfa944b5cbd8e33e0689 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Wed, 11 Dec 2019 20:34:05 +0000 Subject: [PATCH 07/21] Queue: Improve logging --- modules/queue/manager.go | 5 ++ modules/queue/queue_disk.go | 38 +++++++++------ modules/queue/queue_disk_channel.go | 10 +++- modules/queue/queue_disk_channel_test.go | 28 +++++++---- modules/queue/queue_disk_test.go | 59 +++++++++++++++--------- modules/queue/queue_redis.go | 16 +++++-- modules/queue/queue_wrapped.go | 7 +-- modules/queue/workerpool.go | 14 ++++-- 8 files changed, 115 insertions(+), 62 deletions(-) diff --git a/modules/queue/manager.go b/modules/queue/manager.go index 100780c706286..81478019e5335 100644 --- a/modules/queue/manager.go +++ b/modules/queue/manager.go @@ -12,6 +12,8 @@ import ( "sort" "sync" "time" + + "code.gitea.io/gitea/modules/log" ) var manager *Manager @@ -96,6 +98,7 @@ func (m *Manager) Add(queue Queue, } m.Queues[desc.QID] = desc m.mutex.Unlock() + log.Trace("Queue Manager registered: %s (QID: %d)", desc.Name, desc.QID) return desc.QID } @@ -104,6 +107,8 @@ func (m *Manager) Remove(qid int64) { m.mutex.Lock() delete(m.Queues, qid) m.mutex.Unlock() + log.Trace("Queue Manager removed: QID: %d", qid) + } // GetDescription by qid diff --git a/modules/queue/queue_disk.go b/modules/queue/queue_disk.go index f18f3c5f8edc0..41e8a9e7c0b71 100644 --- a/modules/queue/queue_disk.go +++ b/modules/queue/queue_disk.go @@ -91,16 +91,18 @@ func (l *LevelQueue) Run(atShutdown, atTerminate func(context.Context, func())) go l.readToChan() - log.Trace("%s Waiting til closed", l.name) + log.Trace("LevelQueue: %s Waiting til closed", l.name) <-l.closed - log.Trace("%s Waiting til done", l.name) + log.Trace("LevelQueue: %s Waiting til done", l.name) l.pool.Wait() - // FIXME: graceful: Needs HammerContext - log.Trace("%s Waiting til cleaned", l.name) - l.pool.CleanUp(context.TODO()) - log.Trace("%s cleaned", l.name) + log.Trace("LevelQueue: %s Waiting til cleaned", l.name) + ctx, cancel := context.WithCancel(context.Background()) + atTerminate(ctx, cancel) + l.pool.CleanUp(ctx) + cancel() + log.Trace("LevelQueue: %s Cleaned", l.name) } @@ -115,7 +117,7 @@ func (l *LevelQueue) readToChan() { bs, err := l.queue.RPop() if err != nil { if err != levelqueue.ErrNotFound { - log.Error("%s RPop: %v", l.name, err) + log.Error("LevelQueue: %s Error on RPop: %v", l.name, err) } time.Sleep(time.Millisecond * 100) continue @@ -137,14 +139,14 @@ func (l *LevelQueue) readToChan() { err = json.Unmarshal(bs, &data) } if err != nil { - log.Error("LevelQueue: %s failed to unmarshal: %v", l.name, err) - time.Sleep(time.Millisecond * 10) + log.Error("LevelQueue: %s Failed to unmarshal with error: %v", l.name, err) + time.Sleep(time.Millisecond * 100) continue } - log.Trace("LevelQueue %s: task found: %#v", l.name, data) + log.Trace("LevelQueue %s: Task found: %#v", l.name, data) l.pool.Push(data) - time.Sleep(time.Millisecond * 100) + time.Sleep(time.Millisecond * 10) } } @@ -170,7 +172,7 @@ func (l *LevelQueue) Push(data Data) error { // Shutdown this queue and stop processing func (l *LevelQueue) Shutdown() { - log.Trace("Shutdown: %s", l.name) + log.Trace("LevelQueue: %s Shutdown", l.name) select { case <-l.closed: default: @@ -180,10 +182,16 @@ func (l *LevelQueue) Shutdown() { // Terminate this queue and close the queue func (l *LevelQueue) Terminate() { - log.Trace("Terminating: %s", l.name) + log.Trace("LevelQueue: %s Terminating", l.name) l.Shutdown() - if err := 
l.queue.Close(); err != nil && err.Error() != "leveldb: closed" { - log.Error("Error whilst closing internal queue in %s: %v", l.name, err) + select { + case <-l.terminated: + default: + close(l.terminated) + if err := l.queue.Close(); err != nil && err.Error() != "leveldb: closed" { + log.Error("Error whilst closing internal queue in %s: %v", l.name, err) + } + } } diff --git a/modules/queue/queue_disk_channel.go b/modules/queue/queue_disk_channel.go index 3bf39b9fa5940..884fc410df922 100644 --- a/modules/queue/queue_disk_channel.go +++ b/modules/queue/queue_disk_channel.go @@ -133,22 +133,28 @@ func (p *PersistableChannelQueue) Run(atShutdown, atTerminate func(context.Conte _ = p.ChannelQueue.pool.AddWorkers(p.workers, 0) }() + log.Trace("PersistableChannelQueue: %s Waiting til closed", p.delayedStarter.name) <-p.closed + log.Trace("PersistableChannelQueue: %s Cancelling pools", p.delayedStarter.name) p.ChannelQueue.pool.cancel() p.internal.(*LevelQueue).pool.cancel() + log.Trace("PersistableChannelQueue: %s Waiting til done", p.delayedStarter.name) p.ChannelQueue.pool.Wait() p.internal.(*LevelQueue).pool.Wait() // Redirect all remaining data in the chan to the internal channel go func() { + log.Trace("PersistableChannelQueue: %s Redirecting remaining data", p.delayedStarter.name) for data := range p.ChannelQueue.pool.dataChan { _ = p.internal.Push(data) } + log.Trace("PersistableChannelQueue: %s Done Redirecting remaining data", p.delayedStarter.name) }() + log.Trace("PersistableChannelQueue: %s Done main loop", p.delayedStarter.name) } // Shutdown processing this queue func (p *PersistableChannelQueue) Shutdown() { - log.Trace("Shutdown: %s", p.delayedStarter.name) + log.Trace("PersistableChannelQueue: %s Shutdown", p.delayedStarter.name) select { case <-p.closed: default: @@ -163,7 +169,7 @@ func (p *PersistableChannelQueue) Shutdown() { // Terminate this queue and close the queue func (p *PersistableChannelQueue) Terminate() { - log.Trace("Terminating: %s", p.delayedStarter.name) + log.Trace("PersistableChannelQueue: %s Terminating", p.delayedStarter.name) p.Shutdown() p.lock.Lock() defer p.lock.Unlock() diff --git a/modules/queue/queue_disk_channel_test.go b/modules/queue/queue_disk_channel_test.go index 5f6f614bd8c8b..01a90ebcfb8a6 100644 --- a/modules/queue/queue_disk_channel_test.go +++ b/modules/queue/queue_disk_channel_test.go @@ -24,8 +24,8 @@ func TestPersistableChannelQueue(t *testing.T) { } } - var queueShutdown func() - var queueTerminate func() + queueShutdown := []func(){} + queueTerminate := []func(){} tmpDir, err := ioutil.TempDir("", "persistable-channel-queue-test-data") assert.NoError(t, err) @@ -40,9 +40,9 @@ func TestPersistableChannelQueue(t *testing.T) { assert.NoError(t, err) go queue.Run(func(_ context.Context, shutdown func()) { - queueShutdown = shutdown + queueShutdown = append(queueShutdown, shutdown) }, func(_ context.Context, terminate func()) { - queueTerminate = terminate + queueTerminate = append(queueTerminate, terminate) }) test1 := testData{"A", 1} @@ -66,7 +66,9 @@ func TestPersistableChannelQueue(t *testing.T) { err = queue.Push(test1) assert.Error(t, err) - queueShutdown() + for _, callback := range queueShutdown { + callback() + } time.Sleep(200 * time.Millisecond) err = queue.Push(&test1) assert.NoError(t, err) @@ -77,7 +79,9 @@ func TestPersistableChannelQueue(t *testing.T) { assert.Fail(t, "Handler processing should have stopped") default: } - queueTerminate() + for _, callback := range queueTerminate { + callback() + } // Reopen queue 
queue, err = NewPersistableChannelQueue(handle, PersistableChannelQueueConfiguration{ @@ -89,9 +93,9 @@ func TestPersistableChannelQueue(t *testing.T) { assert.NoError(t, err) go queue.Run(func(_ context.Context, shutdown func()) { - queueShutdown = shutdown + queueShutdown = append(queueShutdown, shutdown) }, func(_ context.Context, terminate func()) { - queueTerminate = terminate + queueTerminate = append(queueTerminate, terminate) }) result3 := <-handleChan @@ -101,7 +105,11 @@ func TestPersistableChannelQueue(t *testing.T) { result4 := <-handleChan assert.Equal(t, test2.TestString, result4.TestString) assert.Equal(t, test2.TestInt, result4.TestInt) - queueShutdown() - queueTerminate() + for _, callback := range queueShutdown { + callback() + } + for _, callback := range queueTerminate { + callback() + } } diff --git a/modules/queue/queue_disk_test.go b/modules/queue/queue_disk_test.go index b9c6f278ef573..03de451760a06 100644 --- a/modules/queue/queue_disk_test.go +++ b/modules/queue/queue_disk_test.go @@ -6,6 +6,7 @@ package queue import ( "context" + "io/ioutil" "os" "testing" "time" @@ -23,11 +24,15 @@ func TestLevelQueue(t *testing.T) { } } - var queueShutdown func() - var queueTerminate func() + queueShutdown := []func(){} + queueTerminate := []func(){} + + tmpDir, err := ioutil.TempDir("", "level-queue-test-data") + assert.NoError(t, err) + defer os.RemoveAll(tmpDir) queue, err := NewLevelQueue(handle, LevelQueueConfiguration{ - DataDir: "level-queue-test-data", + DataDir: tmpDir, BatchLength: 2, Workers: 1, QueueLength: 20, @@ -38,9 +43,9 @@ func TestLevelQueue(t *testing.T) { assert.NoError(t, err) go queue.Run(func(_ context.Context, shutdown func()) { - queueShutdown = shutdown + queueShutdown = append(queueShutdown, shutdown) }, func(_ context.Context, terminate func()) { - queueTerminate = terminate + queueTerminate = append(queueTerminate, terminate) }) test1 := testData{"A", 1} @@ -64,7 +69,9 @@ func TestLevelQueue(t *testing.T) { err = queue.Push(test1) assert.Error(t, err) - queueShutdown() + for _, callback := range queueShutdown { + callback() + } time.Sleep(200 * time.Millisecond) err = queue.Push(&test1) assert.NoError(t, err) @@ -75,24 +82,30 @@ func TestLevelQueue(t *testing.T) { assert.Fail(t, "Handler processing should have stopped") default: } - queueTerminate() + for _, callback := range queueTerminate { + callback() + } // Reopen queue - queue, err = NewLevelQueue(handle, LevelQueueConfiguration{ - DataDir: "level-queue-test-data", - BatchLength: 2, - Workers: 1, - QueueLength: 20, - BlockTimeout: 1 * time.Second, - BoostTimeout: 5 * time.Minute, - BoostWorkers: 5, - }, &testData{}) + queue, err = NewWrappedQueue(handle, + WrappedQueueConfiguration{ + Underlying: LevelQueueType, + Config: LevelQueueConfiguration{ + DataDir: tmpDir, + BatchLength: 2, + Workers: 1, + QueueLength: 20, + BlockTimeout: 1 * time.Second, + BoostTimeout: 5 * time.Minute, + BoostWorkers: 5, + }, + }, &testData{}) assert.NoError(t, err) go queue.Run(func(_ context.Context, shutdown func()) { - queueShutdown = shutdown + queueShutdown = append(queueShutdown, shutdown) }, func(_ context.Context, terminate func()) { - queueTerminate = terminate + queueTerminate = append(queueTerminate, terminate) }) result3 := <-handleChan @@ -102,8 +115,10 @@ func TestLevelQueue(t *testing.T) { result4 := <-handleChan assert.Equal(t, test2.TestString, result4.TestString) assert.Equal(t, test2.TestInt, result4.TestInt) - queueShutdown() - queueTerminate() - - os.RemoveAll("level-queue-test-data") + for _, 
callback := range queueShutdown { + callback() + } + for _, callback := range queueTerminate { + callback() + } } diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go index 88794428a8577..4f2ceec029f0f 100644 --- a/modules/queue/queue_redis.go +++ b/modules/queue/queue_redis.go @@ -116,10 +116,16 @@ func (r *RedisQueue) Run(atShutdown, atTerminate func(context.Context, func())) go r.readToChan() + log.Trace("RedisQueue: %s Waiting til closed", r.name) <-r.closed + log.Trace("RedisQueue: %s Waiting til done", r.name) r.pool.Wait() - // FIXME: graceful: Needs HammerContext - r.pool.CleanUp(context.TODO()) + + log.Trace("RedisQueue: %s Waiting til cleaned", r.name) + ctx, cancel := context.WithCancel(context.Background()) + atTerminate(ctx, cancel) + r.pool.CleanUp(ctx) + cancel() } func (r *RedisQueue) readToChan() { @@ -132,7 +138,7 @@ func (r *RedisQueue) readToChan() { default: bs, err := r.client.LPop(r.queueName).Bytes() if err != nil && err != redis.Nil { - log.Error("RedisQueue: %s LPop failed: %v", r.name, err) + log.Error("RedisQueue: %s Error on LPop: %v", r.name, err) time.Sleep(time.Millisecond * 100) continue } @@ -153,12 +159,12 @@ func (r *RedisQueue) readToChan() { err = json.Unmarshal(bs, &data) } if err != nil { - log.Error("RedisQueue: %s Unmarshal: %v", r.name, err) + log.Error("RedisQueue: %s Error on Unmarshal: %v", r.name, err) time.Sleep(time.Millisecond * 100) continue } - log.Trace("RedisQueue: %s task found: %#v", r.name, data) + log.Trace("RedisQueue: %s Task found: %#v", r.name, data) r.pool.Push(data) time.Sleep(time.Millisecond * 10) } diff --git a/modules/queue/queue_wrapped.go b/modules/queue/queue_wrapped.go index 57f19f63127d4..46557ea318991 100644 --- a/modules/queue/queue_wrapped.go +++ b/modules/queue/queue_wrapped.go @@ -92,7 +92,7 @@ type WrappedQueue struct { // NewWrappedQueue will attempt to create a queue of the provided type, // but if there is a problem creating this queue it will instead create -// a WrappedQueue with delayed the startup of the queue instead and a +// a WrappedQueue with delayed startup of the queue instead and a // channel which will be redirected to the queue func NewWrappedQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error) { configInterface, err := toConfig(WrappedQueueConfiguration{}, cfg) @@ -162,11 +162,12 @@ func (q *WrappedQueue) Run(atShutdown, atTerminate func(context.Context, func()) } q.internal.Run(atShutdown, atTerminate) + log.Trace("WrappedQueue: %s Done", q.name) } // Shutdown this queue and stop processing func (q *WrappedQueue) Shutdown() { - log.Trace("Shutdown: %s", q.name) + log.Trace("WrappedQueue: %s Shutdown", q.name) q.lock.Lock() defer q.lock.Unlock() if q.internal == nil { @@ -179,7 +180,7 @@ func (q *WrappedQueue) Shutdown() { // Terminate this queue and close the queue func (q *WrappedQueue) Terminate() { - log.Trace("Terminating: %s", q.name) + log.Trace("WrappedQueue: %s Terminating", q.name) q.lock.Lock() defer q.lock.Unlock() if q.internal == nil { diff --git a/modules/queue/workerpool.go b/modules/queue/workerpool.go index bf3a15c00ed3b..fe05e7fe6ec2c 100644 --- a/modules/queue/workerpool.go +++ b/modules/queue/workerpool.go @@ -72,7 +72,7 @@ func (p *WorkerPool) pushBoost(data Data) { ctx, cancel := context.WithCancel(p.baseCtx) desc := GetManager().GetDescription(p.qid) if desc != nil { - log.Warn("Worker Channel for %v blocked for %v - adding %d temporary workers for %s, block timeout now %v", desc.Name, ourTimeout, p.boostWorkers, p.boostTimeout, 
p.blockTimeout) + log.Warn("WorkerPool: %d (for %s) Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, desc.Name, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) start := time.Now() pid := desc.RegisterWorkers(p.boostWorkers, start, false, start, cancel) @@ -82,7 +82,7 @@ func (p *WorkerPool) pushBoost(data Data) { cancel() }() } else { - log.Warn("Worker Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) + log.Warn("WorkerPool: %d Channel blocked for %v - adding %d temporary workers for %s, block timeout now %v", p.qid, ourTimeout, p.boostWorkers, p.boostTimeout, p.blockTimeout) } go func() { <-time.After(p.boostTimeout) @@ -128,6 +128,10 @@ func (p *WorkerPool) AddWorkers(number int, timeout time.Duration) context.Cance desc.RemoveWorkers(pid) cancel() }() + log.Trace("WorkerPool: %d (for %s) adding %d workers with group id: %d", p.qid, desc.Name, number, pid) + } else { + log.Trace("WorkerPool: %d adding %d workers (no group id)", p.qid, number) + } p.addWorkers(ctx, number) return cancel @@ -173,18 +177,18 @@ func (p *WorkerPool) Wait() { // CleanUp will drain the remaining contents of the channel // This should be called after AddWorkers context is closed func (p *WorkerPool) CleanUp(ctx context.Context) { - log.Trace("CleanUp") + log.Trace("WorkerPool: %d CleanUp", p.qid) close(p.dataChan) for data := range p.dataChan { p.handle(data) select { case <-ctx.Done(): - log.Warn("Cleanup context closed before finishing clean-up") + log.Warn("WorkerPool: %d Cleanup context closed before finishing clean-up", p.qid) return default: } } - log.Trace("CleanUp done") + log.Trace("WorkerPool: %d CleanUp Done", p.qid) } func (p *WorkerPool) doWork(ctx context.Context) { From 0b275fd79ef76850ef3fc374d7b66cda7b3e7f28 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Fri, 15 Nov 2019 19:16:40 +0000 Subject: [PATCH 08/21] Issues: Gracefulise the issues indexer Remove the old now unused specific queues --- integrations/issue_test.go | 9 +- modules/indexer/issues/bleve.go | 5 + modules/indexer/issues/db.go | 5 + modules/indexer/issues/indexer.go | 216 ++++++++++++++++-------- modules/indexer/issues/queue.go | 25 --- modules/indexer/issues/queue_channel.go | 62 ------- modules/indexer/issues/queue_disk.go | 104 ------------ modules/indexer/issues/queue_redis.go | 146 ---------------- 8 files changed, 166 insertions(+), 406 deletions(-) delete mode 100644 modules/indexer/issues/queue.go delete mode 100644 modules/indexer/issues/queue_channel.go delete mode 100644 modules/indexer/issues/queue_disk.go delete mode 100644 modules/indexer/issues/queue_redis.go diff --git a/integrations/issue_test.go b/integrations/issue_test.go index fe66a005047fe..1454d75885019 100644 --- a/integrations/issue_test.go +++ b/integrations/issue_test.go @@ -11,8 +11,10 @@ import ( "strconv" "strings" "testing" + "time" "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/indexer/issues" "code.gitea.io/gitea/modules/references" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/test" @@ -87,7 +89,12 @@ func TestViewIssuesKeyword(t *testing.T) { defer prepareTestEnv(t)() repo := models.AssertExistsAndLoadBean(t, &models.Repository{ID: 1}).(*models.Repository) - + issue := models.AssertExistsAndLoadBean(t, &models.Issue{ + RepoID: repo.ID, + Index: 1, + }).(*models.Issue) + issues.UpdateIssueIndexer(issue) + time.Sleep(time.Second * 1) const keyword = "first" req := 
NewRequestf(t, "GET", "%s/issues?q=%s", repo.RelLink(), keyword) resp := MakeRequest(t, req, http.StatusOK) diff --git a/modules/indexer/issues/bleve.go b/modules/indexer/issues/bleve.go index 24443e54a343b..7878d39c75bd0 100644 --- a/modules/indexer/issues/bleve.go +++ b/modules/indexer/issues/bleve.go @@ -256,3 +256,8 @@ func (b *BleveIndexer) Search(keyword string, repoIDs []int64, limit, start int) } return &ret, nil } + +// Close the Index +func (b *BleveIndexer) Close() error { + return b.indexer.Close() +} diff --git a/modules/indexer/issues/db.go b/modules/indexer/issues/db.go index a758cfeaeebdd..2a5df80fac2ee 100644 --- a/modules/indexer/issues/db.go +++ b/modules/indexer/issues/db.go @@ -25,6 +25,11 @@ func (db *DBIndexer) Delete(ids ...int64) error { return nil } +// Close dummy function +func (db *DBIndexer) Close() error { + return nil +} + // Search dummy function func (db *DBIndexer) Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error) { total, ids, err := models.SearchIssueIDsByKeyword(kw, repoIDs, limit, start) diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index ebcd3f68dd51c..1fcef59f34f48 100644 --- a/modules/indexer/issues/indexer.go +++ b/modules/indexer/issues/indexer.go @@ -5,12 +5,16 @@ package issues import ( + "context" + "encoding/json" + "os" "sync" "time" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" ) @@ -44,6 +48,7 @@ type Indexer interface { Index(issue []*IndexerData) error Delete(ids ...int64) error Search(kw string, repoIDs []int64, limit, start int) (*SearchResult, error) + Close() error } type indexerHolder struct { @@ -75,9 +80,8 @@ func (h *indexerHolder) get() Indexer { } var ( - issueIndexerChannel = make(chan *IndexerData, setting.Indexer.UpdateQueueLength) // issueIndexerQueue queue of issue ids to be updated - issueIndexerQueue Queue + issueIndexerQueue queue.Queue holder = newIndexerHolder() ) @@ -85,88 +89,142 @@ var ( // all issue index done. func InitIssueIndexer(syncReindex bool) { waitChannel := make(chan time.Duration) + + // Create the Queue + switch setting.Indexer.IssueType { + case "bleve": + handler := func(data ...queue.Data) { + iData := make([]*IndexerData, 0, setting.Indexer.IssueQueueBatchNumber) + for _, datum := range data { + indexerData, ok := datum.(*IndexerData) + if !ok { + log.Error("Unable to process provided datum: %v - not possible to cast to IndexerData", datum) + continue + } + log.Trace("IndexerData Process: %d %v %t", indexerData.ID, indexerData.IDs, indexerData.IsDelete) + if indexerData.IsDelete { + _ = holder.get().Delete(indexerData.IDs...) 
+ continue + } + iData = append(iData, indexerData) + } + if err := holder.get().Index(iData); err != nil { + log.Error("Error whilst indexing: %v Error: %v", iData, err) + } + } + + queueType := queue.PersistableChannelQueueType + switch setting.Indexer.IssueQueueType { + case setting.LevelQueueType: + queueType = queue.LevelQueueType + case setting.ChannelQueueType: + queueType = queue.PersistableChannelQueueType + case setting.RedisQueueType: + queueType = queue.RedisQueueType + default: + log.Fatal("Unsupported indexer queue type: %v", + setting.Indexer.IssueQueueType) + } + + name := "issue_indexer_queue" + opts := make(map[string]interface{}) + opts["QueueLength"] = setting.Indexer.UpdateQueueLength + opts["BatchLength"] = setting.Indexer.IssueQueueBatchNumber + opts["DataDir"] = setting.Indexer.IssueQueueDir + + addrs, password, dbIdx, err := setting.ParseQueueConnStr(setting.Indexer.IssueQueueConnStr) + if queueType == queue.RedisQueueType && err != nil { + log.Fatal("Unable to parse connection string for RedisQueueType: %s : %v", + setting.Indexer.IssueQueueConnStr, + err) + } + opts["Addresses"] = addrs + opts["Password"] = password + opts["DBIndex"] = dbIdx + opts["QueueName"] = name + opts["Name"] = name + opts["Workers"] = 1 + opts["BlockTimeout"] = 1 * time.Second + opts["BoostTimeout"] = 5 * time.Minute + opts["BoostWorkers"] = 5 + cfg, err := json.Marshal(opts) + if err != nil { + log.Error("Unable to marshall generic options: %v Error: %v", opts, err) + log.Fatal("Unable to create issue indexer queue with type %s: %v", + queueType, + err) + } + log.Debug("Creating issue indexer queue with type %s: configuration: %s", queueType, string(cfg)) + issueIndexerQueue, err = queue.CreateQueue(queueType, handler, cfg, &IndexerData{}) + if err != nil { + issueIndexerQueue, err = queue.CreateQueue(queue.WrappedQueueType, handler, queue.WrappedQueueConfiguration{ + Underlying: queueType, + Timeout: setting.GracefulHammerTime + 30*time.Second, + MaxAttempts: 10, + Config: cfg, + QueueLength: setting.Indexer.UpdateQueueLength, + Name: name, + }, &IndexerData{}) + } + if err != nil { + log.Fatal("Unable to create issue indexer queue with type %s: %v : %v", + queueType, + string(cfg), + err) + } + default: + issueIndexerQueue = &queue.DummyQueue{} + } + + // Create the Indexer go func() { start := time.Now() - log.Info("Initializing Issue Indexer") + log.Info("PID %d: Initializing Issue Indexer: %s", os.Getpid(), setting.Indexer.IssueType) var populate bool - var dummyQueue bool switch setting.Indexer.IssueType { case "bleve": - issueIndexer := NewBleveIndexer(setting.Indexer.IssuePath) - exist, err := issueIndexer.Init() - if err != nil { - log.Fatal("Unable to initialize Bleve Issue Indexer: %v", err) - } - populate = !exist - holder.set(issueIndexer) + graceful.GetManager().RunWithShutdownFns(func(_, atTerminate func(context.Context, func())) { + issueIndexer := NewBleveIndexer(setting.Indexer.IssuePath) + exist, err := issueIndexer.Init() + if err != nil { + log.Fatal("Unable to initialize Bleve Issue Indexer: %v", err) + } + populate = !exist + holder.set(issueIndexer) + atTerminate(context.Background(), func() { + log.Debug("Closing issue indexer") + issueIndexer := holder.get() + if issueIndexer != nil { + err := issueIndexer.Close() + if err != nil { + log.Error("Error whilst closing the issue indexer: %v", err) + } + } + log.Info("PID: %d Issue Indexer closed", os.Getpid()) + }) + log.Debug("Created Bleve Indexer") + }) case "db": issueIndexer := &DBIndexer{} 
holder.set(issueIndexer) - dummyQueue = true default: log.Fatal("Unknown issue indexer type: %s", setting.Indexer.IssueType) } - if dummyQueue { - issueIndexerQueue = &DummyQueue{} - } else { - var err error - switch setting.Indexer.IssueQueueType { - case setting.LevelQueueType: - issueIndexerQueue, err = NewLevelQueue( - holder.get(), - setting.Indexer.IssueQueueDir, - setting.Indexer.IssueQueueBatchNumber) - if err != nil { - log.Fatal( - "Unable create level queue for issue queue dir: %s batch number: %d : %v", - setting.Indexer.IssueQueueDir, - setting.Indexer.IssueQueueBatchNumber, - err) - } - case setting.ChannelQueueType: - issueIndexerQueue = NewChannelQueue(holder.get(), setting.Indexer.IssueQueueBatchNumber) - case setting.RedisQueueType: - addrs, pass, idx, err := parseConnStr(setting.Indexer.IssueQueueConnStr) - if err != nil { - log.Fatal("Unable to parse connection string for RedisQueueType: %s : %v", - setting.Indexer.IssueQueueConnStr, - err) - } - issueIndexerQueue, err = NewRedisQueue(addrs, pass, idx, holder.get(), setting.Indexer.IssueQueueBatchNumber) - if err != nil { - log.Fatal("Unable to create RedisQueue: %s : %v", - setting.Indexer.IssueQueueConnStr, - err) - } - default: - log.Fatal("Unsupported indexer queue type: %v", - setting.Indexer.IssueQueueType) - } - - go func() { - err = issueIndexerQueue.Run() - if err != nil { - log.Error("issueIndexerQueue.Run: %v", err) - } - }() - } - - go func() { - for data := range issueIndexerChannel { - _ = issueIndexerQueue.Push(data) - } - }() + // Start processing the queue + go graceful.GetManager().RunWithShutdownFns(issueIndexerQueue.Run) + // Populate the index if populate { if syncReindex { - populateIssueIndexer() + graceful.GetManager().RunWithShutdownContext(populateIssueIndexer) } else { - go populateIssueIndexer() + go graceful.GetManager().RunWithShutdownContext(populateIssueIndexer) } } waitChannel <- time.Since(start) }() + if syncReindex { <-waitChannel } else if setting.Indexer.StartupTimeout > 0 { @@ -179,6 +237,9 @@ func InitIssueIndexer(syncReindex bool) { case duration := <-waitChannel: log.Info("Issue Indexer Initialization took %v", duration) case <-time.After(timeout): + if shutdownable, ok := issueIndexerQueue.(queue.Shutdownable); ok { + shutdownable.Terminate() + } log.Fatal("Issue Indexer Initialization timed-out after: %v", timeout) } }() @@ -186,8 +247,14 @@ func InitIssueIndexer(syncReindex bool) { } // populateIssueIndexer populate the issue indexer with issue data -func populateIssueIndexer() { +func populateIssueIndexer(ctx context.Context) { for page := 1; ; page++ { + select { + case <-ctx.Done(): + log.Warn("Issue Indexer population shutdown before completion") + return + default: + } repos, _, err := models.SearchRepositoryByName(&models.SearchRepoOptions{ Page: page, PageSize: models.RepositoryListDefaultPageSize, @@ -200,10 +267,17 @@ func populateIssueIndexer() { continue } if len(repos) == 0 { + log.Debug("Issue Indexer population complete") return } for _, repo := range repos { + select { + case <-ctx.Done(): + log.Info("Issue Indexer population shutdown before completion") + return + default: + } UpdateRepoIndexer(repo) } } @@ -237,13 +311,17 @@ func UpdateIssueIndexer(issue *models.Issue) { comments = append(comments, comment.Content) } } - issueIndexerChannel <- &IndexerData{ + indexerData := &IndexerData{ ID: issue.ID, RepoID: issue.RepoID, Title: issue.Title, Content: issue.Content, Comments: comments, } + log.Debug("Adding to channel: %v", indexerData) + if err := 
issueIndexerQueue.Push(indexerData); err != nil { + log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err) + } } // DeleteRepoIssueIndexer deletes repo's all issues indexes @@ -258,11 +336,13 @@ func DeleteRepoIssueIndexer(repo *models.Repository) { if len(ids) == 0 { return } - - issueIndexerChannel <- &IndexerData{ + indexerData := &IndexerData{ IDs: ids, IsDelete: true, } + if err := issueIndexerQueue.Push(indexerData); err != nil { + log.Error("Unable to push to issue indexer: %v: Error: %v", indexerData, err) + } } // SearchIssuesByKeyword search issue ids by keywords and repo id diff --git a/modules/indexer/issues/queue.go b/modules/indexer/issues/queue.go deleted file mode 100644 index f93e5c47a40a5..0000000000000 --- a/modules/indexer/issues/queue.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package issues - -// Queue defines an interface to save an issue indexer queue -type Queue interface { - Run() error - Push(*IndexerData) error -} - -// DummyQueue represents an empty queue -type DummyQueue struct { -} - -// Run starts to run the queue -func (b *DummyQueue) Run() error { - return nil -} - -// Push pushes data to indexer -func (b *DummyQueue) Push(*IndexerData) error { - return nil -} diff --git a/modules/indexer/issues/queue_channel.go b/modules/indexer/issues/queue_channel.go deleted file mode 100644 index b6458d3eb53db..0000000000000 --- a/modules/indexer/issues/queue_channel.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2018 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package issues - -import ( - "time" - - "code.gitea.io/gitea/modules/setting" -) - -// ChannelQueue implements -type ChannelQueue struct { - queue chan *IndexerData - indexer Indexer - batchNumber int -} - -// NewChannelQueue create a memory channel queue -func NewChannelQueue(indexer Indexer, batchNumber int) *ChannelQueue { - return &ChannelQueue{ - queue: make(chan *IndexerData, setting.Indexer.UpdateQueueLength), - indexer: indexer, - batchNumber: batchNumber, - } -} - -// Run starts to run the queue -func (c *ChannelQueue) Run() error { - var i int - var datas = make([]*IndexerData, 0, c.batchNumber) - for { - select { - case data := <-c.queue: - if data.IsDelete { - _ = c.indexer.Delete(data.IDs...) - continue - } - - datas = append(datas, data) - if len(datas) >= c.batchNumber { - _ = c.indexer.Index(datas) - // TODO: save the point - datas = make([]*IndexerData, 0, c.batchNumber) - } - case <-time.After(time.Millisecond * 100): - i++ - if i >= 3 && len(datas) > 0 { - _ = c.indexer.Index(datas) - // TODO: save the point - datas = make([]*IndexerData, 0, c.batchNumber) - } - } - } -} - -// Push will push the indexer data to queue -func (c *ChannelQueue) Push(data *IndexerData) error { - c.queue <- data - return nil -} diff --git a/modules/indexer/issues/queue_disk.go b/modules/indexer/issues/queue_disk.go deleted file mode 100644 index d6187f2acbd02..0000000000000 --- a/modules/indexer/issues/queue_disk.go +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -package issues - -import ( - "encoding/json" - "time" - - "code.gitea.io/gitea/modules/log" - - "gitea.com/lunny/levelqueue" -) - -var ( - _ Queue = &LevelQueue{} -) - -// LevelQueue implements a disk library queue -type LevelQueue struct { - indexer Indexer - queue *levelqueue.Queue - batchNumber int -} - -// NewLevelQueue creates a ledis local queue -func NewLevelQueue(indexer Indexer, dataDir string, batchNumber int) (*LevelQueue, error) { - queue, err := levelqueue.Open(dataDir) - if err != nil { - return nil, err - } - - return &LevelQueue{ - indexer: indexer, - queue: queue, - batchNumber: batchNumber, - }, nil -} - -// Run starts to run the queue -func (l *LevelQueue) Run() error { - var i int - var datas = make([]*IndexerData, 0, l.batchNumber) - for { - i++ - if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) { - _ = l.indexer.Index(datas) - datas = make([]*IndexerData, 0, l.batchNumber) - i = 0 - continue - } - - bs, err := l.queue.RPop() - if err != nil { - if err != levelqueue.ErrNotFound { - log.Error("RPop: %v", err) - } - time.Sleep(time.Millisecond * 100) - continue - } - - if len(bs) == 0 { - time.Sleep(time.Millisecond * 100) - continue - } - - var data IndexerData - err = json.Unmarshal(bs, &data) - if err != nil { - log.Error("Unmarshal: %v", err) - time.Sleep(time.Millisecond * 100) - continue - } - - log.Trace("LevelQueue: task found: %#v", data) - - if data.IsDelete { - if data.ID > 0 { - if err = l.indexer.Delete(data.ID); err != nil { - log.Error("indexer.Delete: %v", err) - } - } else if len(data.IDs) > 0 { - if err = l.indexer.Delete(data.IDs...); err != nil { - log.Error("indexer.Delete: %v", err) - } - } - time.Sleep(time.Millisecond * 10) - continue - } - - datas = append(datas, &data) - time.Sleep(time.Millisecond * 10) - } -} - -// Push will push the indexer data to queue -func (l *LevelQueue) Push(data *IndexerData) error { - bs, err := json.Marshal(data) - if err != nil { - return err - } - return l.queue.LPush(bs) -} diff --git a/modules/indexer/issues/queue_redis.go b/modules/indexer/issues/queue_redis.go deleted file mode 100644 index 0344d3c87a0f5..0000000000000 --- a/modules/indexer/issues/queue_redis.go +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -package issues - -import ( - "encoding/json" - "errors" - "strconv" - "strings" - "time" - - "code.gitea.io/gitea/modules/log" - - "github.com/go-redis/redis" -) - -var ( - _ Queue = &RedisQueue{} -) - -type redisClient interface { - RPush(key string, args ...interface{}) *redis.IntCmd - LPop(key string) *redis.StringCmd - Ping() *redis.StatusCmd -} - -// RedisQueue redis queue -type RedisQueue struct { - client redisClient - queueName string - indexer Indexer - batchNumber int -} - -func parseConnStr(connStr string) (addrs, password string, dbIdx int, err error) { - fields := strings.Fields(connStr) - for _, f := range fields { - items := strings.SplitN(f, "=", 2) - if len(items) < 2 { - continue - } - switch strings.ToLower(items[0]) { - case "addrs": - addrs = items[1] - case "password": - password = items[1] - case "db": - dbIdx, err = strconv.Atoi(items[1]) - if err != nil { - return - } - } - } - return -} - -// NewRedisQueue creates single redis or cluster redis queue -func NewRedisQueue(addrs string, password string, dbIdx int, indexer Indexer, batchNumber int) (*RedisQueue, error) { - dbs := strings.Split(addrs, ",") - var queue = RedisQueue{ - queueName: "issue_indexer_queue", - indexer: indexer, - batchNumber: batchNumber, - } - if len(dbs) == 0 { - return nil, errors.New("no redis host found") - } else if len(dbs) == 1 { - queue.client = redis.NewClient(&redis.Options{ - Addr: strings.TrimSpace(dbs[0]), // use default Addr - Password: password, // no password set - DB: dbIdx, // use default DB - }) - } else { - queue.client = redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: dbs, - }) - } - if err := queue.client.Ping().Err(); err != nil { - return nil, err - } - return &queue, nil -} - -// Run runs the redis queue -func (r *RedisQueue) Run() error { - var i int - var datas = make([]*IndexerData, 0, r.batchNumber) - for { - bs, err := r.client.LPop(r.queueName).Bytes() - if err != nil && err != redis.Nil { - log.Error("LPop faile: %v", err) - time.Sleep(time.Millisecond * 100) - continue - } - - i++ - if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) { - _ = r.indexer.Index(datas) - datas = make([]*IndexerData, 0, r.batchNumber) - i = 0 - } - - if len(bs) == 0 { - time.Sleep(time.Millisecond * 100) - continue - } - - var data IndexerData - err = json.Unmarshal(bs, &data) - if err != nil { - log.Error("Unmarshal: %v", err) - time.Sleep(time.Millisecond * 100) - continue - } - - log.Trace("RedisQueue: task found: %#v", data) - - if data.IsDelete { - if data.ID > 0 { - if err = r.indexer.Delete(data.ID); err != nil { - log.Error("indexer.Delete: %v", err) - } - } else if len(data.IDs) > 0 { - if err = r.indexer.Delete(data.IDs...); err != nil { - log.Error("indexer.Delete: %v", err) - } - } - time.Sleep(time.Millisecond * 100) - continue - } - - datas = append(datas, &data) - time.Sleep(time.Millisecond * 100) - } -} - -// Push implements Queue -func (r *RedisQueue) Push(data *IndexerData) error { - bs, err := json.Marshal(data) - if err != nil { - return err - } - return r.client.RPush(r.queueName, bs).Err() -} From f12e30929b81f48ee04b34313c31e094677a4bf5 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 16 Nov 2019 18:11:34 +0000 Subject: [PATCH 09/21] Task: Move to generic queue and gracefulise --- modules/setting/setting.go | 1 + modules/setting/task.go | 27 +++---- modules/task/queue.go | 14 ---- modules/task/queue_channel.go | 48 ------------- modules/task/queue_redis.go | 130 ---------------------------------- modules/task/task.go | 40 +++++------ 
6 files changed, 30 insertions(+), 230 deletions(-) delete mode 100644 modules/task/queue.go delete mode 100644 modules/task/queue_channel.go delete mode 100644 modules/task/queue_redis.go diff --git a/modules/setting/setting.go b/modules/setting/setting.go index dbf43f31ee258..a7a916e9c2eac 100644 --- a/modules/setting/setting.go +++ b/modules/setting/setting.go @@ -1090,4 +1090,5 @@ func NewServices() { newMigrationsService() newIndexerService() newTaskService() + newQueueService() } diff --git a/modules/setting/task.go b/modules/setting/task.go index 97704d4a4da68..fa63c669c6629 100644 --- a/modules/setting/task.go +++ b/modules/setting/task.go @@ -4,22 +4,17 @@ package setting -var ( - // Task settings - Task = struct { - QueueType string - QueueLength int - QueueConnStr string - }{ - QueueType: ChannelQueueType, - QueueLength: 1000, - QueueConnStr: "addrs=127.0.0.1:6379 db=0", - } -) +import "code.gitea.io/gitea/modules/queue" func newTaskService() { - sec := Cfg.Section("task") - Task.QueueType = sec.Key("QUEUE_TYPE").MustString(ChannelQueueType) - Task.QueueLength = sec.Key("QUEUE_LENGTH").MustInt(1000) - Task.QueueConnStr = sec.Key("QUEUE_CONN_STR").MustString("addrs=127.0.0.1:6379 db=0") + taskSec := Cfg.Section("task") + queueTaskSec := Cfg.Section("queue.task") + switch taskSec.Key("QUEUE_TYPE").MustString(ChannelQueueType) { + case ChannelQueueType: + queueTaskSec.Key("TYPE").MustString(string(queue.PersistableChannelQueueType)) + case RedisQueueType: + queueTaskSec.Key("TYPE").MustString(string(queue.RedisQueueType)) + } + queueTaskSec.Key("LENGTH").MustInt(taskSec.Key("QUEUE_LENGTH").MustInt(1000)) + queueTaskSec.Key("CONN_STR").MustString(taskSec.Key("QUEUE_CONN_STR").MustString("addrs=127.0.0.1:6379 db=0")) } diff --git a/modules/task/queue.go b/modules/task/queue.go deleted file mode 100644 index ddee0b3d46274..0000000000000 --- a/modules/task/queue.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2019 Gitea. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package task - -import "code.gitea.io/gitea/models" - -// Queue defines an interface to run task queue -type Queue interface { - Run() error - Push(*models.Task) error - Stop() -} diff --git a/modules/task/queue_channel.go b/modules/task/queue_channel.go deleted file mode 100644 index da541f47551f5..0000000000000 --- a/modules/task/queue_channel.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. 
- -package task - -import ( - "code.gitea.io/gitea/models" - "code.gitea.io/gitea/modules/log" -) - -var ( - _ Queue = &ChannelQueue{} -) - -// ChannelQueue implements -type ChannelQueue struct { - queue chan *models.Task -} - -// NewChannelQueue create a memory channel queue -func NewChannelQueue(queueLen int) *ChannelQueue { - return &ChannelQueue{ - queue: make(chan *models.Task, queueLen), - } -} - -// Run starts to run the queue -func (c *ChannelQueue) Run() error { - for task := range c.queue { - err := Run(task) - if err != nil { - log.Error("Run task failed: %s", err.Error()) - } - } - return nil -} - -// Push will push the task ID to queue -func (c *ChannelQueue) Push(task *models.Task) error { - c.queue <- task - return nil -} - -// Stop stop the queue -func (c *ChannelQueue) Stop() { - close(c.queue) -} diff --git a/modules/task/queue_redis.go b/modules/task/queue_redis.go deleted file mode 100644 index 127de0cdbf1d3..0000000000000 --- a/modules/task/queue_redis.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - -package task - -import ( - "encoding/json" - "errors" - "strconv" - "strings" - "time" - - "code.gitea.io/gitea/models" - "code.gitea.io/gitea/modules/log" - - "github.com/go-redis/redis" -) - -var ( - _ Queue = &RedisQueue{} -) - -type redisClient interface { - RPush(key string, args ...interface{}) *redis.IntCmd - LPop(key string) *redis.StringCmd - Ping() *redis.StatusCmd -} - -// RedisQueue redis queue -type RedisQueue struct { - client redisClient - queueName string - closeChan chan bool -} - -func parseConnStr(connStr string) (addrs, password string, dbIdx int, err error) { - fields := strings.Fields(connStr) - for _, f := range fields { - items := strings.SplitN(f, "=", 2) - if len(items) < 2 { - continue - } - switch strings.ToLower(items[0]) { - case "addrs": - addrs = items[1] - case "password": - password = items[1] - case "db": - dbIdx, err = strconv.Atoi(items[1]) - if err != nil { - return - } - } - } - return -} - -// NewRedisQueue creates single redis or cluster redis queue -func NewRedisQueue(addrs string, password string, dbIdx int) (*RedisQueue, error) { - dbs := strings.Split(addrs, ",") - var queue = RedisQueue{ - queueName: "task_queue", - closeChan: make(chan bool), - } - if len(dbs) == 0 { - return nil, errors.New("no redis host found") - } else if len(dbs) == 1 { - queue.client = redis.NewClient(&redis.Options{ - Addr: strings.TrimSpace(dbs[0]), // use default Addr - Password: password, // no password set - DB: dbIdx, // use default DB - }) - } else { - // cluster will ignore db - queue.client = redis.NewClusterClient(&redis.ClusterOptions{ - Addrs: dbs, - Password: password, - }) - } - if err := queue.client.Ping().Err(); err != nil { - return nil, err - } - return &queue, nil -} - -// Run starts to run the queue -func (r *RedisQueue) Run() error { - for { - select { - case <-r.closeChan: - return nil - case <-time.After(time.Millisecond * 100): - } - - bs, err := r.client.LPop(r.queueName).Bytes() - if err != nil { - if err != redis.Nil { - log.Error("LPop failed: %v", err) - } - time.Sleep(time.Millisecond * 100) - continue - } - - var task models.Task - err = json.Unmarshal(bs, &task) - if err != nil { - log.Error("Unmarshal task failed: %s", err.Error()) - } else { - err = Run(&task) - if err != nil { - log.Error("Run task failed: %s", err.Error()) - } - } - } -} - -// Push implements Queue 
-func (r *RedisQueue) Push(task *models.Task) error { - bs, err := json.Marshal(task) - if err != nil { - return err - } - return r.client.RPush(r.queueName, bs).Err() -} - -// Stop stop the queue -func (r *RedisQueue) Stop() { - r.closeChan <- true -} diff --git a/modules/task/task.go b/modules/task/task.go index 64744afe7a4c7..852319d406fba 100644 --- a/modules/task/task.go +++ b/modules/task/task.go @@ -8,14 +8,16 @@ import ( "fmt" "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/migrations/base" + "code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" ) // taskQueue is a global queue of tasks -var taskQueue Queue +var taskQueue queue.Queue // Run a task func Run(t *models.Task) error { @@ -23,38 +25,32 @@ func Run(t *models.Task) error { case structs.TaskTypeMigrateRepo: return runMigrateTask(t) default: - return fmt.Errorf("Unknow task type: %d", t.Type) + return fmt.Errorf("Unknown task type: %d", t.Type) } } // Init will start the service to get all unfinished tasks and run them func Init() error { - switch setting.Task.QueueType { - case setting.ChannelQueueType: - taskQueue = NewChannelQueue(setting.Task.QueueLength) - case setting.RedisQueueType: - var err error - addrs, pass, idx, err := parseConnStr(setting.Task.QueueConnStr) - if err != nil { - return err - } - taskQueue, err = NewRedisQueue(addrs, pass, idx) - if err != nil { - return err - } - default: - return fmt.Errorf("Unsupported task queue type: %v", setting.Task.QueueType) + taskQueue = setting.CreateQueue("task", handle, &models.Task{}) + + if taskQueue == nil { + return fmt.Errorf("Unable to create Task Queue") } - go func() { - if err := taskQueue.Run(); err != nil { - log.Error("taskQueue.Run end failed: %v", err) - } - }() + go graceful.GetManager().RunWithShutdownFns(taskQueue.Run) return nil } +func handle(data ...queue.Data) { + for _, datum := range data { + task := datum.(*models.Task) + if err := Run(task); err != nil { + log.Error("Run task failed: %v", err) + } + } +} + // MigrateRepository add migration repository to task func MigrateRepository(doer, u *models.User, opts base.MigrateOptions) error { task, err := models.CreateMigrateTask(doer, u, opts) From 592e2caf7a9716027a4bcb8542a5ba56692ff2b8 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Mon, 16 Dec 2019 20:51:28 +0000 Subject: [PATCH 10/21] Issues: Standardise the issues indexer queue settings --- modules/indexer/issues/indexer.go | 61 ++----------------------------- modules/setting/queue.go | 33 +++++++++++++++++ 2 files changed, 36 insertions(+), 58 deletions(-) diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index 1fcef59f34f48..8f0593acfff10 100644 --- a/modules/indexer/issues/indexer.go +++ b/modules/indexer/issues/indexer.go @@ -6,7 +6,6 @@ package issues import ( "context" - "encoding/json" "os" "sync" "time" @@ -113,64 +112,10 @@ func InitIssueIndexer(syncReindex bool) { } } - queueType := queue.PersistableChannelQueueType - switch setting.Indexer.IssueQueueType { - case setting.LevelQueueType: - queueType = queue.LevelQueueType - case setting.ChannelQueueType: - queueType = queue.PersistableChannelQueueType - case setting.RedisQueueType: - queueType = queue.RedisQueueType - default: - log.Fatal("Unsupported indexer queue type: %v", - setting.Indexer.IssueQueueType) - } - - name := "issue_indexer_queue" - opts := make(map[string]interface{}) - 
opts["QueueLength"] = setting.Indexer.UpdateQueueLength - opts["BatchLength"] = setting.Indexer.IssueQueueBatchNumber - opts["DataDir"] = setting.Indexer.IssueQueueDir + issueIndexerQueue = setting.CreateQueue("issue_indexer", handler, &IndexerData{}) - addrs, password, dbIdx, err := setting.ParseQueueConnStr(setting.Indexer.IssueQueueConnStr) - if queueType == queue.RedisQueueType && err != nil { - log.Fatal("Unable to parse connection string for RedisQueueType: %s : %v", - setting.Indexer.IssueQueueConnStr, - err) - } - opts["Addresses"] = addrs - opts["Password"] = password - opts["DBIndex"] = dbIdx - opts["QueueName"] = name - opts["Name"] = name - opts["Workers"] = 1 - opts["BlockTimeout"] = 1 * time.Second - opts["BoostTimeout"] = 5 * time.Minute - opts["BoostWorkers"] = 5 - cfg, err := json.Marshal(opts) - if err != nil { - log.Error("Unable to marshall generic options: %v Error: %v", opts, err) - log.Fatal("Unable to create issue indexer queue with type %s: %v", - queueType, - err) - } - log.Debug("Creating issue indexer queue with type %s: configuration: %s", queueType, string(cfg)) - issueIndexerQueue, err = queue.CreateQueue(queueType, handler, cfg, &IndexerData{}) - if err != nil { - issueIndexerQueue, err = queue.CreateQueue(queue.WrappedQueueType, handler, queue.WrappedQueueConfiguration{ - Underlying: queueType, - Timeout: setting.GracefulHammerTime + 30*time.Second, - MaxAttempts: 10, - Config: cfg, - QueueLength: setting.Indexer.UpdateQueueLength, - Name: name, - }, &IndexerData{}) - } - if err != nil { - log.Fatal("Unable to create issue indexer queue with type %s: %v : %v", - queueType, - string(cfg), - err) + if issueIndexerQueue == nil { + log.Fatal("Unable to create issue indexer queue") } default: issueIndexerQueue = &queue.DummyQueue{} diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 0066d5a9467a3..08f6eaf3ee57a 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -6,6 +6,7 @@ package setting import ( "encoding/json" + "fmt" "path" "strconv" "strings" @@ -145,6 +146,38 @@ func newQueueService() { if !hasWorkers { Cfg.Section("queue.notification").Key("WORKERS").SetValue("5") } + + // Now handle the old issue_indexer configuration + section := Cfg.Section("queue.issue_indexer") + issueIndexerSectionMap := map[string]string{} + for _, key := range section.Keys() { + issueIndexerSectionMap[key.Name()] = key.Value() + } + if _, ok := issueIndexerSectionMap["TYPE"]; !ok { + switch Indexer.IssueQueueType { + case LevelQueueType: + section.Key("TYPE").SetValue("level") + case ChannelQueueType: + section.Key("TYPE").SetValue("persistable-channel") + case RedisQueueType: + section.Key("TYPE").SetValue("redis") + default: + log.Fatal("Unsupported indexer queue type: %v", + Indexer.IssueQueueType) + } + } + if _, ok := issueIndexerSectionMap["LENGTH"]; !ok { + section.Key("LENGTH").SetValue(fmt.Sprintf("%d", Indexer.UpdateQueueLength)) + } + if _, ok := issueIndexerSectionMap["BATCH_LENGTH"]; !ok { + section.Key("BATCH_LENGTH").SetValue(fmt.Sprintf("%d", Indexer.IssueQueueBatchNumber)) + } + if _, ok := issueIndexerSectionMap["DATADIR"]; !ok { + section.Key("DATADIR").SetValue(Indexer.IssueQueueDir) + } + if _, ok := issueIndexerSectionMap["CONN_STR"]; !ok { + section.Key("CONN_STR").SetValue(Indexer.IssueQueueConnStr) + } } // ParseQueueConnStr parses a queue connection string From caa439282a17cee33cb724ae8d09138b3a310cd3 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Mon, 16 Dec 2019 22:05:31 +0000 Subject: [PATCH 11/21] Fix 
test
---
 modules/indexer/issues/indexer_test.go | 3 +++
 modules/queue/queue_test.go            | 9 +++++----
 modules/setting/queue.go               | 4 +++-
 modules/setting/setting.go             | 2 +-
 4 files changed, 12 insertions(+), 6 deletions(-)

diff --git a/modules/indexer/issues/indexer_test.go b/modules/indexer/issues/indexer_test.go
index a45fede9ac073..379f1c58905e9 100644
--- a/modules/indexer/issues/indexer_test.go
+++ b/modules/indexer/issues/indexer_test.go
@@ -12,6 +12,7 @@ import (
 
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/setting"
 
 	"github.com/stretchr/testify/assert"
+	"gopkg.in/ini.v1"
 )
@@ -22,10 +23,12 @@ func TestMain(m *testing.M) {
 
 func TestBleveSearchIssues(t *testing.T) {
 	assert.NoError(t, models.PrepareTestDatabase())
+	setting.Cfg = ini.Empty()
 
 	os.RemoveAll(setting.Indexer.IssueQueueDir)
 	os.RemoveAll(setting.Indexer.IssuePath)
 	setting.Indexer.IssueType = "bleve"
+	setting.NewQueueService()
 	InitIssueIndexer(true)
 	time.Sleep(5 * time.Second)
 
diff --git a/modules/queue/queue_test.go b/modules/queue/queue_test.go
index e41643da211c6..3608f68d3d424 100644
--- a/modules/queue/queue_test.go
+++ b/modules/queue/queue_test.go
@@ -4,11 +4,12 @@
 
 package queue
 
-import "testing"
+import (
+	"encoding/json"
+	"testing"
 
-import "github.com/stretchr/testify/assert"
-
-import "encoding/json"
+	"github.com/stretchr/testify/assert"
+)
 
 type testData struct {
 	TestString string
diff --git a/modules/setting/queue.go b/modules/setting/queue.go
index 08f6eaf3ee57a..5cbee851c64e5 100644
--- a/modules/setting/queue.go
+++ b/modules/setting/queue.go
@@ -115,7 +115,9 @@ func getQueueSettings(name string) queueSettings {
 	return q
 }
 
-func newQueueService() {
+// NewQueueService sets up the default settings for Queues
+// This is exported for tests to be able to use the queue
+func NewQueueService() {
 	sec := Cfg.Section("queue")
 	Queue.DataDir = sec.Key("DATADIR").MustString("queues/")
 	if !path.IsAbs(Queue.DataDir) {
diff --git a/modules/setting/setting.go b/modules/setting/setting.go
index a7a916e9c2eac..76609990892b2 100644
--- a/modules/setting/setting.go
+++ b/modules/setting/setting.go
@@ -1090,5 +1090,5 @@ func NewServices() {
 	newMigrationsService()
 	newIndexerService()
 	newTaskService()
-	newQueueService()
+	NewQueueService()
 }

From ba1698af6e973ccaf01b14ce792c6095e1351f39 Mon Sep 17 00:00:00 2001
From: Andrew Thornton
Date: Tue, 17 Dec 2019 19:44:37 +0000
Subject: [PATCH 12/21] Queue: Allow Redis to connect via unix sockets

---
 modules/queue/queue_redis.go |  2 ++
 modules/setting/queue.go     | 10 +++++++---
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/modules/queue/queue_redis.go b/modules/queue/queue_redis.go
index 4f2ceec029f0f..724e22b7b5d54 100644
--- a/modules/queue/queue_redis.go
+++ b/modules/queue/queue_redis.go
@@ -41,6 +41,7 @@ type RedisQueue struct {
 
 // RedisQueueConfiguration is the configuration for the redis queue
 type RedisQueueConfiguration struct {
+	Network   string
 	Addresses string
 	Password  string
 	DBIndex   int
@@ -88,6 +89,7 @@ func NewRedisQueue(handle HandlerFunc, cfg, exemplar interface{}) (Queue, error)
 		return nil, errors.New("no redis host found")
 	} else if len(dbs) == 1 {
 		queue.client = redis.NewClient(&redis.Options{
+			Network:  config.Network,
 			Addr:     strings.TrimSpace(dbs[0]), // use default Addr
 			Password: config.Password,           // no password set
 			DB:       config.DBIndex,            // use default DB
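
With the Network field plumbed through to the go-redis options above, a queue can now reach Redis over a unix socket as well as TCP. As a rough sketch of the connection-string side (the socket path is invented for illustration), the extended parser in the next hunk should yield:

    package main

    import (
    	"fmt"

    	"code.gitea.io/gitea/modules/setting"
    )

    func main() {
    	// Sketch only: the five-value ParseQueueConnStr introduced below splits
    	// the string on whitespace and keys each field by its name.
    	network, addrs, _, dbIdx, err := setting.ParseQueueConnStr("network=unix addrs=/var/run/redis/redis.sock db=0")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(network, addrs, dbIdx) // unix /var/run/redis/redis.sock 0
    }

diff --git a/modules/setting/queue.go b/modules/setting/queue.go
index 5cbee851c64e5..778ddeb217f36 100644
--- a/modules/setting/queue.go
+++ b/modules/setting/queue.go
@@ -22,6 +22,7 @@ type queueSettings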
struct {
 	BatchLength      int
 	ConnectionString string
 	Type             string
+	Network          string
 	Addresses        string
 	Password         string
 	QueueName        string
@@ -47,6 +48,7 @@ func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) qu
 	opts["BatchLength"] = q.BatchLength
 	opts["DataDir"] = q.DataDir
 	opts["Addresses"] = q.Addresses
+	opts["Network"] = q.Network
 	opts["Password"] = q.Password
 	opts["DBIndex"] = q.DBIndex
 	opts["QueueName"] = q.QueueName
@@ -111,7 +113,7 @@ func getQueueSettings(name string) queueSettings {
 	q.BoostWorkers = sec.Key("BOOST_WORKERS").MustInt(Queue.BoostWorkers)
 	q.QueueName = sec.Key("QUEUE_NAME").MustString(Queue.QueueName)
 
-	q.Addresses, q.Password, q.DBIndex, _ = ParseQueueConnStr(q.ConnectionString)
+	q.Network, q.Addresses, q.Password, q.DBIndex, _ = ParseQueueConnStr(q.ConnectionString)
 	return q
 }
 
@@ -128,7 +130,7 @@ func NewQueueService() {
 	Queue.ConnectionString = sec.Key("CONN_STR").MustString(path.Join(AppDataPath, ""))
 	validTypes := queue.RegisteredTypesAsString()
 	Queue.Type = sec.Key("TYPE").In(string(queue.PersistableChannelQueueType), validTypes)
-	Queue.Addresses, Queue.Password, Queue.DBIndex, _ = ParseQueueConnStr(Queue.ConnectionString)
+	Queue.Network, Queue.Addresses, Queue.Password, Queue.DBIndex, _ = ParseQueueConnStr(Queue.ConnectionString)
 	Queue.WrapIfNecessary = sec.Key("WRAP_IF_NECESSARY").MustBool(true)
 	Queue.MaxAttempts = sec.Key("MAX_ATTEMPTS").MustInt(10)
 	Queue.Timeout = sec.Key("TIMEOUT").MustDuration(GracefulHammerTime + 30*time.Second)
@@ -183,7 +185,7 @@ func NewQueueService() {
 }
 
 // ParseQueueConnStr parses a queue connection string
-func ParseQueueConnStr(connStr string) (addrs, password string, dbIdx int, err error) {
+func ParseQueueConnStr(connStr string) (network, addrs, password string, dbIdx int, err error) {
 	fields := strings.Fields(connStr)
 	for _, f := range fields {
 		items := strings.SplitN(f, "=", 2)
@@ -191,6 +193,8 @@ func ParseQueueConnStr(connStr string) (addrs, password string, dbIdx int, err e
 			continue
 		}
 		switch strings.ToLower(items[0]) {
+		case "network":
+			network = items[1]
 		case "addrs":
 			addrs = items[1]
 		case "password":

From f7516936dc6aa9919d41811da33edfe1ff2dd9a8 Mon Sep 17 00:00:00 2001
From: Andrew Thornton
Date: Mon, 23 Dec 2019 10:29:36 +0000
Subject: [PATCH 13/21] Prevent deadlock during early shutdown of issue
 indexer

---
 modules/indexer/issues/indexer.go | 45 +++++++++++++++++++++++++------
 1 file changed, 37 insertions(+), 8 deletions(-)

diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go
index 8f0593acfff10..8676561cf134c 100644
--- a/modules/indexer/issues/indexer.go
+++ b/modules/indexer/issues/indexer.go
@@ -6,6 +6,7 @@ package issues
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"sync"
 	"time"
@@ -51,9 +52,10 @@ type Indexer interface {
 }
 
 type indexerHolder struct {
-	indexer Indexer
-	mutex   sync.RWMutex
-	cond    *sync.Cond
+	indexer   Indexer
+	mutex     sync.RWMutex
+	cond      *sync.Cond
+	cancelled bool
 }
 
 func newIndexerHolder() *indexerHolder {
@@ -62,6 +64,13 @@ func newIndexerHolder() *indexerHolder {
 	return h
 }
 
+func (h *indexerHolder) cancel() {
+	h.mutex.Lock()
+	defer h.mutex.Unlock()
+	h.cancelled = true
+	h.cond.Broadcast()
+}
+
 func (h *indexerHolder) set(indexer Indexer) {
 	h.mutex.Lock()
 	defer h.mutex.Unlock()
@@ -72,7 +81,7 @@ func (h *indexerHolder) set(indexer Indexer) {
 func (h *indexerHolder) get() Indexer {
 	h.mutex.RLock()
 	defer h.mutex.RUnlock()
-	if h.indexer == nil {
+	for h.indexer == nil && !h.cancelled {
 		h.cond.Wait()
 	}
 	return h.indexer
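
The holder above is the heart of the deadlock fix: a handler that calls get() before initialisation completes used to sleep on the condition variable forever if shutdown arrived first, whereas cancel() now wakes every waiter and lets it observe a nil indexer. A minimal, self-contained sketch of the same pattern (a string stands in for the Indexer, and all names are invented):

    package main

    import (
    	"fmt"
    	"sync"
    )

    type holder struct {
    	mutex     sync.RWMutex
    	cond      *sync.Cond
    	value     string
    	cancelled bool
    }

    func newHolder() *holder {
    	h := &holder{}
    	h.cond = sync.NewCond(h.mutex.RLocker())
    	return h
    }

    func (h *holder) get() string {
    	h.mutex.RLock()
    	defer h.mutex.RUnlock()
    	for h.value == "" && !h.cancelled {
    		h.cond.Wait() // releases the read lock while waiting
    	}
    	return h.value // zero value if cancelled before set
    }

    func (h *holder) cancel() {
    	h.mutex.Lock()
    	defer h.mutex.Unlock()
    	h.cancelled = true
    	h.cond.Broadcast() // wake every blocked get()
    }

    func main() {
    	h := newHolder()
    	done := make(chan struct{})
    	go func() {
    		fmt.Printf("got %q\n", h.get()) // would block forever without cancel()
    		close(done)
    	}()
    	h.cancel() // simulate a shutdown that beats initialisation
    	<-done
    }

@@ -93,6 +102,12 @@ func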
InitIssueIndexer(syncReindex bool) { switch setting.Indexer.IssueType { case "bleve": handler := func(data ...queue.Data) { + indexer := holder.get() + if indexer == nil { + log.Error("Unable to get indexer!") + return + } + iData := make([]*IndexerData, 0, setting.Indexer.IssueQueueBatchNumber) for _, datum := range data { indexerData, ok := datum.(*IndexerData) @@ -102,12 +117,12 @@ func InitIssueIndexer(syncReindex bool) { } log.Trace("IndexerData Process: %d %v %t", indexerData.ID, indexerData.IDs, indexerData.IsDelete) if indexerData.IsDelete { - _ = holder.get().Delete(indexerData.IDs...) + _ = indexer.Delete(indexerData.IDs...) continue } iData = append(iData, indexerData) } - if err := holder.get().Index(iData); err != nil { + if err := indexer.Index(iData); err != nil { log.Error("Error whilst indexing: %v Error: %v", iData, err) } } @@ -132,6 +147,7 @@ func InitIssueIndexer(syncReindex bool) { issueIndexer := NewBleveIndexer(setting.Indexer.IssuePath) exist, err := issueIndexer.Init() if err != nil { + holder.cancel() log.Fatal("Unable to initialize Bleve Issue Indexer: %v", err) } populate = !exist @@ -153,6 +169,7 @@ func InitIssueIndexer(syncReindex bool) { issueIndexer := &DBIndexer{} holder.set(issueIndexer) default: + holder.cancel() log.Fatal("Unknown issue indexer type: %s", setting.Indexer.IssueType) } @@ -168,10 +185,14 @@ func InitIssueIndexer(syncReindex bool) { } } waitChannel <- time.Since(start) + close(waitChannel) }() if syncReindex { - <-waitChannel + select { + case <-waitChannel: + case <-graceful.GetManager().IsShutdown(): + } } else if setting.Indexer.StartupTimeout > 0 { go func() { timeout := setting.Indexer.StartupTimeout @@ -181,6 +202,8 @@ func InitIssueIndexer(syncReindex bool) { select { case duration := <-waitChannel: log.Info("Issue Indexer Initialization took %v", duration) + case <-graceful.GetManager().IsShutdown(): + log.Warn("Shutdown occurred before issue index initialisation was complete") case <-time.After(timeout): if shutdownable, ok := issueIndexerQueue.(queue.Shutdownable); ok { shutdownable.Terminate() @@ -293,7 +316,13 @@ func DeleteRepoIssueIndexer(repo *models.Repository) { // SearchIssuesByKeyword search issue ids by keywords and repo id func SearchIssuesByKeyword(repoIDs []int64, keyword string) ([]int64, error) { var issueIDs []int64 - res, err := holder.get().Search(keyword, repoIDs, 1000, 0) + indexer := holder.get() + + if indexer == nil { + log.Error("Unable to get indexer!") + return nil, fmt.Errorf("unable to get issue indexer") + } + res, err := indexer.Search(keyword, repoIDs, 1000, 0) if err != nil { return nil, err } From 9ab1c7671a30241ba1bd48623f211de8f633363a Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sat, 7 Dec 2019 16:36:10 +0000 Subject: [PATCH 14/21] TODO: These are the places with remaining graceful queries --- modules/migrations/migrate.go | 1 + modules/notification/notification.go | 1 + services/mailer/mailer.go | 2 ++ services/mirror/mirror.go | 1 + 4 files changed, 5 insertions(+) diff --git a/modules/migrations/migrate.go b/modules/migrations/migrate.go index fb143f7e29e94..bc9adf4465b07 100644 --- a/modules/migrations/migrate.go +++ b/modules/migrations/migrate.go @@ -29,6 +29,7 @@ func RegisterDownloaderFactory(factory base.DownloaderFactory) { } // MigrateRepository migrate repository according MigrateOptions +// FIXME: graceful: ctx may need to be checked more often func MigrateRepository(ctx context.Context, doer *models.User, ownerName string, opts base.MigrateOptions) (*models.Repository, 
error) { var ( downloader base.Downloader diff --git a/modules/notification/notification.go b/modules/notification/notification.go index f567552df557b..a5b09d97f87eb 100644 --- a/modules/notification/notification.go +++ b/modules/notification/notification.go @@ -21,6 +21,7 @@ var ( ) // RegisterNotifier providers method to receive notify messages +// FIXME: graceful: This may need to become a queue func RegisterNotifier(notifier base.Notifier) { go notifier.Run() notifiers = append(notifiers, notifier) diff --git a/services/mailer/mailer.go b/services/mailer/mailer.go index 2e4aa8d71b1c9..3b96c3ee63a7d 100644 --- a/services/mailer/mailer.go +++ b/services/mailer/mailer.go @@ -292,6 +292,8 @@ func NewContext() { } mailQueue = make(chan *Message, setting.MailService.QueueLength) + + // FIXME: graceful: Needs to become a queue and graceful go processMailQueue() } diff --git a/services/mirror/mirror.go b/services/mirror/mirror.go index 7fc6e97b463a1..6ee0e8d5b4731 100644 --- a/services/mirror/mirror.go +++ b/services/mirror/mirror.go @@ -427,6 +427,7 @@ func syncMirror(repoID string) { } // InitSyncMirrors initializes a go routine to sync the mirrors +// FIXME: graceful: Needs to be a proper queue and graceful func InitSyncMirrors() { go graceful.GetManager().RunWithShutdownContext(SyncMirrors) } From 811d549b36bacfc7ddb9fc1c3426ce0c6343a4f4 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Tue, 26 Nov 2019 20:56:21 +0000 Subject: [PATCH 15/21] Mailer: Make a queue --- modules/setting/queue.go | 11 +++++++++++ services/mailer/mailer.go | 34 +++++++++++++++++----------------- 2 files changed, 28 insertions(+), 17 deletions(-) diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 778ddeb217f36..9053cfb12eefc 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -182,6 +182,17 @@ func NewQueueService() { if _, ok := issueIndexerSectionMap["CONN_STR"]; !ok { section.Key("CONN_STR").SetValue(Indexer.IssueQueueConnStr) } + + hasLength := false + for _, key := range Cfg.Section("queue.mail").Keys() { + if key.Name() == "LENGTH" { + hasLength = true + break + } + } + if !hasLength { + Cfg.Section("queue.mail").Key("LENGTH").SetValue(fmt.Sprintf("%d", Cfg.Section("mailer").Key("SEND_BUFFER_LEN").MustInt(100))) + } } // ParseQueueConnStr parses a queue connection string diff --git a/services/mailer/mailer.go b/services/mailer/mailer.go index 3b96c3ee63a7d..25ad87ab25201 100644 --- a/services/mailer/mailer.go +++ b/services/mailer/mailer.go @@ -18,7 +18,9 @@ import ( "time" "code.gitea.io/gitea/modules/base" + "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/queue" "code.gitea.io/gitea/modules/setting" "github.com/jaytaylor/html2text" @@ -257,18 +259,7 @@ func (s *dummySender) Send(from string, to []string, msg io.WriterTo) error { return nil } -func processMailQueue() { - for msg := range mailQueue { - log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info) - if err := gomail.Send(Sender, msg.Message); err != nil { - log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err) - } else { - log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info) - } - } -} - -var mailQueue chan *Message +var mailQueue queue.Queue // Sender sender for sending mail synchronously var Sender gomail.Sender @@ -291,16 +282,25 @@ func NewContext() { Sender = &dummySender{} } - mailQueue = make(chan *Message, setting.MailService.QueueLength) + mailQueue = setting.CreateQueue("mail", 
func(data ...queue.Data) {
+		for _, datum := range data {
+			msg := datum.(*Message)
+			log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
+			if err := gomail.Send(Sender, msg.Message); err != nil {
+				log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
+			} else {
+				log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
+			}
+		}
+	}, &Message{})
 
-	// FIXME: graceful: Needs to become a queue and graceful
-	go processMailQueue()
+	go graceful.GetManager().RunWithShutdownFns(mailQueue.Run)
 }
 
 // SendAsync send mail asynchronously
 func SendAsync(msg *Message) {
 	go func() {
-		mailQueue <- msg
+		_ = mailQueue.Push(msg)
 	}()
 }
 
@@ -308,7 +308,7 @@ func SendAsyncs(msgs []*Message) {
 	go func() {
 		for _, msg := range msgs {
-			mailQueue <- msg
+			_ = mailQueue.Push(msg)
 		}
 	}()
 }

From 34609c9bb54bda51a57b109af4cd793a91cba98f Mon Sep 17 00:00:00 2001
From: Andrew Thornton
Date: Wed, 20 Nov 2019 14:16:37 +0000
Subject: [PATCH 16/21] Notification: queue ui.go notification-service

---
 modules/notification/ui/ui.go | 42 ++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/modules/notification/ui/ui.go b/modules/notification/ui/ui.go
index f58ebce6d7a69..1f28d37b98a65 100644
--- a/modules/notification/ui/ui.go
+++ b/modules/notification/ui/ui.go
@@ -7,14 +7,17 @@ package ui
 import (
 	"code.gitea.io/gitea/models"
 	"code.gitea.io/gitea/modules/git"
+	"code.gitea.io/gitea/modules/graceful"
 	"code.gitea.io/gitea/modules/log"
 	"code.gitea.io/gitea/modules/notification/base"
+	"code.gitea.io/gitea/modules/queue"
+	"code.gitea.io/gitea/modules/setting"
 )
 
 type (
 	notificationService struct {
 		base.NullNotifier
-		issueQueue chan issueNotificationOpts
+		issueQueue queue.Queue
 	}
 
 	issueNotificationOpts struct {
@@ -30,19 +33,24 @@ var (
 
 // NewNotifier create a new notificationService notifier
 func NewNotifier() base.Notifier {
-	return &notificationService{
-		issueQueue: make(chan issueNotificationOpts, 100),
-	}
+	ns := &notificationService{}
+	ns.issueQueue = setting.CreateQueue("notification-service", ns.handle, issueNotificationOpts{})
+	return ns
 }
 
-func (ns *notificationService) Run() {
-	for opts := range ns.issueQueue {
+func (ns *notificationService) handle(data ...queue.Data) {
+	for _, datum := range data {
+		opts := datum.(issueNotificationOpts)
 		if err := models.CreateOrUpdateIssueNotifications(opts.issueID, opts.commentID, opts.notificationAuthorID); err != nil {
 			log.Error("Was unable to create issue notification: %v", err)
 		}
 	}
 }
 
+func (ns *notificationService) Run() {
+	graceful.GetManager().RunWithShutdownFns(ns.issueQueue.Run)
+}
+
 func (ns *notificationService) NotifyCreateIssueComment(doer *models.User, repo *models.Repository,
 	issue *models.Issue, comment *models.Comment) {
 	var opts = issueNotificationOpts{
@@ -52,35 +60,39 @@ func (ns *notificationService) NotifyCreateIssueComment(doer *models.User, repo
 	if comment != nil {
 		opts.commentID = comment.ID
 	}
-	ns.issueQueue <- opts
+	_ = ns.issueQueue.Push(opts)
 }
 
 func (ns *notificationService) NotifyNewIssue(issue *models.Issue) {
-	ns.issueQueue <- issueNotificationOpts{
+	_ = ns.issueQueue.Push(issueNotificationOpts{
 		issueID:              issue.ID,
 		notificationAuthorID: issue.Poster.ID,
-	}
+	})
 }
 
 func (ns *notificationService) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, actionComment *models.Comment, isClosed bool) {
-	ns.issueQueue <- issueNotificationOpts{
+	_ = ns.issueQueue.Push(issueNotificationOpts{
 		issueID:              issue.ID,
 		notificationAuthorID: doer.ID,
-	}
+	})
 }
 
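The mailer and notification-service conversions above share one shape: hand setting.CreateQueue a handler plus an exemplar of the payload, so persisted items can be unmarshalled back to the right type, type-assert inside the handler, and let the graceful manager drive Run. A hedged sketch of that shape, with an invented payload type and queue name (imports assumed from modules/queue, modules/setting, modules/graceful and modules/log):

    type payload struct {
    	RepoID int64
    }

    func initExampleQueue() queue.Queue {
    	q := setting.CreateQueue("example", func(data ...queue.Data) {
    		for _, datum := range data {
    			p := datum.(*payload) // safe: the exemplar below pins the type
    			log.Trace("processing repo %d", p.RepoID)
    		}
    	}, &payload{})
    	if q == nil {
    		return nil // misconfiguration; callers should treat this as fatal
    	}
    	go graceful.GetManager().RunWithShutdownFns(q.Run)
    	return q
    }

Producers then push values of the exemplar's type, exactly as SendAsync does above: _ = q.Push(&payload{RepoID: 1}).

 func (ns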
*notificationService) NotifyMergePullRequest(pr *models.PullRequest, doer *models.User, gitRepo *git.Repository) { - ns.issueQueue <- issueNotificationOpts{ + _ = ns.issueQueue.Push(issueNotificationOpts{ issueID: pr.Issue.ID, notificationAuthorID: doer.ID, - } + }) } func (ns *notificationService) NotifyNewPullRequest(pr *models.PullRequest) { - ns.issueQueue <- issueNotificationOpts{ + if err := pr.LoadIssue(); err != nil { + log.Error("Unable to load issue: %d for pr: %d: Error: %v", pr.IssueID, pr.ID, err) + return + } + _ = ns.issueQueue.Push(issueNotificationOpts{ issueID: pr.Issue.ID, notificationAuthorID: pr.Issue.PosterID, - } + }) } func (ns *notificationService) NotifyPullRequestReview(pr *models.PullRequest, r *models.Review, c *models.Comment) { @@ -91,5 +103,5 @@ func (ns *notificationService) NotifyPullRequestReview(pr *models.PullRequest, r if c != nil { opts.commentID = c.ID } - ns.issueQueue <- opts + _ = ns.issueQueue.Push(opts) } From d508f4ed7c98c222255c1a2cc8e5412462fa25b9 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Wed, 20 Nov 2019 20:02:48 +0000 Subject: [PATCH 17/21] Notification: move to use a queue --- integrations/integration_test.go | 1 + integrations/notification_helper_test.go | 120 ++ integrations/pull_merge_test.go | 98 +- integrations/sqlite.ini | 7 + models/issue.go | 2 +- models/pull.go | 3 + models/repo_unit.go | 53 + models/repo_watch.go | 1 + models/user.go | 12 +- modules/notification/base/main.go | 247 +++ modules/notification/base/notifier.go | 2 + modules/notification/base/null.go | 146 +- modules/notification/base/queue.go | 1748 ++++++++++++++++++++++ modules/notification/indexer/indexer.go | 4 + modules/notification/mail/mail.go | 12 + modules/notification/notification.go | 23 +- modules/notification/ui/ui.go | 6 +- modules/setting/queue.go | 1 - routers/api/v1/repo/issue_comment.go | 2 + 19 files changed, 2354 insertions(+), 134 deletions(-) create mode 100644 integrations/notification_helper_test.go create mode 100644 modules/notification/base/main.go create mode 100644 modules/notification/base/queue.go diff --git a/integrations/integration_test.go b/integrations/integration_test.go index bf363f3b4ddc3..98759675b2202 100644 --- a/integrations/integration_test.go +++ b/integrations/integration_test.go @@ -178,6 +178,7 @@ func initIntegrationTest() { defer db.Close() } routers.GlobalInit(graceful.GetManager().HammerContext()) + NotifierListenerInit() } func prepareTestEnv(t testing.TB, skip ...int) func() { diff --git a/integrations/notification_helper_test.go b/integrations/notification_helper_test.go new file mode 100644 index 0000000000000..1f7e0819439c5 --- /dev/null +++ b/integrations/notification_helper_test.go @@ -0,0 +1,120 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package integrations + +import ( + "encoding/json" + "reflect" + "sync" + "testing" + + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/log" + "code.gitea.io/gitea/modules/notification" + "code.gitea.io/gitea/modules/notification/base" + "code.gitea.io/gitea/modules/queue" +) + +var notifierListener *NotifierListener + +var once = sync.Once{} + +type NotifierListener struct { + lock sync.RWMutex + callbacks map[string][]*func(string, [][]byte) + notifier base.Notifier +} + +func NotifierListenerInit() { + once.Do(func() { + notifierListener = &NotifierListener{ + callbacks: map[string][]*func(string, [][]byte){}, + } + notifierListener.notifier = base.NewQueueNotifierWithHandle("test-notifier", notifierListener.handle) + notification.RegisterNotifier(notifierListener.notifier) + }) +} + +// Register will register a callback with the provided notifier function +func (n *NotifierListener) Register(functionName string, callback *func(string, [][]byte)) { + n.lock.Lock() + n.callbacks[functionName] = append(n.callbacks[functionName], callback) + n.lock.Unlock() +} + +// Deregister will remove the provided callback from the provided notifier function +func (n *NotifierListener) Deregister(functionName string, callback *func(string, [][]byte)) { + n.lock.Lock() + found := -1 + for i, callbackPtr := range n.callbacks[functionName] { + if callbackPtr == callback { + found = i + break + } + } + if found > -1 { + n.callbacks[functionName] = append(n.callbacks[functionName][0:found], n.callbacks[functionName][found+1:]...) + } + n.lock.Unlock() +} + +// RegisterChannel will register a provided channel with function name and return a function to deregister it +func (n *NotifierListener) RegisterChannel(name string, channel chan<- interface{}, argNumber int, exemplar interface{}) (deregister func()) { + t := reflect.TypeOf(exemplar) + callback := func(_ string, args [][]byte) { + n := reflect.New(t).Elem() + err := json.Unmarshal(args[argNumber], n.Addr().Interface()) + if err != nil { + log.Error("Wrong Argument passed to register channel: %v ", err) + } + channel <- n.Interface() + } + n.Register(name, &callback) + + return func() { + n.Deregister(name, &callback) + } +} + +func (n *NotifierListener) handle(data ...queue.Data) { + n.lock.RLock() + defer n.lock.RUnlock() + for _, datum := range data { + call := datum.(*base.FunctionCall) + callbacks, ok := n.callbacks[call.Name] + if ok && len(callbacks) > 0 { + for _, callback := range callbacks { + (*callback)(call.Name, call.Args) + } + } + } +} + +func TestNotifierListener(t *testing.T) { + defer prepareTestEnv(t)() + + createPullNotified := make(chan interface{}, 10) + deregister := notifierListener.RegisterChannel("NotifyNewPullRequest", createPullNotified, 0, &models.PullRequest{}) + bs, _ := json.Marshal(&models.PullRequest{}) + notifierListener.handle(&base.FunctionCall{ + Name: "NotifyNewPullRequest", + Args: [][]byte{ + bs, + }, + }) + <-createPullNotified + + notifierListener.notifier.NotifyNewPullRequest(&models.PullRequest{}) + <-createPullNotified + + notification.NotifyNewPullRequest(&models.PullRequest{}) + <-createPullNotified + + deregister() + close(createPullNotified) + + notification.NotifyNewPullRequest(&models.PullRequest{}) + // would panic if not deregistered +} diff --git a/integrations/pull_merge_test.go b/integrations/pull_merge_test.go index 218f0e4da66ad..9a3e126b85994 100644 --- a/integrations/pull_merge_test.go +++ b/integrations/pull_merge_test.go @@ -61,9 +61,12 @@ func testPullCleanUp(t *testing.T, 
session *TestSession, user, repo, pullnum str func TestPullMerge(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - hookTasks, err := models.HookTasks(1, 1) //Retrieve previous hook number - assert.NoError(t, err) - hookTasksLenBefore := len(hookTasks) + mergePullNotified := make(chan interface{}, 10) + deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{}) + defer func() { + deferable() + close(mergePullNotified) + }() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") @@ -73,19 +76,44 @@ func TestPullMerge(t *testing.T) { elem := strings.Split(test.RedirectURL(resp), "/") assert.EqualValues(t, "pulls", elem[3]) + testPullMerge(t, session, elem[1], elem[2], elem[4], models.MergeStyleMerge) - hookTasks, err = models.HookTasks(1, 1) - assert.NoError(t, err) - assert.Len(t, hookTasks, hookTasksLenBefore+1) + var prInterface interface{} + select { + case prInterface = <-mergePullNotified: + case <-time.After(500 * time.Millisecond): + assert.Fail(t, "Took too long to notify!") + } + + pr := prInterface.(*models.PullRequest) + pr.LoadBaseRepo() + pr.LoadHeadRepo() + pr.BaseRepo.MustOwner() + pr.HeadRepo.MustOwner() + + assert.EqualValues(t, "user1", pr.HeadRepo.Owner.Name) + assert.EqualValues(t, "repo1", pr.HeadRepo.Name) + assert.EqualValues(t, "user2", pr.BaseRepo.Owner.Name) + assert.EqualValues(t, "repo1", pr.BaseRepo.Name) + + time.Sleep(100 * time.Millisecond) + select { + case prInterface = <-mergePullNotified: + assert.Fail(t, "Should only have one pull create notification: %v", prInterface) + default: + } }) } func TestPullRebase(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - hookTasks, err := models.HookTasks(1, 1) //Retrieve previous hook number - assert.NoError(t, err) - hookTasksLenBefore := len(hookTasks) + mergePullNotified := make(chan interface{}, 10) + deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{}) + defer func() { + deferable() + close(mergePullNotified) + }() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") @@ -96,21 +124,22 @@ func TestPullRebase(t *testing.T) { elem := strings.Split(test.RedirectURL(resp), "/") assert.EqualValues(t, "pulls", elem[3]) testPullMerge(t, session, elem[1], elem[2], elem[4], models.MergeStyleRebase) - - hookTasks, err = models.HookTasks(1, 1) - assert.NoError(t, err) - assert.Len(t, hookTasks, hookTasksLenBefore+1) + select { + case <-mergePullNotified: + case <-time.After(500 * time.Millisecond): + assert.Fail(t, "Took too long to notify!") + } }) } func TestPullRebaseMerge(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() - - hookTasks, err := models.HookTasks(1, 1) //Retrieve previous hook number - assert.NoError(t, err) - hookTasksLenBefore := len(hookTasks) - + mergePullNotified := make(chan interface{}, 10) + deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{}) + defer func() { + deferable() + close(mergePullNotified) + }() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") testEditFile(t, session, "user1", "repo1", "master", "README.md", "Hello, World (Edited)\n") @@ -121,19 +150,22 @@ func TestPullRebaseMerge(t *testing.T) { assert.EqualValues(t, "pulls", elem[3]) testPullMerge(t, session, elem[1], elem[2], elem[4], 
models.MergeStyleRebaseMerge) - hookTasks, err = models.HookTasks(1, 1) - assert.NoError(t, err) - assert.Len(t, hookTasks, hookTasksLenBefore+1) + select { + case <-mergePullNotified: + case <-time.After(500 * time.Millisecond): + assert.Fail(t, "Took too long to notify!") + } }) } func TestPullSquash(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() - - hookTasks, err := models.HookTasks(1, 1) //Retrieve previous hook number - assert.NoError(t, err) - hookTasksLenBefore := len(hookTasks) + mergePullNotified := make(chan interface{}, 10) + deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{}) + defer func() { + deferable() + close(mergePullNotified) + }() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") @@ -146,15 +178,16 @@ func TestPullSquash(t *testing.T) { assert.EqualValues(t, "pulls", elem[3]) testPullMerge(t, session, elem[1], elem[2], elem[4], models.MergeStyleSquash) - hookTasks, err = models.HookTasks(1, 1) - assert.NoError(t, err) - assert.Len(t, hookTasks, hookTasksLenBefore+1) + select { + case <-mergePullNotified: + case <-time.After(500 * time.Millisecond): + assert.Fail(t, "Took too long to notify!") + } }) } func TestPullCleanUpAfterMerge(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") testEditFileToNewBranch(t, session, "user1", "repo1", "master", "feature/test", "README.md", "Hello, World (Edited)\n") @@ -190,7 +223,6 @@ func TestPullCleanUpAfterMerge(t *testing.T) { func TestCantMergeWorkInProgress(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") testEditFile(t, session, "user1", "repo1", "master", "README.md", "Hello, World (Edited)\n") @@ -212,7 +244,6 @@ func TestCantMergeWorkInProgress(t *testing.T) { func TestCantMergeConflict(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") testEditFileToNewBranch(t, session, "user1", "repo1", "master", "conflict", "README.md", "Hello, World (Edited Once)\n") @@ -258,7 +289,6 @@ func TestCantMergeConflict(t *testing.T) { func TestCantMergeUnrelated(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - defer prepareTestEnv(t)() session := loginUser(t, "user1") testRepoFork(t, session, "user2", "repo1", "user1", "repo1") testEditFileToNewBranch(t, session, "user1", "repo1", "master", "base", "README.md", "Hello, World (Edited Twice)\n") diff --git a/integrations/sqlite.ini b/integrations/sqlite.ini index de3355c166b03..b27b123957d3e 100644 --- a/integrations/sqlite.ini +++ b/integrations/sqlite.ini @@ -81,3 +81,10 @@ INTERNAL_TOKEN = eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYmYiOjE0OTI3OTU5ODN9.O [oauth2] JWT_SECRET = KZb_QLUd4fYVyxetjxC4eZkrBgWM2SndOOWDNtgUUko +[queue] +TYPE=channel + +[queue.test-notifier] +BATCH_LENGTH=1 +LENGTH=20 + diff --git a/models/issue.go b/models/issue.go index 75f7bd818aa6e..e7a31b62ae92c 100644 --- a/models/issue.go +++ b/models/issue.go @@ -47,7 +47,7 @@ type Issue struct { IsClosed bool `xorm:"INDEX"` IsRead bool `xorm:"-"` IsPull bool `xorm:"INDEX"` // Indicates whether is a pull request or not. 
-	PullRequest *PullRequest `xorm:"-"`
+	PullRequest *PullRequest `xorm:"-" json:"-"`
 	NumComments int
 	Ref         string
 
diff --git a/models/pull.go b/models/pull.go
index ba9c575775c05..89bd1825521c4 100644
--- a/models/pull.go
+++ b/models/pull.go
@@ -210,6 +210,9 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest {
 		log.Error("loadRepo[%d]: %v", pr.ID, err)
 		return nil
 	}
+	if pr.Issue.PullRequest == nil {
+		pr.Issue.PullRequest = pr
+	}
 	apiIssue := pr.Issue.apiFormat(e)
 	if pr.BaseRepo == nil {
 		pr.BaseRepo, err = getRepositoryByID(e, pr.BaseRepoID)
diff --git a/models/repo_unit.go b/models/repo_unit.go
index a6162a65e516b..dbf198e6b31af 100644
--- a/models/repo_unit.go
+++ b/models/repo_unit.go
@@ -6,6 +6,8 @@ package models
 
 import (
 	"encoding/json"
+	"fmt"
+	"reflect"
 
 	"code.gitea.io/gitea/modules/timeutil"
 
@@ -169,6 +171,58 @@ func (r *RepoUnit) ExternalTrackerConfig() *ExternalTrackerConfig {
 	return r.Config.(*ExternalTrackerConfig)
 }
 
+// MarshalJSON implements json.Marshaler
+func (r *RepoUnit) MarshalJSON() ([]byte, error) {
+	tmp := map[string]interface{}{}
+	tmp["ID"] = r.ID
+	tmp["RepoID"] = r.RepoID
+	tmp["Type"] = r.Type
+	var err error
+	tmp["Config"], err = r.Config.ToDB()
+	if err != nil {
+		return nil, err
+	}
+	tmp["CreatedUnix"] = r.CreatedUnix
+	bs, err := json.Marshal(tmp)
+	return bs, err
+}
+
+// UnmarshalJSON implements json.Unmarshaler
+func (r *RepoUnit) UnmarshalJSON(bs []byte) (err error) {
+	tmp := struct {
+		ID          int64
+		RepoID      int64
+		Type        UnitType
+		Config      []byte
+		CreatedUnix timeutil.TimeStamp
+	}{}
+	err = json.Unmarshal(bs, &tmp)
+	if err != nil {
+		return err
+	}
+
+	r.ID = tmp.ID
+	r.RepoID = tmp.RepoID
+	r.Type = tmp.Type
+	if r.Type != 0 {
+		defer func() {
+			panicked := recover()
+			if panicked == nil {
+				return
+			}
+			// Panicking is not very nice...
+			err = fmt.Errorf("%v", panicked)
+			r.Config = new(UnitConfig)
+		}()
+		typeInt64 := int64(r.Type)
+		typeInterface := reflect.ValueOf(typeInt64).Interface()
+		r.BeforeSet("type", xorm.Cell(&typeInterface))
+		return json.Unmarshal(tmp.Config, &(r.Config))
+	}
+	r.Config = new(UnitConfig)
+	return nil
+}
+
 func getUnitsByRepoID(e Engine, repoID int64) (units []*RepoUnit, err error) {
 	return units, e.Where("repo_id = ?", repoID).Find(&units)
 }
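
Since UnmarshalJSON switches on the Type field (via BeforeSet) to rebuild the concrete Config, MarshalJSON has to emit Type as well, as done above. A round-trip sketch, where unit stands for any populated *models.RepoUnit:

    // Illustrative fragment only.
    bs, err := json.Marshal(unit)
    if err != nil {
    	return err
    }
    var copied models.RepoUnit
    if err := json.Unmarshal(bs, &copied); err != nil {
    	return err
    }
    // copied.Config is now the same concrete type as unit.Config,
    // e.g. *ExternalTrackerConfig rather than the *UnitConfig fallback.

diff --git a/models/repo_watch.go b/models/repo_watch.go
index 7d421081a405f..f9e8ce2e75df0 100644
--- a/models/repo_watch.go
+++ b/models/repo_watch.go
@@ -172,6 +172,7 @@ func notifyWatchers(e Engine, act *Action) error {
 	}
 
 	// Add feed for actioner.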
+ act.ID = 0 act.UserID = act.ActUserID if _, err = e.InsertOne(act); err != nil { return fmt.Errorf("insert new actioner: %v", err) diff --git a/models/user.go b/models/user.go index 0454158de6863..32ca0b95df6c2 100644 --- a/models/user.go +++ b/models/user.go @@ -110,9 +110,9 @@ type User struct { LoginSource int64 `xorm:"NOT NULL DEFAULT 0"` LoginName string Type UserType - OwnedOrgs []*User `xorm:"-"` - Orgs []*User `xorm:"-"` - Repos []*Repository `xorm:"-"` + OwnedOrgs []*User `xorm:"-" json:"-"` + Orgs []*User `xorm:"-" json:"-"` + Repos []*Repository `xorm:"-" json:"-"` Location string Website string Rands string `xorm:"VARCHAR(10)"` @@ -151,9 +151,9 @@ type User struct { // For organization NumTeams int NumMembers int - Teams []*Team `xorm:"-"` - Members UserList `xorm:"-"` - MembersIsPublic map[int64]bool `xorm:"-"` + Teams []*Team `xorm:"-" json:"-"` + Members UserList `xorm:"-" json:"-"` + MembersIsPublic map[int64]bool `xorm:"-" json:"-"` Visibility structs.VisibleType `xorm:"NOT NULL DEFAULT 0"` RepoAdminChangeTeamAccess bool `xorm:"NOT NULL DEFAULT false"` diff --git a/modules/notification/base/main.go b/modules/notification/base/main.go new file mode 100644 index 0000000000000..256566e7844fa --- /dev/null +++ b/modules/notification/base/main.go @@ -0,0 +1,247 @@ +// Copyright 2019 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import ( + "bytes" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/token" + "io/ioutil" + "strings" + "text/template" + "time" +) + +type funcDef struct { + Name string + Args []funcDefArg +} + +type funcDefArg struct { + Name string + Type string +} + +func main() { + fset := token.NewFileSet() // positions are relative to fset + f, err := parser.ParseFile(fset, "notifier.go", nil, 0) + if err != nil { + panic(err) + } + funcs := make([]funcDef, 0) + //currentFunc := funcDef{} + ast.Inspect(f, func(n ast.Node) bool { + spec, ok := n.(*ast.TypeSpec) + if !ok || spec.Name.Name != "Notifier" { + return true + } + child, ok := spec.Type.(*ast.InterfaceType) + if !ok { + return false + } + funcs = make([]funcDef, len(child.Methods.List)) + for i, method := range child.Methods.List { + methodFuncDef := method.Type.(*ast.FuncType) + def := funcDef{} + def.Name = method.Names[0].Name + def.Args = make([]funcDefArg, 0, len(methodFuncDef.Params.List)) + for j, param := range methodFuncDef.Params.List { + defaultName := fmt.Sprintf("unknown%d", j) + sb := strings.Builder{} + format.Node(&sb, fset, param.Type) + + if len(param.Names) == 0 { + def.Args = append(def.Args, funcDefArg{ + Name: defaultName, + Type: sb.String(), + }) + } else { + for _, ident := range param.Names { + def.Args = append(def.Args, funcDefArg{ + Name: ident.Name, + Type: sb.String(), + }) + } + } + } + funcs[i] = def + } + + return true + }) + + buf := bytes.Buffer{} + nullTemplate.Execute(&buf, struct { + Timestamp time.Time + Funcs []funcDef + }{ + Timestamp: time.Now(), + Funcs: funcs, + }) + + bs, err := format.Source(buf.Bytes()) + if err != nil { + panic(err) + } + + err = ioutil.WriteFile("null.go", bs, 0644) + if err != nil { + panic(err) + } + + buf = bytes.Buffer{} + queueTemplate.Execute(&buf, struct { + Timestamp time.Time + Funcs []funcDef + }{ + Timestamp: time.Now(), + Funcs: funcs, + }) + + bs, err = format.Source(buf.Bytes()) + if err != nil { + ioutil.WriteFile("queue.go", buf.Bytes(), 0644) + panic(err) + } + + err = 
ioutil.WriteFile("queue.go", bs, 0644)
+	if err != nil {
+		panic(err)
+	}
+
+}
+
+var queueTemplate = template.Must(template.New("").Parse(`
+// Code generated by go generate; DO NOT EDIT.
+package base
+
+import (
+	"encoding/json"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/git"
+	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/queue"
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// FunctionCall represents a function call with json.Marshaled arguments
+type FunctionCall struct {
+	Name string
+	Args [][]byte
+}
+
+type QueueNotifier struct {
+	name      string
+	notifiers []Notifier
+	internal  queue.Queue
+}
+
+var (
+	_ Notifier = &QueueNotifier{}
+)
+
+func NewQueueNotifier(name string, notifiers []Notifier) Notifier {
+	q := &QueueNotifier{
+		name:      name,
+		notifiers: notifiers,
+	}
+	q.internal = setting.CreateQueue(name, q.handle, &FunctionCall{})
+	return q
+}
+
+func NewQueueNotifierWithHandle(name string, handle queue.HandlerFunc) Notifier {
+	q := &QueueNotifier{
+		name: name,
+	}
+	q.internal = setting.CreateQueue(name, handle, &FunctionCall{})
+	return q
+}
+
+func (q *QueueNotifier) handle(data ...queue.Data) {
+	for _, datum := range data {
+		call := datum.(*FunctionCall)
+		var err error
+		switch call.Name {
+		{{- range .Funcs }}
+		case "{{.Name}}":
+			{{$p := .Name}}
+			{{- range $i, $e := .Args }}
+			var {{$e.Name}} {{$e.Type}}
+			err = json.Unmarshal(call.Args[{{$i}}], &{{$e.Name}})
+			if err != nil {
+				log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[{{$i}}]), "{{$e.Type}}", "{{$p}}", err)
+				continue
+			}
+			{{- end }}
+			for _, notifier := range q.notifiers {
+				notifier.{{.Name}}({{- range $i, $e := .Args}}{{ if $i }}, {{ end }}{{$e.Name}}{{end}})
+			}
+		{{- end }}
+		default:
+			log.Error("Unknown notifier function %s with %d arguments", call.Name, len(call.Args))
+		}
+	}
+}
+
+func (q *QueueNotifier) Run() {
+	for _, notifier := range q.notifiers {
+		go notifier.Run()
+	}
+	graceful.GetManager().RunWithShutdownFns(q.internal.Run)
+}
+{{- range .Funcs}}
+{{if ne .Name "Run"}}
+
+// {{ .Name }} marshals its arguments into a FunctionCall and queues it
+func (q *QueueNotifier) {{ .Name }}({{ range $i, $e := .Args }}{{ if $i }}, {{ end }}{{$e.Name}} {{$e.Type}}{{end}}) {
+	args := make([][]byte, 0)
+	var err error
+	var bs []byte
+	{{- range .Args }}
+	bs, err = json.Marshal(&{{.Name}})
+	if err != nil {
+		log.Error("Unable to marshal {{.Name}}: %v", err)
+		return
+	}
+	args = append(args, bs)
+	{{- end }}
+
+	q.internal.Push(&FunctionCall{
+		Name: "{{.Name}}",
+		Args: args,
+	})
+}
+{{end}}
+{{- end }}
+`))
+
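Each Notifier method is thus rendered twice: a proxy that json.Marshals every argument into a FunctionCall and pushes it onto the queue, and a matching case in handle() that unmarshals the arguments and fans them out to the wrapped notifiers. A toy fragment of that round trip (the notifiers slice is assumed to be in scope):

    // Proxy side: serialise the call.
    call := &FunctionCall{Name: "NotifyNewIssue"}
    bs, err := json.Marshal(&models.Issue{ID: 7})
    if err != nil {
    	return
    }
    call.Args = append(call.Args, bs)

    // Handler side: restore the argument and fan out.
    var issue *models.Issue
    if err := json.Unmarshal(call.Args[0], &issue); err == nil {
    	for _, notifier := range notifiers {
    		notifier.NotifyNewIssue(issue)
    	}
    }

+var nullTemplate = template.Must(template.New("").Parse(`// Code generated by go generate; DO NOT EDIT.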
+package base + +import ( + "code.gitea.io/gitea/models" + "code.gitea.io/gitea/modules/git" +) + +// NullNotifier implements a blank notifier +type NullNotifier struct { +} + +var ( + _ Notifier = &NullNotifier{} +) +{{- range .Funcs}} + +// {{ .Name }} is a placeholder function +func (*NullNotifier) {{ .Name }}({{ range $i, $e := .Args }}{{ if $i }}, {{ end }}{{$e.Name}} {{$e.Type}}{{end}}) {} +{{- end }} +`)) diff --git a/modules/notification/base/notifier.go b/modules/notification/base/notifier.go index 48846b3446cea..c19b4fe5a720f 100644 --- a/modules/notification/base/notifier.go +++ b/modules/notification/base/notifier.go @@ -9,6 +9,8 @@ import ( "code.gitea.io/gitea/modules/git" ) +//go:generate go run -mod=vendor main.go + // Notifier defines an interface to notify receiver type Notifier interface { Run() diff --git a/modules/notification/base/null.go b/modules/notification/base/null.go index bea4e55277212..99fe789fe5dc4 100644 --- a/modules/notification/base/null.go +++ b/modules/notification/base/null.go @@ -1,7 +1,4 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Use of this source code is governed by a MIT-style -// license that can be found in the LICENSE file. - +// Code generated by go generate; DO NOT EDIT. package base import ( @@ -17,132 +14,119 @@ var ( _ Notifier = &NullNotifier{} ) -// Run places a place holder function -func (*NullNotifier) Run() { -} +// Run is a placeholder function +func (*NullNotifier) Run() {} -// NotifyCreateIssueComment places a place holder function -func (*NullNotifier) NotifyCreateIssueComment(doer *models.User, repo *models.Repository, - issue *models.Issue, comment *models.Comment) { +// NotifyCreateRepository is a placeholder function +func (*NullNotifier) NotifyCreateRepository(doer *models.User, u *models.User, repo *models.Repository) { } -// NotifyNewIssue places a place holder function -func (*NullNotifier) NotifyNewIssue(issue *models.Issue) { +// NotifyMigrateRepository is a placeholder function +func (*NullNotifier) NotifyMigrateRepository(doer *models.User, u *models.User, repo *models.Repository) { } -// NotifyIssueChangeStatus places a place holder function -func (*NullNotifier) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, actionComment *models.Comment, isClosed bool) { -} +// NotifyDeleteRepository is a placeholder function +func (*NullNotifier) NotifyDeleteRepository(doer *models.User, repo *models.Repository) {} -// NotifyNewPullRequest places a place holder function -func (*NullNotifier) NotifyNewPullRequest(pr *models.PullRequest) { +// NotifyForkRepository is a placeholder function +func (*NullNotifier) NotifyForkRepository(doer *models.User, oldRepo *models.Repository, repo *models.Repository) { } -// NotifyPullRequestReview places a place holder function -func (*NullNotifier) NotifyPullRequestReview(pr *models.PullRequest, r *models.Review, comment *models.Comment) { +// NotifyRenameRepository is a placeholder function +func (*NullNotifier) NotifyRenameRepository(doer *models.User, repo *models.Repository, oldRepoName string) { } -// NotifyMergePullRequest places a place holder function -func (*NullNotifier) NotifyMergePullRequest(pr *models.PullRequest, doer *models.User, baseRepo *git.Repository) { +// NotifyTransferRepository is a placeholder function +func (*NullNotifier) NotifyTransferRepository(doer *models.User, repo *models.Repository, oldOwnerName string) { } -// NotifyPullRequestSynchronized places a place holder function -func (*NullNotifier) 
NotifyPullRequestSynchronized(doer *models.User, pr *models.PullRequest) { -} +// NotifyNewIssue is a placeholder function +func (*NullNotifier) NotifyNewIssue(unknown0 *models.Issue) {} -// NotifyPullRequestChangeTargetBranch places a place holder function -func (*NullNotifier) NotifyPullRequestChangeTargetBranch(doer *models.User, pr *models.PullRequest, oldBranch string) { +// NotifyIssueChangeStatus is a placeholder function +func (*NullNotifier) NotifyIssueChangeStatus(unknown0 *models.User, unknown1 *models.Issue, unknown2 *models.Comment, unknown3 bool) { } -// NotifyUpdateComment places a place holder function -func (*NullNotifier) NotifyUpdateComment(doer *models.User, c *models.Comment, oldContent string) { +// NotifyIssueChangeMilestone is a placeholder function +func (*NullNotifier) NotifyIssueChangeMilestone(doer *models.User, issue *models.Issue, oldMilestoneID int64) { } -// NotifyDeleteComment places a place holder function -func (*NullNotifier) NotifyDeleteComment(doer *models.User, c *models.Comment) { +// NotifyIssueChangeAssignee is a placeholder function +func (*NullNotifier) NotifyIssueChangeAssignee(doer *models.User, issue *models.Issue, assignee *models.User, removed bool, comment *models.Comment) { } -// NotifyNewRelease places a place holder function -func (*NullNotifier) NotifyNewRelease(rel *models.Release) { +// NotifyIssueChangeContent is a placeholder function +func (*NullNotifier) NotifyIssueChangeContent(doer *models.User, issue *models.Issue, oldContent string) { } -// NotifyUpdateRelease places a place holder function -func (*NullNotifier) NotifyUpdateRelease(doer *models.User, rel *models.Release) { -} +// NotifyIssueClearLabels is a placeholder function +func (*NullNotifier) NotifyIssueClearLabels(doer *models.User, issue *models.Issue) {} -// NotifyDeleteRelease places a place holder function -func (*NullNotifier) NotifyDeleteRelease(doer *models.User, rel *models.Release) { -} +// NotifyIssueChangeTitle is a placeholder function +func (*NullNotifier) NotifyIssueChangeTitle(doer *models.User, issue *models.Issue, oldTitle string) {} -// NotifyIssueChangeMilestone places a place holder function -func (*NullNotifier) NotifyIssueChangeMilestone(doer *models.User, issue *models.Issue, oldMilestoneID int64) { +// NotifyIssueChangeLabels is a placeholder function +func (*NullNotifier) NotifyIssueChangeLabels(doer *models.User, issue *models.Issue, addedLabels []*models.Label, removedLabels []*models.Label) { } -// NotifyIssueChangeContent places a place holder function -func (*NullNotifier) NotifyIssueChangeContent(doer *models.User, issue *models.Issue, oldContent string) { -} +// NotifyNewPullRequest is a placeholder function +func (*NullNotifier) NotifyNewPullRequest(unknown0 *models.PullRequest) {} -// NotifyIssueChangeAssignee places a place holder function -func (*NullNotifier) NotifyIssueChangeAssignee(doer *models.User, issue *models.Issue, assignee *models.User, removed bool, comment *models.Comment) { +// NotifyMergePullRequest is a placeholder function +func (*NullNotifier) NotifyMergePullRequest(unknown0 *models.PullRequest, unknown1 *models.User, unknown2 *git.Repository) { } -// NotifyIssueClearLabels places a place holder function -func (*NullNotifier) NotifyIssueClearLabels(doer *models.User, issue *models.Issue) { -} +// NotifyPullRequestSynchronized is a placeholder function +func (*NullNotifier) NotifyPullRequestSynchronized(doer *models.User, pr *models.PullRequest) {} -// NotifyIssueChangeTitle places a place holder function -func 
(*NullNotifier) NotifyIssueChangeTitle(doer *models.User, issue *models.Issue, oldTitle string) { +// NotifyPullRequestReview is a placeholder function +func (*NullNotifier) NotifyPullRequestReview(unknown0 *models.PullRequest, unknown1 *models.Review, unknown2 *models.Comment) { } -// NotifyIssueChangeLabels places a place holder function -func (*NullNotifier) NotifyIssueChangeLabels(doer *models.User, issue *models.Issue, - addedLabels []*models.Label, removedLabels []*models.Label) { +// NotifyPullRequestChangeTargetBranch is a placeholder function +func (*NullNotifier) NotifyPullRequestChangeTargetBranch(doer *models.User, pr *models.PullRequest, oldBranch string) { } -// NotifyCreateRepository places a place holder function -func (*NullNotifier) NotifyCreateRepository(doer *models.User, u *models.User, repo *models.Repository) { +// NotifyCreateIssueComment is a placeholder function +func (*NullNotifier) NotifyCreateIssueComment(unknown0 *models.User, unknown1 *models.Repository, unknown2 *models.Issue, unknown3 *models.Comment) { } -// NotifyDeleteRepository places a place holder function -func (*NullNotifier) NotifyDeleteRepository(doer *models.User, repo *models.Repository) { +// NotifyUpdateComment is a placeholder function +func (*NullNotifier) NotifyUpdateComment(unknown0 *models.User, unknown1 *models.Comment, unknown2 string) { } -// NotifyForkRepository places a place holder function -func (*NullNotifier) NotifyForkRepository(doer *models.User, oldRepo, repo *models.Repository) { -} +// NotifyDeleteComment is a placeholder function +func (*NullNotifier) NotifyDeleteComment(unknown0 *models.User, unknown1 *models.Comment) {} -// NotifyMigrateRepository places a place holder function -func (*NullNotifier) NotifyMigrateRepository(doer *models.User, u *models.User, repo *models.Repository) { -} +// NotifyNewRelease is a placeholder function +func (*NullNotifier) NotifyNewRelease(rel *models.Release) {} -// NotifyPushCommits notifies commits pushed to notifiers -func (*NullNotifier) NotifyPushCommits(pusher *models.User, repo *models.Repository, refName, oldCommitID, newCommitID string, commits *models.PushCommits) { -} +// NotifyUpdateRelease is a placeholder function +func (*NullNotifier) NotifyUpdateRelease(doer *models.User, rel *models.Release) {} -// NotifyCreateRef notifies branch or tag creation to notifiers -func (*NullNotifier) NotifyCreateRef(doer *models.User, repo *models.Repository, refType, refFullName string) { -} +// NotifyDeleteRelease is a placeholder function +func (*NullNotifier) NotifyDeleteRelease(doer *models.User, rel *models.Release) {} -// NotifyDeleteRef notifies branch or tag deleteion to notifiers -func (*NullNotifier) NotifyDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) { +// NotifyPushCommits is a placeholder function +func (*NullNotifier) NotifyPushCommits(pusher *models.User, repo *models.Repository, refName string, oldCommitID string, newCommitID string, commits *models.PushCommits) { } -// NotifyRenameRepository places a place holder function -func (*NullNotifier) NotifyRenameRepository(doer *models.User, repo *models.Repository, oldRepoName string) { +// NotifyCreateRef is a placeholder function +func (*NullNotifier) NotifyCreateRef(doer *models.User, repo *models.Repository, refType string, refFullName string) { } -// NotifyTransferRepository places a place holder function -func (*NullNotifier) NotifyTransferRepository(doer *models.User, repo *models.Repository, oldOwnerName string) { +// NotifyDeleteRef is 
a placeholder function
+func (*NullNotifier) NotifyDeleteRef(doer *models.User, repo *models.Repository, refType string, refFullName string) {
 }
 
-// NotifySyncPushCommits places a place holder function
-func (*NullNotifier) NotifySyncPushCommits(pusher *models.User, repo *models.Repository, refName, oldCommitID, newCommitID string, commits *models.PushCommits) {
+// NotifySyncPushCommits is a placeholder function
+func (*NullNotifier) NotifySyncPushCommits(pusher *models.User, repo *models.Repository, refName string, oldCommitID string, newCommitID string, commits *models.PushCommits) {
 }
 
-// NotifySyncCreateRef places a place holder function
-func (*NullNotifier) NotifySyncCreateRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
+// NotifySyncCreateRef is a placeholder function
+func (*NullNotifier) NotifySyncCreateRef(doer *models.User, repo *models.Repository, refType string, refFullName string) {
 }
 
-// NotifySyncDeleteRef places a place holder function
-func (*NullNotifier) NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType, refFullName string) {
+// NotifySyncDeleteRef is a placeholder function
+func (*NullNotifier) NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType string, refFullName string) {
 }
diff --git a/modules/notification/base/queue.go b/modules/notification/base/queue.go
new file mode 100644
index 0000000000000..5c476b0838a6f
--- /dev/null
+++ b/modules/notification/base/queue.go
@@ -0,0 +1,1748 @@
+// Code generated by go generate; DO NOT EDIT.
+package base
+
+import (
+	"encoding/json"
+
+	"code.gitea.io/gitea/models"
+	"code.gitea.io/gitea/modules/git"
+	"code.gitea.io/gitea/modules/graceful"
+	"code.gitea.io/gitea/modules/log"
+	"code.gitea.io/gitea/modules/queue"
+	"code.gitea.io/gitea/modules/setting"
+)
+
+// FunctionCall represents a function call with json.Marshaled arguments
+type FunctionCall struct {
+	Name string
+	Args [][]byte
+}
+
+type QueueNotifier struct {
+	name      string
+	notifiers []Notifier
+	internal  queue.Queue
+}
+
+var (
+	_ Notifier = &QueueNotifier{}
+)
+
+func NewQueueNotifier(name string, notifiers []Notifier) Notifier {
+	q := &QueueNotifier{
+		name:      name,
+		notifiers: notifiers,
+	}
+	q.internal = setting.CreateQueue(name, q.handle, &FunctionCall{})
+	return q
+}
+
+func NewQueueNotifierWithHandle(name string, handle queue.HandlerFunc) Notifier {
+	q := &QueueNotifier{
+		name: name,
+	}
+	q.internal = setting.CreateQueue(name, handle, &FunctionCall{})
+	return q
+}
+
+func (q *QueueNotifier) handle(data ...queue.Data) {
+	for _, datum := range data {
+		call := datum.(*FunctionCall)
+		var err error
+		switch call.Name {
+		case "Run":
+
+			for _, notifier := range q.notifiers {
+				notifier.Run()
+			}
+		case "NotifyCreateRepository":
+
+			var doer *models.User
+			err = json.Unmarshal(call.Args[0], &doer)
+			if err != nil {
+				log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyCreateRepository", err)
+				continue
+			}
+			var u *models.User
+			err = json.Unmarshal(call.Args[1], &u)
+			if err != nil {
+				log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.User", "NotifyCreateRepository", err)
+				continue
+			}
+			var repo *models.Repository
+			err = json.Unmarshal(call.Args[2], &repo)
+			if err != nil {
+				log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Repository", "NotifyCreateRepository", err)
+				continue
+			}
+			for _, notifier := range q.notifiers {
+
notifier.NotifyCreateRepository(doer, u, repo) + } + case "NotifyMigrateRepository": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyMigrateRepository", err) + continue + } + var u *models.User + err = json.Unmarshal(call.Args[1], &u) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.User", "NotifyMigrateRepository", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[2], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Repository", "NotifyMigrateRepository", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyMigrateRepository(doer, u, repo) + } + case "NotifyDeleteRepository": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyDeleteRepository", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyDeleteRepository", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyDeleteRepository(doer, repo) + } + case "NotifyForkRepository": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyForkRepository", err) + continue + } + var oldRepo *models.Repository + err = json.Unmarshal(call.Args[1], &oldRepo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyForkRepository", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[2], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Repository", "NotifyForkRepository", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyForkRepository(doer, oldRepo, repo) + } + case "NotifyRenameRepository": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyRenameRepository", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyRenameRepository", err) + continue + } + var oldRepoName string + err = json.Unmarshal(call.Args[2], &oldRepoName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyRenameRepository", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyRenameRepository(doer, repo, oldRepoName) + } + case "NotifyTransferRepository": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyTransferRepository", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + 
log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyTransferRepository", err) + continue + } + var oldOwnerName string + err = json.Unmarshal(call.Args[2], &oldOwnerName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyTransferRepository", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyTransferRepository(doer, repo, oldOwnerName) + } + case "NotifyNewIssue": + + var unknown0 *models.Issue + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.Issue", "NotifyNewIssue", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyNewIssue(unknown0) + } + case "NotifyIssueChangeStatus": + + var unknown0 *models.User + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeStatus", err) + continue + } + var unknown1 *models.Issue + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeStatus", err) + continue + } + var unknown2 *models.Comment + err = json.Unmarshal(call.Args[2], &unknown2) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Comment", "NotifyIssueChangeStatus", err) + continue + } + var unknown3 bool + err = json.Unmarshal(call.Args[3], &unknown3) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "bool", "NotifyIssueChangeStatus", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeStatus(unknown0, unknown1, unknown2, unknown3) + } + case "NotifyIssueChangeMilestone": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeMilestone", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeMilestone", err) + continue + } + var oldMilestoneID int64 + err = json.Unmarshal(call.Args[2], &oldMilestoneID) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "int64", "NotifyIssueChangeMilestone", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeMilestone(doer, issue, oldMilestoneID) + } + case "NotifyIssueChangeAssignee": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeAssignee", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeAssignee", err) + continue + } + var assignee *models.User + err = json.Unmarshal(call.Args[2], &assignee) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.User", "NotifyIssueChangeAssignee", 
err) + continue + } + var removed bool + err = json.Unmarshal(call.Args[3], &removed) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "bool", "NotifyIssueChangeAssignee", err) + continue + } + var comment *models.Comment + err = json.Unmarshal(call.Args[4], &comment) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[4]), "*models.Comment", "NotifyIssueChangeAssignee", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeAssignee(doer, issue, assignee, removed, comment) + } + case "NotifyIssueChangeContent": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeContent", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeContent", err) + continue + } + var oldContent string + err = json.Unmarshal(call.Args[2], &oldContent) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyIssueChangeContent", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeContent(doer, issue, oldContent) + } + case "NotifyIssueClearLabels": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueClearLabels", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueClearLabels", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueClearLabels(doer, issue) + } + case "NotifyIssueChangeTitle": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeTitle", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeTitle", err) + continue + } + var oldTitle string + err = json.Unmarshal(call.Args[2], &oldTitle) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyIssueChangeTitle", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeTitle(doer, issue, oldTitle) + } + case "NotifyIssueChangeLabels": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyIssueChangeLabels", err) + continue + } + var issue *models.Issue + err = json.Unmarshal(call.Args[1], &issue) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Issue", "NotifyIssueChangeLabels", err) + continue + } + var addedLabels []*models.Label + err = json.Unmarshal(call.Args[2], &addedLabels) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to 
%s: %v", string(call.Args[2]), "[]*models.Label", "NotifyIssueChangeLabels", err) + continue + } + var removedLabels []*models.Label + err = json.Unmarshal(call.Args[3], &removedLabels) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "[]*models.Label", "NotifyIssueChangeLabels", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyIssueChangeLabels(doer, issue, addedLabels, removedLabels) + } + case "NotifyNewPullRequest": + + var unknown0 *models.PullRequest + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.PullRequest", "NotifyNewPullRequest", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyNewPullRequest(unknown0) + } + case "NotifyMergePullRequest": + + var unknown0 *models.PullRequest + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.PullRequest", "NotifyMergePullRequest", err) + continue + } + var unknown1 *models.User + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.User", "NotifyMergePullRequest", err) + continue + } + var unknown2 *git.Repository + err = json.Unmarshal(call.Args[2], &unknown2) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*git.Repository", "NotifyMergePullRequest", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyMergePullRequest(unknown0, unknown1, unknown2) + } + case "NotifyPullRequestSynchronized": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyPullRequestSynchronized", err) + continue + } + var pr *models.PullRequest + err = json.Unmarshal(call.Args[1], &pr) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.PullRequest", "NotifyPullRequestSynchronized", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyPullRequestSynchronized(doer, pr) + } + case "NotifyPullRequestReview": + + var unknown0 *models.PullRequest + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.PullRequest", "NotifyPullRequestReview", err) + continue + } + var unknown1 *models.Review + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Review", "NotifyPullRequestReview", err) + continue + } + var unknown2 *models.Comment + err = json.Unmarshal(call.Args[2], &unknown2) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Comment", "NotifyPullRequestReview", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyPullRequestReview(unknown0, unknown1, unknown2) + } + case "NotifyPullRequestChangeTargetBranch": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyPullRequestChangeTargetBranch", err) + continue + } + var pr 
*models.PullRequest + err = json.Unmarshal(call.Args[1], &pr) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.PullRequest", "NotifyPullRequestChangeTargetBranch", err) + continue + } + var oldBranch string + err = json.Unmarshal(call.Args[2], &oldBranch) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyPullRequestChangeTargetBranch", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyPullRequestChangeTargetBranch(doer, pr, oldBranch) + } + case "NotifyCreateIssueComment": + + var unknown0 *models.User + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyCreateIssueComment", err) + continue + } + var unknown1 *models.Repository + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyCreateIssueComment", err) + continue + } + var unknown2 *models.Issue + err = json.Unmarshal(call.Args[2], &unknown2) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "*models.Issue", "NotifyCreateIssueComment", err) + continue + } + var unknown3 *models.Comment + err = json.Unmarshal(call.Args[3], &unknown3) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "*models.Comment", "NotifyCreateIssueComment", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyCreateIssueComment(unknown0, unknown1, unknown2, unknown3) + } + case "NotifyUpdateComment": + + var unknown0 *models.User + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyUpdateComment", err) + continue + } + var unknown1 *models.Comment + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Comment", "NotifyUpdateComment", err) + continue + } + var unknown2 string + err = json.Unmarshal(call.Args[2], &unknown2) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyUpdateComment", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyUpdateComment(unknown0, unknown1, unknown2) + } + case "NotifyDeleteComment": + + var unknown0 *models.User + err = json.Unmarshal(call.Args[0], &unknown0) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyDeleteComment", err) + continue + } + var unknown1 *models.Comment + err = json.Unmarshal(call.Args[1], &unknown1) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Comment", "NotifyDeleteComment", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyDeleteComment(unknown0, unknown1) + } + case "NotifyNewRelease": + + var rel *models.Release + err = json.Unmarshal(call.Args[0], &rel) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.Release", "NotifyNewRelease", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyNewRelease(rel) + } + case 
"NotifyUpdateRelease": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyUpdateRelease", err) + continue + } + var rel *models.Release + err = json.Unmarshal(call.Args[1], &rel) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Release", "NotifyUpdateRelease", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyUpdateRelease(doer, rel) + } + case "NotifyDeleteRelease": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyDeleteRelease", err) + continue + } + var rel *models.Release + err = json.Unmarshal(call.Args[1], &rel) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Release", "NotifyDeleteRelease", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyDeleteRelease(doer, rel) + } + case "NotifyPushCommits": + + var pusher *models.User + err = json.Unmarshal(call.Args[0], &pusher) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyPushCommits", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyPushCommits", err) + continue + } + var refName string + err = json.Unmarshal(call.Args[2], &refName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyPushCommits", err) + continue + } + var oldCommitID string + err = json.Unmarshal(call.Args[3], &oldCommitID) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "string", "NotifyPushCommits", err) + continue + } + var newCommitID string + err = json.Unmarshal(call.Args[4], &newCommitID) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[4]), "string", "NotifyPushCommits", err) + continue + } + var commits *models.PushCommits + err = json.Unmarshal(call.Args[5], &commits) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[5]), "*models.PushCommits", "NotifyPushCommits", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyPushCommits(pusher, repo, refName, oldCommitID, newCommitID, commits) + } + case "NotifyCreateRef": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyCreateRef", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyCreateRef", err) + continue + } + var refType string + err = json.Unmarshal(call.Args[2], &refType) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyCreateRef", err) + continue + } + var refFullName string + err = json.Unmarshal(call.Args[3], &refFullName) + if err != nil { + log.Error("Unable to unmarshal %s to 
%s in call to %s: %v", string(call.Args[3]), "string", "NotifyCreateRef", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyCreateRef(doer, repo, refType, refFullName) + } + case "NotifyDeleteRef": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifyDeleteRef", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifyDeleteRef", err) + continue + } + var refType string + err = json.Unmarshal(call.Args[2], &refType) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifyDeleteRef", err) + continue + } + var refFullName string + err = json.Unmarshal(call.Args[3], &refFullName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "string", "NotifyDeleteRef", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifyDeleteRef(doer, repo, refType, refFullName) + } + case "NotifySyncPushCommits": + + var pusher *models.User + err = json.Unmarshal(call.Args[0], &pusher) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifySyncPushCommits", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifySyncPushCommits", err) + continue + } + var refName string + err = json.Unmarshal(call.Args[2], &refName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifySyncPushCommits", err) + continue + } + var oldCommitID string + err = json.Unmarshal(call.Args[3], &oldCommitID) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "string", "NotifySyncPushCommits", err) + continue + } + var newCommitID string + err = json.Unmarshal(call.Args[4], &newCommitID) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[4]), "string", "NotifySyncPushCommits", err) + continue + } + var commits *models.PushCommits + err = json.Unmarshal(call.Args[5], &commits) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[5]), "*models.PushCommits", "NotifySyncPushCommits", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifySyncPushCommits(pusher, repo, refName, oldCommitID, newCommitID, commits) + } + case "NotifySyncCreateRef": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifySyncCreateRef", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifySyncCreateRef", err) + continue + } + var refType string + err = json.Unmarshal(call.Args[2], &refType) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifySyncCreateRef", err) + continue + 
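Every additional notifier method grows this switch by another near-identical case. A registry of decoders keyed by name is one conceivable refactor; the sketch below is purely hypothetical and not part of this patch:

    package main

    import "fmt"

    // decoders maps a notification name to a function that decodes the
    // positional JSON arguments and fans the call out to the notifiers.
    // Hypothetical alternative to the hand-written switch above.
    var decoders = map[string]func(args [][]byte) error{}

    func dispatch(name string, args [][]byte) error {
        decode, ok := decoders[name]
        if !ok {
            return fmt.Errorf("unknown notifier function %s with %d arguments", name, len(args))
        }
        return decode(args)
    }

    func main() {
        decoders["NotifyExample"] = func(args [][]byte) error { return nil }
        fmt.Println(dispatch("NotifyExample", nil)) // <nil>
    }

Each case body above would then shrink to one registered closure, and the switch becomes a map lookup.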
} + var refFullName string + err = json.Unmarshal(call.Args[3], &refFullName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "string", "NotifySyncCreateRef", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifySyncCreateRef(doer, repo, refType, refFullName) + } + case "NotifySyncDeleteRef": + + var doer *models.User + err = json.Unmarshal(call.Args[0], &doer) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[0]), "*models.User", "NotifySyncDeleteRef", err) + continue + } + var repo *models.Repository + err = json.Unmarshal(call.Args[1], &repo) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[1]), "*models.Repository", "NotifySyncDeleteRef", err) + continue + } + var refType string + err = json.Unmarshal(call.Args[2], &refType) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[2]), "string", "NotifySyncDeleteRef", err) + continue + } + var refFullName string + err = json.Unmarshal(call.Args[3], &refFullName) + if err != nil { + log.Error("Unable to unmarshal %s to %s in call to %s: %v", string(call.Args[3]), "string", "NotifySyncDeleteRef", err) + continue + } + for _, notifier := range q.notifiers { + notifier.NotifySyncDeleteRef(doer, repo, refType, refFullName) + } + default: + log.Error("Unknown notifier function %s with %d arguments", call.Name, len(call.Args)) + } + } +} + +func (q *QueueNotifier) Run() { + for _, notifier := range q.notifiers { + go notifier.Run() + } + graceful.GetManager().RunWithShutdownFns(q.internal.Run) +} + +// NotifyCreateRepository is a placeholder function +func (q *QueueNotifier) NotifyCreateRepository(doer *models.User, u *models.User, repo *models.Repository) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&u) + if err != nil { + log.Error("Unable to marshall u: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyCreateRepository", + Args: args, + }) +} + +// NotifyMigrateRepository is a placeholder function +func (q *QueueNotifier) NotifyMigrateRepository(doer *models.User, u *models.User, repo *models.Repository) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&u) + if err != nil { + log.Error("Unable to marshall u: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyMigrateRepository", + Args: args, + }) +} + +// NotifyDeleteRepository is a placeholder function +func (q *QueueNotifier) NotifyDeleteRepository(doer *models.User, repo *models.Repository) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall 
repo: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyDeleteRepository", + Args: args, + }) +} + +// NotifyForkRepository is a placeholder function +func (q *QueueNotifier) NotifyForkRepository(doer *models.User, oldRepo *models.Repository, repo *models.Repository) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldRepo) + if err != nil { + log.Error("Unable to marshall oldRepo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyForkRepository", + Args: args, + }) +} + +// NotifyRenameRepository is a placeholder function +func (q *QueueNotifier) NotifyRenameRepository(doer *models.User, repo *models.Repository, oldRepoName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldRepoName) + if err != nil { + log.Error("Unable to marshall oldRepoName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyRenameRepository", + Args: args, + }) +} + +// NotifyTransferRepository is a placeholder function +func (q *QueueNotifier) NotifyTransferRepository(doer *models.User, repo *models.Repository, oldOwnerName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldOwnerName) + if err != nil { + log.Error("Unable to marshall oldOwnerName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyTransferRepository", + Args: args, + }) +} + +// NotifyNewIssue is a placeholder function +func (q *QueueNotifier) NotifyNewIssue(unknown0 *models.Issue) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyNewIssue", + Args: args, + }) +} + +// NotifyIssueChangeStatus is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeStatus(unknown0 *models.User, unknown1 *models.Issue, unknown2 *models.Comment, unknown3 bool) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown2) + if err != nil { + log.Error("Unable to marshall unknown2: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown3) + if err != nil { + log.Error("Unable to 
marshall unknown3: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeStatus", + Args: args, + }) +} + +// NotifyIssueChangeMilestone is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeMilestone(doer *models.User, issue *models.Issue, oldMilestoneID int64) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldMilestoneID) + if err != nil { + log.Error("Unable to marshall oldMilestoneID: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeMilestone", + Args: args, + }) +} + +// NotifyIssueChangeAssignee is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeAssignee(doer *models.User, issue *models.Issue, assignee *models.User, removed bool, comment *models.Comment) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&assignee) + if err != nil { + log.Error("Unable to marshall assignee: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&removed) + if err != nil { + log.Error("Unable to marshall removed: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&comment) + if err != nil { + log.Error("Unable to marshall comment: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeAssignee", + Args: args, + }) +} + +// NotifyIssueChangeContent is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeContent(doer *models.User, issue *models.Issue, oldContent string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldContent) + if err != nil { + log.Error("Unable to marshall oldContent: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeContent", + Args: args, + }) +} + +// NotifyIssueClearLabels is a placeholder function +func (q *QueueNotifier) NotifyIssueClearLabels(doer *models.User, issue *models.Issue) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueClearLabels", + Args: args, + }) +} + +// NotifyIssueChangeTitle is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeTitle(doer *models.User, issue *models.Issue, oldTitle string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = 
json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldTitle) + if err != nil { + log.Error("Unable to marshall oldTitle: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeTitle", + Args: args, + }) +} + +// NotifyIssueChangeLabels is a placeholder function +func (q *QueueNotifier) NotifyIssueChangeLabels(doer *models.User, issue *models.Issue, addedLabels []*models.Label, removedLabels []*models.Label) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&issue) + if err != nil { + log.Error("Unable to marshall issue: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&addedLabels) + if err != nil { + log.Error("Unable to marshall addedLabels: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&removedLabels) + if err != nil { + log.Error("Unable to marshall removedLabels: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyIssueChangeLabels", + Args: args, + }) +} + +// NotifyNewPullRequest is a placeholder function +func (q *QueueNotifier) NotifyNewPullRequest(unknown0 *models.PullRequest) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyNewPullRequest", + Args: args, + }) +} + +// NotifyMergePullRequest is a placeholder function +func (q *QueueNotifier) NotifyMergePullRequest(unknown0 *models.PullRequest, unknown1 *models.User, unknown2 *git.Repository) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown2) + if err != nil { + log.Error("Unable to marshall unknown2: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyMergePullRequest", + Args: args, + }) +} + +// NotifyPullRequestSynchronized is a placeholder function +func (q *QueueNotifier) NotifyPullRequestSynchronized(doer *models.User, pr *models.PullRequest) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&pr) + if err != nil { + log.Error("Unable to marshall pr: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyPullRequestSynchronized", + Args: args, + }) +} + +// NotifyPullRequestReview is a placeholder function +func (q *QueueNotifier) NotifyPullRequestReview(unknown0 *models.PullRequest, unknown1 *models.Review, unknown2 *models.Comment) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to 
marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown2) + if err != nil { + log.Error("Unable to marshall unknown2: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyPullRequestReview", + Args: args, + }) +} + +// NotifyPullRequestChangeTargetBranch is a placeholder function +func (q *QueueNotifier) NotifyPullRequestChangeTargetBranch(doer *models.User, pr *models.PullRequest, oldBranch string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&pr) + if err != nil { + log.Error("Unable to marshall pr: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldBranch) + if err != nil { + log.Error("Unable to marshall oldBranch: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyPullRequestChangeTargetBranch", + Args: args, + }) +} + +// NotifyCreateIssueComment is a placeholder function +func (q *QueueNotifier) NotifyCreateIssueComment(unknown0 *models.User, unknown1 *models.Repository, unknown2 *models.Issue, unknown3 *models.Comment) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown2) + if err != nil { + log.Error("Unable to marshall unknown2: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown3) + if err != nil { + log.Error("Unable to marshall unknown3: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyCreateIssueComment", + Args: args, + }) +} + +// NotifyUpdateComment is a placeholder function +func (q *QueueNotifier) NotifyUpdateComment(unknown0 *models.User, unknown1 *models.Comment, unknown2 string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown2) + if err != nil { + log.Error("Unable to marshall unknown2: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyUpdateComment", + Args: args, + }) +} + +// NotifyDeleteComment is a placeholder function +func (q *QueueNotifier) NotifyDeleteComment(unknown0 *models.User, unknown1 *models.Comment) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&unknown0) + if err != nil { + log.Error("Unable to marshall unknown0: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&unknown1) + if err != nil { + log.Error("Unable to marshall unknown1: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyDeleteComment", + Args: args, + }) +} + +// NotifyNewRelease is a placeholder function 
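The push-side wrappers in this file all repeat the same marshal, append, push sequence once per argument. A condensed helper is sketched below; pushCall is hypothetical and not part of this patch, though q.internal.Push, log.Error, and FunctionCall are exactly as used here:

    // pushCall marshals each argument to JSON and enqueues a FunctionCall.
    // Hypothetical helper; assumes it lives alongside QueueNotifier.
    func (q *QueueNotifier) pushCall(name string, args ...interface{}) {
        encoded := make([][]byte, 0, len(args))
        for _, arg := range args {
            bs, err := json.Marshal(arg)
            if err != nil {
                log.Error("Unable to marshal argument for %s: %v", name, err)
                return
            }
            encoded = append(encoded, bs)
        }
        if err := q.internal.Push(&FunctionCall{Name: name, Args: encoded}); err != nil {
            log.Error("Unable to push %s: %v", name, err)
        }
    }

With it, each wrapper reduces to a single line, for example q.pushCall("NotifyDeleteRepository", &doer, &repo).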
+func (q *QueueNotifier) NotifyNewRelease(rel *models.Release) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&rel) + if err != nil { + log.Error("Unable to marshall rel: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyNewRelease", + Args: args, + }) +} + +// NotifyUpdateRelease is a placeholder function +func (q *QueueNotifier) NotifyUpdateRelease(doer *models.User, rel *models.Release) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&rel) + if err != nil { + log.Error("Unable to marshall rel: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyUpdateRelease", + Args: args, + }) +} + +// NotifyDeleteRelease is a placeholder function +func (q *QueueNotifier) NotifyDeleteRelease(doer *models.User, rel *models.Release) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&rel) + if err != nil { + log.Error("Unable to marshall rel: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyDeleteRelease", + Args: args, + }) +} + +// NotifyPushCommits is a placeholder function +func (q *QueueNotifier) NotifyPushCommits(pusher *models.User, repo *models.Repository, refName string, oldCommitID string, newCommitID string, commits *models.PushCommits) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&pusher) + if err != nil { + log.Error("Unable to marshall pusher: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refName) + if err != nil { + log.Error("Unable to marshall refName: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldCommitID) + if err != nil { + log.Error("Unable to marshall oldCommitID: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&newCommitID) + if err != nil { + log.Error("Unable to marshall newCommitID: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&commits) + if err != nil { + log.Error("Unable to marshall commits: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyPushCommits", + Args: args, + }) +} + +// NotifyCreateRef is a placeholder function +func (q *QueueNotifier) NotifyCreateRef(doer *models.User, repo *models.Repository, refType string, refFullName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refType) + if err != nil { + log.Error("Unable to marshall refType: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refFullName) + if err != nil { + log.Error("Unable to marshall refFullName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: 
"NotifyCreateRef", + Args: args, + }) +} + +// NotifyDeleteRef is a placeholder function +func (q *QueueNotifier) NotifyDeleteRef(doer *models.User, repo *models.Repository, refType string, refFullName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refType) + if err != nil { + log.Error("Unable to marshall refType: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refFullName) + if err != nil { + log.Error("Unable to marshall refFullName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifyDeleteRef", + Args: args, + }) +} + +// NotifySyncPushCommits is a placeholder function +func (q *QueueNotifier) NotifySyncPushCommits(pusher *models.User, repo *models.Repository, refName string, oldCommitID string, newCommitID string, commits *models.PushCommits) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&pusher) + if err != nil { + log.Error("Unable to marshall pusher: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refName) + if err != nil { + log.Error("Unable to marshall refName: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&oldCommitID) + if err != nil { + log.Error("Unable to marshall oldCommitID: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&newCommitID) + if err != nil { + log.Error("Unable to marshall newCommitID: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&commits) + if err != nil { + log.Error("Unable to marshall commits: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifySyncPushCommits", + Args: args, + }) +} + +// NotifySyncCreateRef is a placeholder function +func (q *QueueNotifier) NotifySyncCreateRef(doer *models.User, repo *models.Repository, refType string, refFullName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refType) + if err != nil { + log.Error("Unable to marshall refType: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refFullName) + if err != nil { + log.Error("Unable to marshall refFullName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifySyncCreateRef", + Args: args, + }) +} + +// NotifySyncDeleteRef is a placeholder function +func (q *QueueNotifier) NotifySyncDeleteRef(doer *models.User, repo *models.Repository, refType string, refFullName string) { + args := make([][]byte, 0) + var err error + var bs []byte + bs, err = json.Marshal(&doer) + if err != nil { + log.Error("Unable to marshall doer: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&repo) + if err != nil { + log.Error("Unable to marshall repo: %v", err) + return + 
} + args = append(args, bs) + bs, err = json.Marshal(&refType) + if err != nil { + log.Error("Unable to marshall refType: %v", err) + return + } + args = append(args, bs) + bs, err = json.Marshal(&refFullName) + if err != nil { + log.Error("Unable to marshall refFullName: %v", err) + return + } + args = append(args, bs) + + q.internal.Push(&FunctionCall{ + Name: "NotifySyncDeleteRef", + Args: args, + }) +} diff --git a/modules/notification/indexer/indexer.go b/modules/notification/indexer/indexer.go index 4ca5e64c3e431..ffa3700b537a5 100644 --- a/modules/notification/indexer/indexer.go +++ b/modules/notification/indexer/indexer.go @@ -47,6 +47,10 @@ func (r *indexerNotifier) NotifyNewIssue(issue *models.Issue) { } func (r *indexerNotifier) NotifyNewPullRequest(pr *models.PullRequest) { + if err := pr.LoadIssue(); err != nil { + log.Error("Unable to load issue: %d for pr: %d, error: %v", pr.IssueID, pr.ID, err) + return + } issue_indexer.UpdateIssueIndexer(pr.Issue) } diff --git a/modules/notification/mail/mail.go b/modules/notification/mail/mail.go index 5148434dca23e..e290c633c785e 100644 --- a/modules/notification/mail/mail.go +++ b/modules/notification/mail/mail.go @@ -46,6 +46,10 @@ func (m *mailNotifier) NotifyCreateIssueComment(doer *models.User, repo *models. } func (m *mailNotifier) NotifyNewIssue(issue *models.Issue) { + if err := issue.LoadPoster(); err != nil { + log.Error("Unable to load poster: %d for issue: %d: Error: %v", issue.PosterID, issue.ID, err) + return + } if err := mailer.MailParticipants(issue, issue.Poster, models.ActionCreateIssue); err != nil { log.Error("MailParticipants: %v", err) } @@ -73,6 +77,14 @@ func (m *mailNotifier) NotifyIssueChangeStatus(doer *models.User, issue *models. } func (m *mailNotifier) NotifyNewPullRequest(pr *models.PullRequest) { + if err := pr.LoadIssue(); err != nil { + log.Error("Unable to load issue: %d for pr: %d: Error: %v", pr.IssueID, pr.ID, err) + return + } + if err := pr.Issue.LoadPoster(); err != nil { + log.Error("Unable to load poster: %d for pr: %d, issue: %d: Error: %v", pr.Issue.PosterID, pr.ID, pr.IssueID, err) + return + } if err := mailer.MailParticipants(pr.Issue, pr.Issue.Poster, models.ActionCreatePullRequest); err != nil { log.Error("MailParticipants: %v", err) } diff --git a/modules/notification/notification.go b/modules/notification/notification.go index a5b09d97f87eb..2b3891645ee7e 100644 --- a/modules/notification/notification.go +++ b/modules/notification/notification.go @@ -5,6 +5,8 @@ package notification import ( + "sync" + "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/git" "code.gitea.io/gitea/modules/notification/action" @@ -18,10 +20,10 @@ import ( var ( notifiers []base.Notifier + once = sync.Once{} ) // RegisterNotifier providers method to receive notify messages -// FIXME: graceful: This may need to become a queue func RegisterNotifier(notifier base.Notifier) { go notifier.Run() notifiers = append(notifiers, notifier) @@ -29,13 +31,18 @@ func RegisterNotifier(notifier base.Notifier) { // NewContext registers notification handlers func NewContext() { - RegisterNotifier(ui.NewNotifier()) - if setting.Service.EnableNotifyMail { - RegisterNotifier(mail.NewNotifier()) - } - RegisterNotifier(indexer.NewNotifier()) - RegisterNotifier(webhook.NewNotifier()) - RegisterNotifier(action.NewNotifier()) + once.Do(func() { + var ns []base.Notifier + ns = append(ns, ui.NewNotifier()) + if setting.Service.EnableNotifyMail { + ns = append(ns, mail.NewNotifier()) + } + ns = append(ns, indexer.NewNotifier()) 
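NewContext is now guarded by sync.Once, so however many call sites race to initialize notifications, the notifier list is built and registered exactly once. The pattern in miniature:

    package main

    import (
        "fmt"
        "sync"
    )

    var once sync.Once

    // newContext imitates the guarded initializer: the body runs at most once.
    func newContext() {
        once.Do(func() {
            fmt.Println("registering notifiers")
        })
    }

    func main() {
        newContext()
        newContext() // no-op: Do has already run
    }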
+ ns = append(ns, webhook.NewNotifier()) + ns = append(ns, action.NewNotifier()) + + RegisterNotifier(base.NewQueueNotifier("notification", ns)) + }) } // NotifyCreateIssueComment notifies issue comment related message to notifiers diff --git a/modules/notification/ui/ui.go b/modules/notification/ui/ui.go index 1f28d37b98a65..38fc5fd14b596 100644 --- a/modules/notification/ui/ui.go +++ b/modules/notification/ui/ui.go @@ -79,7 +79,7 @@ func (ns *notificationService) NotifyIssueChangeStatus(doer *models.User, issue func (ns *notificationService) NotifyMergePullRequest(pr *models.PullRequest, doer *models.User, gitRepo *git.Repository) { _ = ns.issueQueue.Push(issueNotificationOpts{ - issueID: pr.Issue.ID, + issueID: pr.IssueID, notificationAuthorID: doer.ID, }) } @@ -90,14 +90,14 @@ func (ns *notificationService) NotifyNewPullRequest(pr *models.PullRequest) { return } _ = ns.issueQueue.Push(issueNotificationOpts{ - issueID: pr.Issue.ID, + issueID: pr.IssueID, notificationAuthorID: pr.Issue.PosterID, }) } func (ns *notificationService) NotifyPullRequestReview(pr *models.PullRequest, r *models.Review, c *models.Comment) { var opts = issueNotificationOpts{ - issueID: pr.Issue.ID, + issueID: pr.IssueID, notificationAuthorID: r.Reviewer.ID, } if c != nil { diff --git a/modules/setting/queue.go b/modules/setting/queue.go index 9053cfb12eefc..cd203322e2f24 100644 --- a/modules/setting/queue.go +++ b/modules/setting/queue.go @@ -63,7 +63,6 @@ func CreateQueue(name string, handle queue.HandlerFunc, exemplar interface{}) qu log.Error("Unable to create queue for %s", name, err) return nil } - returnable, err := queue.CreateQueue(queue.Type(q.Type), handle, cfg, exemplar) if q.WrapIfNecessary && err != nil { log.Warn("Unable to create queue for %s: %v", name, err) diff --git a/routers/api/v1/repo/issue_comment.go b/routers/api/v1/repo/issue_comment.go index c13fc93cdfb73..49bce7e29262f 100644 --- a/routers/api/v1/repo/issue_comment.go +++ b/routers/api/v1/repo/issue_comment.go @@ -312,6 +312,8 @@ func editIssueComment(ctx *context.APIContext, form api.EditIssueCommentOption) ctx.Error(http.StatusInternalServerError, "UpdateComment", err) return } + _ = comment.LoadAssigneeUser() + _ = comment.LoadPoster() ctx.JSON(http.StatusOK, comment.APIFormat()) } From ff3bba261e0c370d8dcf8f8af4a470e3ba43769b Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Sun, 22 Dec 2019 23:46:55 +0000 Subject: [PATCH 18/21] broken merge --- modules/notification/ui/ui.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/notification/ui/ui.go b/modules/notification/ui/ui.go index 38fc5fd14b596..e6b0d6a85e7ed 100644 --- a/modules/notification/ui/ui.go +++ b/modules/notification/ui/ui.go @@ -71,7 +71,7 @@ func (ns *notificationService) NotifyNewIssue(issue *models.Issue) { } func (ns *notificationService) NotifyIssueChangeStatus(doer *models.User, issue *models.Issue, actionComment *models.Comment, isClosed bool) { - ns.issueQueue <- issueNotificationOpts{ + _ = ns.issueQueue.Push(issueNotificationOpts{ issueID: issue.ID, notificationAuthorID: doer.ID, }) From 7cb70d93c89da920467232457647009a831c6d05 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Mon, 23 Dec 2019 23:06:10 +0000 Subject: [PATCH 19/21] Ensure that Message is json encodable --- services/mailer/mail.go | 2 +- services/mailer/mail_test.go | 29 ++++++++-------- services/mailer/mailer.go | 64 +++++++++++++++++++++++++----------- 3 files changed, 61 insertions(+), 34 deletions(-) diff --git a/services/mailer/mail.go 
b/services/mailer/mail.go index a8768de6cdbde..bdbee403d54f1 100644 --- a/services/mailer/mail.go +++ b/services/mailer/mail.go @@ -52,7 +52,7 @@ func InitMailRender(subjectTpl *texttmpl.Template, bodyTpl *template.Template) { // SendTestMail sends a test mail func SendTestMail(email string) error { - return gomail.Send(Sender, NewMessage([]string{email}, "Gitea Test Email!", "Gitea Test Email!").Message) + return gomail.Send(Sender, NewMessage([]string{email}, "Gitea Test Email!", "Gitea Test Email!").ToMessage()) } // SendUserMail sends a mail to the user diff --git a/services/mailer/mail_test.go b/services/mailer/mail_test.go index 43e99c635e78a..d7d02d9dee822 100644 --- a/services/mailer/mail_test.go +++ b/services/mailer/mail_test.go @@ -61,11 +61,11 @@ func TestComposeIssueCommentMessage(t *testing.T) { msgs := composeIssueCommentMessages(&mailCommentContext{Issue: issue, Doer: doer, ActionType: models.ActionCommentIssue, Content: "test body", Comment: comment}, tos, false, "issue comment") assert.Len(t, msgs, 2) - - mailto := msgs[0].GetHeader("To") - subject := msgs[0].GetHeader("Subject") - inreplyTo := msgs[0].GetHeader("In-Reply-To") - references := msgs[0].GetHeader("References") + gomailMsg := msgs[0].ToMessage() + mailto := gomailMsg.GetHeader("To") + subject := gomailMsg.GetHeader("Subject") + inreplyTo := gomailMsg.GetHeader("In-Reply-To") + references := gomailMsg.GetHeader("References") assert.Len(t, mailto, 1, "exactly one recipient is expected in the To field") assert.Equal(t, "Re: ", subject[0][:4], "Comment reply subject should contain Re:") @@ -96,14 +96,15 @@ func TestComposeIssueMessage(t *testing.T) { Content: "test body"}, tos, false, "issue create") assert.Len(t, msgs, 2) - mailto := msgs[0].GetHeader("To") - subject := msgs[0].GetHeader("Subject") - messageID := msgs[0].GetHeader("Message-ID") + gomailMsg := msgs[0].ToMessage() + mailto := gomailMsg.GetHeader("To") + subject := gomailMsg.GetHeader("Subject") + messageID := gomailMsg.GetHeader("Message-ID") assert.Len(t, mailto, 1, "exactly one recipient is expected in the To field") assert.Equal(t, "[user2/repo1] @user2 #1 - issue1", subject[0]) - assert.Nil(t, msgs[0].GetHeader("In-Reply-To")) - assert.Nil(t, msgs[0].GetHeader("References")) + assert.Nil(t, gomailMsg.GetHeader("In-Reply-To")) + assert.Nil(t, gomailMsg.GetHeader("References")) assert.Equal(t, messageID[0], "", "Message-ID header doesn't match") } @@ -134,9 +135,9 @@ func TestTemplateSelection(t *testing.T) { InitMailRender(stpl, btpl) expect := func(t *testing.T, msg *Message, expSubject, expBody string) { - subject := msg.GetHeader("Subject") + subject := msg.ToMessage().GetHeader("Subject") msgbuf := new(bytes.Buffer) - _, _ = msg.WriteTo(msgbuf) + _, _ = msg.ToMessage().WriteTo(msgbuf) wholemsg := msgbuf.String() assert.Equal(t, []string{expSubject}, subject) assert.Contains(t, wholemsg, expBody) @@ -188,9 +189,9 @@ func TestTemplateServices(t *testing.T) { msg := testComposeIssueCommentMessage(t, &mailCommentContext{Issue: issue, Doer: doer, ActionType: actionType, Content: "test body", Comment: comment}, tos, fromMention, "TestTemplateServices") - subject := msg.GetHeader("Subject") + subject := msg.ToMessage().GetHeader("Subject") msgbuf := new(bytes.Buffer) - _, _ = msg.WriteTo(msgbuf) + _, _ = msg.ToMessage().WriteTo(msgbuf) wholemsg := msgbuf.String() assert.Equal(t, []string{expSubject}, subject) diff --git a/services/mailer/mailer.go b/services/mailer/mailer.go index 25ad87ab25201..6a48ed8d95d5a 100644 --- a/services/mailer/mailer.go 
+++ b/services/mailer/mailer.go @@ -29,38 +29,63 @@ import ( // Message mail body and log info type Message struct { - Info string // Message information for log purpose. - *gomail.Message + Info string // Message information for log purpose. + FromAddress string + FromDisplayName string + To []string + Subject string + Date time.Time + Body string + Headers map[string][]string } -// NewMessageFrom creates new mail message object with custom From header. -func NewMessageFrom(to []string, fromDisplayName, fromAddress, subject, body string) *Message { - log.Trace("NewMessageFrom (body):\n%s", body) - +// ToMessage converts a Message to gomail.Message +func (m *Message) ToMessage() *gomail.Message { msg := gomail.NewMessage() - msg.SetAddressHeader("From", fromAddress, fromDisplayName) - msg.SetHeader("To", to...) + msg.SetAddressHeader("From", m.FromAddress, m.FromDisplayName) + msg.SetHeader("To", m.To...) + for header := range m.Headers { + msg.SetHeader(header, m.Headers[header]...) + } + if len(setting.MailService.SubjectPrefix) > 0 { - msg.SetHeader("Subject", setting.MailService.SubjectPrefix+" "+subject) + msg.SetHeader("Subject", setting.MailService.SubjectPrefix+" "+m.Subject) } else { - msg.SetHeader("Subject", subject) + msg.SetHeader("Subject", m.Subject) } - msg.SetDateHeader("Date", time.Now()) + msg.SetDateHeader("Date", m.Date) msg.SetHeader("X-Auto-Response-Suppress", "All") - plainBody, err := html2text.FromString(body) + plainBody, err := html2text.FromString(m.Body) if err != nil || setting.MailService.SendAsPlainText { - if strings.Contains(base.TruncateString(body, 100), "<html>") { + if strings.Contains(base.TruncateString(m.Body, 100), "<html>") { log.Warn("Mail contains HTML but configured to send as plain text.") } msg.SetBody("text/plain", plainBody) } else { msg.SetBody("text/plain", plainBody) - msg.AddAlternative("text/html", body) + msg.AddAlternative("text/html", m.Body) } + return msg +} + +// SetHeader adds additional headers to a message +func (m *Message) SetHeader(field string, value ...string) { + m.Headers[field] = value +} + +// NewMessageFrom creates new mail message object with custom From header.
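The point of this refactor is that Message now survives a JSON round-trip, which the persistable queues need, and is only lowered to a gomail.Message at send time via ToMessage. A standalone sketch of that property, with the struct fields copied from the diff above and ToMessage elided:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // Message mirrors the plain struct introduced above: every field is
    // exported and JSON-encodable, unlike the embedded *gomail.Message
    // it replaces.
    type Message struct {
        Info            string
        FromAddress     string
        FromDisplayName string
        To              []string
        Subject         string
        Date            time.Time
        Body            string
        Headers         map[string][]string
    }

    func main() {
        in := &Message{
            To:      []string{"user@example.com"},
            Subject: "Gitea Test Email!",
            Date:    time.Now(),
            Body:    "Gitea Test Email!",
            Headers: map[string][]string{},
        }
        bs, err := json.Marshal(in)
        if err != nil {
            panic(err)
        }
        var out Message
        if err := json.Unmarshal(bs, &out); err != nil {
            panic(err)
        }
        // A queue can persist bs and decode it later; the real code then
        // rebuilds the gomail message with ToMessage at send time.
        fmt.Println(out.Subject, out.To)
    }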
@@ -285,11 +310,12 @@ func NewContext() {
 	mailQueue = setting.CreateQueue("mail", func(data ...queue.Data) {
 		for _, datum := range data {
 			msg := datum.(*Message)
-			log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
-			if err := gomail.Send(Sender, msg.Message); err != nil {
-				log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
+			gomailMsg := msg.ToMessage()
+			log.Trace("New e-mail sending request %s: %s", gomailMsg.GetHeader("To"), msg.Info)
+			if err := gomail.Send(Sender, gomailMsg); err != nil {
+				log.Error("Failed to send emails %s: %s - %v", gomailMsg.GetHeader("To"), msg.Info, err)
 			} else {
-				log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
+				log.Trace("E-mails sent %s: %s", gomailMsg.GetHeader("To"), msg.Info)
 			}
 		}
 	}, &Message{})
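That NewContext hunk is where the refactor pays off: the queue handler receives opaque queue.Data values, asserts them back to *Message, and only builds the gomail form at send time. A condensed sketch of the handler shape, reusing names from the hunk above; the assertion is reasonable because &Message{} is registered as the queue's exemplar:

// Condensed sketch of the handler passed to setting.CreateQueue above.
handler := func(data ...queue.Data) {
	for _, datum := range data {
		msg, ok := datum.(*Message)
		if !ok {
			continue // defensive: the exemplar should guarantee *Message
		}
		if err := gomail.Send(Sender, msg.ToMessage()); err != nil {
			log.Error("Failed to send email %s: %v", msg.Info, err)
		}
	}
}
_ = handler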
assert.Fail(t, "Took too long to notify!") } - pr := prInterface.(*models.PullRequest) + pr = prInterface.(*models.PullRequest) pr.LoadBaseRepo() pr.LoadHeadRepo() pr.BaseRepo.MustOwner() @@ -99,10 +122,15 @@ func TestPullMerge(t *testing.T) { time.Sleep(100 * time.Millisecond) select { - case prInterface = <-mergePullNotified: + case prInterface = <-createPullNotified: assert.Fail(t, "Should only have one pull create notification: %v", prInterface) default: } + select { + case prInterface = <-mergePullNotified: + assert.Fail(t, "Should only have one pull merge notification: %v", prInterface) + default: + } }) } From a4386c27a80606593293d6fec48a8e93c03b76b2 Mon Sep 17 00:00:00 2001 From: Andrew Thornton Date: Fri, 27 Dec 2019 14:47:20 +0000 Subject: [PATCH 21/21] Slight change of notifierListener.RegisterChannel function --- integrations/notification_helper_test.go | 13 +++---- integrations/pull_merge_test.go | 43 +++++++----------------- 2 files changed, 19 insertions(+), 37 deletions(-) diff --git a/integrations/notification_helper_test.go b/integrations/notification_helper_test.go index 1f7e0819439c5..d7ef5f74678d2 100644 --- a/integrations/notification_helper_test.go +++ b/integrations/notification_helper_test.go @@ -60,9 +60,10 @@ func (n *NotifierListener) Deregister(functionName string, callback *func(string n.lock.Unlock() } -// RegisterChannel will register a provided channel with function name and return a function to deregister it -func (n *NotifierListener) RegisterChannel(name string, channel chan<- interface{}, argNumber int, exemplar interface{}) (deregister func()) { +// RegisterChannel will return a registered channel with function name and return a function to deregister it and close the channel at the end +func (n *NotifierListener) RegisterChannel(name string, argNumber int, exemplar interface{}) (<-chan interface{}, func()) { t := reflect.TypeOf(exemplar) + channel := make(chan interface{}, 10) callback := func(_ string, args [][]byte) { n := reflect.New(t).Elem() err := json.Unmarshal(args[argNumber], n.Addr().Interface()) @@ -73,8 +74,9 @@ func (n *NotifierListener) RegisterChannel(name string, channel chan<- interface } n.Register(name, &callback) - return func() { + return channel, func() { n.Deregister(name, &callback) + close(channel) } } @@ -95,8 +97,8 @@ func (n *NotifierListener) handle(data ...queue.Data) { func TestNotifierListener(t *testing.T) { defer prepareTestEnv(t)() - createPullNotified := make(chan interface{}, 10) - deregister := notifierListener.RegisterChannel("NotifyNewPullRequest", createPullNotified, 0, &models.PullRequest{}) + createPullNotified, deregister := notifierListener.RegisterChannel("NotifyNewPullRequest", 0, &models.PullRequest{}) + bs, _ := json.Marshal(&models.PullRequest{}) notifierListener.handle(&base.FunctionCall{ Name: "NotifyNewPullRequest", @@ -113,7 +115,6 @@ func TestNotifierListener(t *testing.T) { <-createPullNotified deregister() - close(createPullNotified) notification.NotifyNewPullRequest(&models.PullRequest{}) // would panic if not deregistered diff --git a/integrations/pull_merge_test.go b/integrations/pull_merge_test.go index 49fb7c2f40929..c38a5cd1af8a0 100644 --- a/integrations/pull_merge_test.go +++ b/integrations/pull_merge_test.go @@ -61,19 +61,11 @@ func testPullCleanUp(t *testing.T, session *TestSession, user, repo, pullnum str func TestPullMerge(t *testing.T) { onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) { - createPullNotified := make(chan interface{}, 10) - deferable := 
diff --git a/integrations/pull_merge_test.go b/integrations/pull_merge_test.go
index 49fb7c2f40929..c38a5cd1af8a0 100644
--- a/integrations/pull_merge_test.go
+++ b/integrations/pull_merge_test.go
@@ -61,19 +61,11 @@ func testPullCleanUp(t *testing.T, session *TestSession, user, repo, pullnum str
 
 func TestPullMerge(t *testing.T) {
 	onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) {
-		createPullNotified := make(chan interface{}, 10)
-		deferable := notifierListener.RegisterChannel("NotifyNewPullRequest", createPullNotified, 0, &models.PullRequest{})
-		defer func() {
-			deferable()
-			close(createPullNotified)
-		}()
-
-		mergePullNotified := make(chan interface{}, 10)
-		deferable = notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{})
-		defer func() {
-			deferable()
-			close(mergePullNotified)
-		}()
+		createPullNotified, deferableCreate := notifierListener.RegisterChannel("NotifyNewPullRequest", 0, &models.PullRequest{})
+		defer deferableCreate()
+
+		mergePullNotified, deferableMerge := notifierListener.RegisterChannel("NotifyMergePullRequest", 0, &models.PullRequest{})
+		defer deferableMerge()
 
 		session := loginUser(t, "user1")
 		testRepoFork(t, session, "user2", "repo1", "user1", "repo1")
@@ -136,12 +128,8 @@ func TestPullMerge(t *testing.T) {
 
 func TestPullRebase(t *testing.T) {
 	onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) {
-		mergePullNotified := make(chan interface{}, 10)
-		deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{})
-		defer func() {
-			deferable()
-			close(mergePullNotified)
-		}()
+		mergePullNotified, deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", 0, &models.PullRequest{})
+		defer deferable()
 
 		session := loginUser(t, "user1")
 		testRepoFork(t, session, "user2", "repo1", "user1", "repo1")
@@ -162,12 +150,9 @@ func TestPullRebase(t *testing.T) {
 
 func TestPullRebaseMerge(t *testing.T) {
 	onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) {
-		mergePullNotified := make(chan interface{}, 10)
-		deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{})
-		defer func() {
-			deferable()
-			close(mergePullNotified)
-		}()
+		mergePullNotified, deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", 0, &models.PullRequest{})
+		defer deferable()
+
 		session := loginUser(t, "user1")
 		testRepoFork(t, session, "user2", "repo1", "user1", "repo1")
 		testEditFile(t, session, "user1", "repo1", "master", "README.md", "Hello, World (Edited)\n")
@@ -188,12 +173,8 @@ func TestPullRebaseMerge(t *testing.T) {
 
 func TestPullSquash(t *testing.T) {
 	onGiteaRun(t, func(t *testing.T, giteaURL *url.URL) {
-		mergePullNotified := make(chan interface{}, 10)
-		deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", mergePullNotified, 0, &models.PullRequest{})
-		defer func() {
-			deferable()
-			close(mergePullNotified)
-		}()
+		mergePullNotified, deferable := notifierListener.RegisterChannel("NotifyMergePullRequest", 0, &models.PullRequest{})
+		defer deferable()
 
 		session := loginUser(t, "user1")
 		testRepoFork(t, session, "user2", "repo1", "user1", "repo1")
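All four merge tests now repeat the same receive-or-timeout idiom when waiting on a notification channel. A hypothetical helper (not part of this series) that would capture it in one place:

// receiveOrFail is hypothetical and not in this patch series: it
// waits briefly for a notification and fails the test on timeout.
func receiveOrFail(t *testing.T, ch <-chan interface{}) interface{} {
	select {
	case v := <-ch:
		return v
	case <-time.After(500 * time.Millisecond):
		assert.Fail(t, "Took too long to notify!")
		return nil
	}
}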