GODT-1779: Remove go-imap

James Houlahan
2022-08-26 17:00:21 +02:00
parent 3b0bc1ca15
commit 39433fe707
593 changed files with 12725 additions and 91626 deletions

internal/pool/pool.go (new file, +177 lines)

@@ -0,0 +1,177 @@
package pool

import (
    "context"
    "errors"
    "sync"

    "github.com/ProtonMail/gluon/queue"
)

// ErrJobCancelled indicates the job was cancelled.
var ErrJobCancelled = errors.New("Job cancelled by surrounding context")

// Pool is a worker pool that handles input of type In and returns results of type Out.
type Pool[In comparable, Out any] struct {
    queue *queue.QueuedChannel[*Job[In, Out]]
    size  int
}

// DoneFunc must be called to free up pool resources.
type DoneFunc func()

// New returns a new pool.
func New[In comparable, Out any](size int, work func(context.Context, In) (Out, error)) *Pool[In, Out] {
    queue := queue.NewQueuedChannel[*Job[In, Out]](0, 0)

    for i := 0; i < size; i++ {
        go func() {
            for job := range queue.GetChannel() {
                select {
                case <-job.ctx.Done():
                    job.postFailure(ErrJobCancelled)

                default:
                    res, err := work(job.ctx, job.req)
                    if err != nil {
                        job.postFailure(err)
                    } else {
                        job.postSuccess(res)
                    }

                    job.waitDone()
                }
            }
        }()
    }

    return &Pool[In, Out]{
        queue: queue,
        size:  size,
    }
}

// NewJob submits a job to the pool. It returns a job handle and a DoneFunc.
// The job handle allows the job result to be obtained. The DoneFunc is used to mark the job as done,
// which frees up the worker in the pool for reuse.
func (pool *Pool[In, Out]) NewJob(ctx context.Context, req In) (*Job[In, Out], DoneFunc) {
    job := newJob[In, Out](ctx, req)

    pool.queue.Enqueue(job)

    return job, func() { close(job.done) }
}

// Process submits jobs to the pool. The callback provides access to the result, or an error if one occurred.
func (pool *Pool[In, Out]) Process(ctx context.Context, reqs []In, fn func(In, Out, error) error) error {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()

    var (
        wg      sync.WaitGroup
        errList []error
        lock    sync.Mutex
    )

    for _, req := range reqs {
        req := req

        wg.Add(1)

        go func() {
            defer wg.Done()

            job, done := pool.NewJob(ctx, req)
            defer done()

            res, err := job.Result()

            if err := fn(req, res, err); err != nil {
                lock.Lock()
                defer lock.Unlock()

                // Cancel ongoing jobs.
                cancel()

                // Collect the error.
                errList = append(errList, err)
            }
        }()
    }

    wg.Wait()

    // TODO: Join the errors somehow?
    if len(errList) > 0 {
        return errList[0]
    }

    return nil
}

// ProcessAll submits jobs to the pool. All results are returned once available.
func (pool *Pool[In, Out]) ProcessAll(ctx context.Context, reqs []In) (map[In]Out, error) {
    var (
        data = make(map[In]Out)
        lock = sync.Mutex{}
    )

    if err := pool.Process(ctx, reqs, func(req In, res Out, err error) error {
        if err != nil {
            return err
        }

        lock.Lock()
        defer lock.Unlock()

        data[req] = res

        return nil
    }); err != nil {
        return nil, err
    }

    return data, nil
}

// Done closes the pool's job queue.
func (pool *Pool[In, Out]) Done() {
    pool.queue.Close()
}

// Job is a single request/response pair handled by the pool.
type Job[In, Out any] struct {
    ctx  context.Context
    req  In
    res  chan Out
    err  chan error
    done chan struct{}
}

func newJob[In, Out any](ctx context.Context, req In) *Job[In, Out] {
    return &Job[In, Out]{
        ctx:  ctx,
        req:  req,
        res:  make(chan Out),
        err:  make(chan error),
        done: make(chan struct{}),
    }
}

// Result blocks until the job has been processed, then returns its result or error.
func (job *Job[In, Out]) Result() (Out, error) {
    return <-job.res, <-job.err
}

// postSuccess publishes a result; the err channel is closed so Result sees a nil error.
func (job *Job[In, Out]) postSuccess(res Out) {
    close(job.err)
    job.res <- res
}

// postFailure publishes an error; the res channel is closed so Result sees the zero value.
func (job *Job[In, Out]) postFailure(err error) {
    close(job.res)
    job.err <- err
}

// waitDone blocks until the job's DoneFunc has been called.
func (job *Job[In, Out]) waitDone() {
    <-job.done
}
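New, NewJob with its DoneFunc, Process and ProcessAll make up the whole surface of the package. A minimal usage sketch, with an assumed doubling work function and illustrative values (not part of the file above):

package pool

import (
    "context"
    "fmt"
)

// Example_usage is an illustrative sketch only: a pool of 4 workers whose
// work function doubles its input.
func Example_usage() {
    p := New(4, func(ctx context.Context, req int) (int, error) {
        return req * 2, nil
    })
    defer p.Done()

    // Single job: get a handle, read the result, then call the DoneFunc to
    // hand the worker back to the pool.
    job, done := p.NewJob(context.Background(), 21)
    res, err := job.Result()
    done()
    fmt.Println(res, err)

    // Batch: ProcessAll fans the requests out over the workers and returns a
    // map of request to result.
    all, err := p.ProcessAll(context.Background(), []int{1, 2, 3})
    fmt.Println(all, err)

    // Output:
    // 42 <nil>
    // map[1:2 2:4 3:6] <nil>
}

Process is the callback-driven form that ProcessAll builds on: the first error returned from the callback cancels the outstanding jobs and becomes the return value.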
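A note on the result handshake in Job: postSuccess closes the err channel before sending on res, and postFailure closes the res channel before sending on err, so Result can read both channels unconditionally; in Go, a receive from a closed channel yields the zero value immediately. A standalone sketch of that behaviour (hypothetical channel names, not from this file):

package main

import (
    "errors"
    "fmt"
)

func main() {
    resCh := make(chan int)
    errCh := make(chan error)

    // Failure path, mirroring postFailure: close the result channel, then
    // send the error.
    go func() {
        close(resCh)
        errCh <- errors.New("boom")
    }()

    // Mirroring Result: the closed resCh yields 0 straight away, then the
    // receive on errCh blocks until the error arrives.
    fmt.Println(<-resCh, <-errCh) // prints: 0 boom
}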

internal/pool/pool_test.go (new file, +163 lines)

@@ -0,0 +1,163 @@
package pool

import (
    "context"
    "errors"
    "runtime"
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestPool_NewJob(t *testing.T) {
    doubler := newDoubler(runtime.NumCPU())

    job1, done1 := doubler.NewJob(context.Background(), 1)
    defer done1()

    job2, done2 := doubler.NewJob(context.Background(), 2)
    defer done2()

    res2, err := job2.Result()
    require.NoError(t, err)

    res1, err := job1.Result()
    require.NoError(t, err)

    assert.Equal(t, 2, res1)
    assert.Equal(t, 4, res2)
}

func TestPool_NewJob_Done(t *testing.T) {
    // Create a doubler pool with 2 workers.
    doubler := newDoubler(2)

    // Start two jobs. Don't mark the jobs as done yet.
    job1, done1 := doubler.NewJob(context.Background(), 1)
    job2, done2 := doubler.NewJob(context.Background(), 2)

    // Get the first result.
    res1, _ := job1.Result()
    assert.Equal(t, 2, res1)

    // Get the second result.
    res2, _ := job2.Result()
    assert.Equal(t, 4, res2)

    // Additional jobs will wait.
    job3, _ := doubler.NewJob(context.Background(), 3)
    job4, _ := doubler.NewJob(context.Background(), 4)

    // Channel to collect results from jobs 3 and 4.
    resCh := make(chan int, 2)

    go func() {
        res, _ := job3.Result()
        resCh <- res
    }()

    go func() {
        res, _ := job4.Result()
        resCh <- res
    }()

    // Mark jobs 1 and 2 as done, freeing up the workers.
    done1()
    done2()

    assert.ElementsMatch(t, []int{6, 8}, []int{<-resCh, <-resCh})
}

func TestPool_Process(t *testing.T) {
    doubler := newDoubler(runtime.NumCPU())

    var (
        res  = make(map[int]int)
        lock sync.Mutex
    )

    require.NoError(t, doubler.Process(context.Background(), []int{1, 2, 3, 4, 5}, func(reqVal, resVal int, err error) error {
        require.NoError(t, err)

        lock.Lock()
        defer lock.Unlock()

        res[reqVal] = resVal

        return nil
    }))

    assert.Equal(t, map[int]int{
        1: 2,
        2: 4,
        3: 6,
        4: 8,
        5: 10,
    }, res)
}

func TestPool_Process_Error(t *testing.T) {
    doubler := newDoublerWithError(runtime.NumCPU())

    assert.Error(t, doubler.Process(context.Background(), []int{1, 2, 3, 4, 5}, func(_ int, _ int, err error) error {
        return err
    }))
}

func TestPool_Process_Parallel(t *testing.T) {
    doubler := newDoubler(runtime.NumCPU(), 100*time.Millisecond)

    var wg sync.WaitGroup

    for i := 0; i < 8; i++ {
        wg.Add(1)

        go func() {
            defer wg.Done()

            require.NoError(t, doubler.Process(context.Background(), []int{1, 2, 3, 4}, func(_ int, _ int, err error) error {
                return nil
            }))
        }()
    }

    wg.Wait()
}

func TestPool_ProcessAll(t *testing.T) {
    doubler := newDoubler(runtime.NumCPU())

    res, err := doubler.ProcessAll(context.Background(), []int{1, 2, 3, 4, 5})
    require.NoError(t, err)

    assert.Equal(t, map[int]int{
        1: 2,
        2: 4,
        3: 6,
        4: 8,
        5: 10,
    }, res)
}

// newDoubler returns a pool whose work function doubles its input, optionally sleeping first.
func newDoubler(workers int, delay ...time.Duration) *Pool[int, int] {
    return New(workers, func(ctx context.Context, req int) (int, error) {
        if len(delay) > 0 {
            time.Sleep(delay[0])
        }

        return 2 * req, nil
    })
}

// newDoublerWithError returns a pool whose work function fails on even inputs.
func newDoublerWithError(workers int) *Pool[int, int] {
    return New(workers, func(ctx context.Context, req int) (int, error) {
        if req%2 == 0 {
            return 0, errors.New("oops")
        }

        return 2 * req, nil
    })
}