proton-bridge/pkg/message/build.go
James Houlahan 6bd0739013 GODT-1158: Store full message bodies on disk
- GODT-1158: simple on-disk cache in store
- GODT-1158: better member naming in event loop
- GODT-1158: create on-disk cache during bridge setup
- GODT-1158: better job options
- GODT-1158: rename GetLiteral to GetRFC822
- GODT-1158: rename events -> currentEvents
- GODT-1158: unlock cache per-user
- GODT-1158: clean up cache after logout
- GODT-1158: randomized encrypted cache passphrase
- GODT-1158: Opt out of on-disk cache in settings
- GODT-1158: free space in cache
- GODT-1158: make tests compile
- GODT-1158: optional compression
- GODT-1158: cache custom location
- GODT-1158: basic capacity checker
- GODT-1158: cache free space config
- GODT-1158: only unlock cache if pmapi client is unlocked as well
- GODT-1158: simple background sync worker
- GODT-1158: set size/bodystructure when caching message
- GODT-1158: limit store db update blocking with semaphore
- GODT-1158: dumb 10-semaphore
- GODT-1158: properly handle delete; remove bad bodystructure handling
- GODT-1158: hacky fix for caching after logout... baaaaad
- GODT-1158: cache worker
- GODT-1158: compute body structure lazily
- GODT-1158: cache size in store
- GODT-1158: notify cacher when adding to store
- GODT-1158: 15 second store cache watcher
- GODT-1158: enable cacher
- GODT-1158: better cache worker starting/stopping
- GODT-1158: limit cacher to less concurrency than disk cache
- GODT-1158: message builder prio + pchan pkg
- GODT-1158: fix pchan, use in message builder
- GODT-1158: no sem in cacher (rely on message builder prio)
- GODT-1158: raise priority of existing jobs when requested
- GODT-1158: pending messages in on-disk cache
- GODT-1158: WIP just a note about deleting messages from disk cache
- GODT-1158: pending wait when trying to write
- GODT-1158: pending.add to return bool
- GODT-1225: Headers in bodystructure are stored as bytes.
- GODT-1158: fixing header caching
- GODT-1158: don't cache in background
- GODT-1158: all concurrency set in settings
- GODT-1158: worker pools inside message builder
- GODT-1158: fix linter issues
- GODT-1158: remove completed builds from builder
- GODT-1158: remove builder pool
- GODT-1158: cacher defer job done properly
- GODT-1158: fix linter
- GODT-1299: Continue with bodystructure build if deserialization failed
- GODT-1324: Delete messages from the cache when they are deleted on the server
- GODT-1158: refactor cache tests
- GODT-1158: move builder to app/bridge
- GODT-1306: Migrate cache on disk when location is changed (and delete when disabled)
2021-11-30 10:12:36 +01:00

// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.

package message

import (
	"context"
	"io"
	"io/ioutil"
	"sync"

	"github.com/ProtonMail/gopenpgp/v2/crypto"
	"github.com/ProtonMail/proton-bridge/pkg/pmapi"
	"github.com/ProtonMail/proton-bridge/pkg/pool"
	"github.com/pkg/errors"
)

var (
	ErrDecryptionFailed = errors.New("message could not be decrypted")
	ErrNoSuchKeyRing    = errors.New("the keyring to decrypt this message could not be found")
)

const (
	BackgroundPriority = 1 << iota
	ForegroundPriority
)

// Builder builds RFC 822 message literals from API messages, deduplicating
// concurrent build requests for the same message ID.
type Builder struct {
	pool *pool.Pool
	jobs map[string]*Job
	lock sync.Mutex
}

// Fetcher provides the message data, attachment data and keyrings needed to
// build a message literal.
type Fetcher interface {
	GetMessage(context.Context, string) (*pmapi.Message, error)
	GetAttachment(context.Context, string) (io.ReadCloser, error)
	KeyRingForAddressID(string) (*crypto.KeyRing, error)
}
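
// nullFetcher is a minimal sketch of a Fetcher implementation (hypothetical,
// for illustration only, not part of the bridge API): it serves a fixed
// message with no attachments and a pre-unlocked keyring, roughly what a test
// double would look like.
type nullFetcher struct {
	msg *pmapi.Message
	kr  *crypto.KeyRing
}

func (f nullFetcher) GetMessage(context.Context, string) (*pmapi.Message, error) {
	return f.msg, nil
}

func (f nullFetcher) GetAttachment(context.Context, string) (io.ReadCloser, error) {
	// The sketch serves no attachment bodies; a real fetcher would stream them from the API.
	return nil, errors.New("nullFetcher has no attachments")
}

func (f nullFetcher) KeyRingForAddressID(string) (*crypto.KeyRing, error) {
	return f.kr, nil
}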

// NewBuilder creates a new builder which manages the given number of fetch/attach/build workers.
//   - fetchWorkers: the number of workers which fetch messages from the API.
//   - attachWorkers: the number of workers which fetch attachments from the API.
//
// The returned builder is ready to handle jobs -- see (*Builder).NewJob for more information.
//
// Call (*Builder).Done to shut down the builder and stop all workers.
func NewBuilder(fetchWorkers, attachWorkers int) *Builder {
	attacherPool := pool.New(attachWorkers, newAttacherWorkFunc())
	fetcherPool := pool.New(fetchWorkers, newFetcherWorkFunc(attacherPool))

	return &Builder{
		pool: fetcherPool,
		jobs: make(map[string]*Job),
	}
}
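
// buildMessageExample is a minimal usage sketch (a hypothetical helper, not an
// API of this package): it assumes the caller already has a Fetcher, e.g. a
// pmapi client wrapper, and shows the intended NewBuilder/NewJob/GetResult
// call pattern.
func buildMessageExample(ctx context.Context, fetcher Fetcher, messageID string) ([]byte, error) {
	// The worker counts here are arbitrary; the bridge configures them from settings.
	builder := NewBuilder(4, 8)
	defer builder.Done()

	// Request the literal at foreground priority; done must be called once the
	// result is no longer needed so the job is removed from the builder.
	job, done := builder.NewJob(ctx, fetcher, messageID, ForegroundPriority)
	defer done()

	// GetResult waits for the build to finish and returns the RFC 822 literal.
	return job.GetResult()
}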

// NewJob tells the builder to build the message with the given ID at the given
// priority, using the default job options.
func (builder *Builder) NewJob(ctx context.Context, fetcher Fetcher, messageID string, prio int) (*Job, pool.DoneFunc) {
	return builder.NewJobWithOptions(ctx, fetcher, messageID, JobOptions{}, prio)
}

// NewJobWithOptions tells the builder to build the message with the given ID
// and options. If a job for that message is already in flight, it is reused,
// and its priority is raised if the new request is more urgent.
func (builder *Builder) NewJobWithOptions(ctx context.Context, fetcher Fetcher, messageID string, opts JobOptions, prio int) (*Job, pool.DoneFunc) {
	builder.lock.Lock()
	defer builder.lock.Unlock()

	if job, ok := builder.jobs[messageID]; ok {
		if job.GetPriority() < prio {
			job.SetPriority(prio)
		}

		return job, job.done
	}

	job, done := builder.pool.NewJob(
		&fetchReq{
			fetcher:   fetcher,
			messageID: messageID,
			options:   opts,
		},
		prio,
	)

	buildJob := &Job{
		Job:  job,
		done: done,
	}

	builder.jobs[messageID] = buildJob

	return buildJob, func() {
		builder.lock.Lock()
		defer builder.lock.Unlock()

		// Remove the job from the builder.
		delete(builder.jobs, messageID)

		// And mark it as done.
		done()
	}
}
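
// raisePriorityExample is a hypothetical sketch of how the job reuse above is
// meant to be used: a background cache fill queues a message at
// BackgroundPriority, and a later foreground request for the same ID reuses
// the in-flight job, bumping it to ForegroundPriority instead of building the
// message twice.
func raisePriorityExample(ctx context.Context, builder *Builder, fetcher Fetcher, messageID string) ([]byte, error) {
	// Background prefetch (e.g. the cacher warming the on-disk cache).
	_, bgDone := builder.NewJob(ctx, fetcher, messageID, BackgroundPriority)
	defer bgDone()

	// Foreground request (e.g. an IMAP FETCH): same messageID, higher priority.
	// Its done func is discarded here only to keep the sketch short; a real
	// caller releases its own job once finished with the result.
	fgJob, _ := builder.NewJob(ctx, fetcher, messageID, ForegroundPriority)

	return fgJob.GetResult()
}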

func (builder *Builder) Done() {
	// NOTE(GODT-1158): Stop worker pool.
}

// fetchReq is the payload handed to fetcher workers.
type fetchReq struct {
	fetcher   Fetcher
	messageID string
	options   JobOptions
}

// attachReq is the payload handed to attacher workers.
type attachReq struct {
	fetcher Fetcher
	message *pmapi.Message
}

// Job wraps a pool job together with its done callback.
type Job struct {
	*pool.Job

	done pool.DoneFunc
}

// GetResult returns the built RFC 822 literal once the job has completed, or
// an error if the build failed.
func (job *Job) GetResult() ([]byte, error) {
	res, err := job.Job.GetResult()
	if err != nil {
		return nil, err
	}

	return res.([]byte), nil
}

// newAttacherWorkFunc returns the work function run by attacher workers: it
// downloads every attachment of the given message and returns the raw bodies
// keyed by attachment ID.
func newAttacherWorkFunc() pool.WorkFunc {
	return func(payload interface{}, prio int) (interface{}, error) {
		req, ok := payload.(*attachReq)
		if !ok {
			panic("bad payload type")
		}

		res := make(map[string][]byte)

		for _, att := range req.message.Attachments {
			rc, err := req.fetcher.GetAttachment(context.Background(), att.ID)
			if err != nil {
				return nil, err
			}

			b, err := ioutil.ReadAll(rc)
			if err != nil {
				return nil, err
			}

			if err := rc.Close(); err != nil {
				return nil, err
			}

			res[att.ID] = b
		}

		return res, nil
	}
}

// newFetcherWorkFunc returns the work function run by fetcher workers: it
// fetches the message, schedules its attachments on the attacher pool at the
// same priority, obtains the keyring for the message's address and builds the
// RFC 822 literal.
func newFetcherWorkFunc(attacherPool *pool.Pool) pool.WorkFunc {
	return func(payload interface{}, prio int) (interface{}, error) {
		req, ok := payload.(*fetchReq)
		if !ok {
			panic("bad payload type")
		}

		msg, err := req.fetcher.GetMessage(context.Background(), req.messageID)
		if err != nil {
			return nil, err
		}

		attJob, attDone := attacherPool.NewJob(&attachReq{
			fetcher: req.fetcher,
			message: msg,
		}, prio)
		defer attDone()

		val, err := attJob.GetResult()
		if err != nil {
			return nil, err
		}

		attData, ok := val.(map[string][]byte)
		if !ok {
			panic("bad response type")
		}

		kr, err := req.fetcher.KeyRingForAddressID(msg.AddressID)
		if err != nil {
			return nil, ErrNoSuchKeyRing
		}

		return buildRFC822(kr, msg, attData, req.options)
	}
}