GODT-1158: Store full message bodies on disk
- GODT-1158: simple on-disk cache in store
- GODT-1158: better member naming in event loop
- GODT-1158: create on-disk cache during bridge setup
- GODT-1158: better job options
- GODT-1158: rename GetLiteral to GetRFC822
- GODT-1158: rename events -> currentEvents
- GODT-1158: unlock cache per-user
- GODT-1158: clean up cache after logout
- GODT-1158: randomized encrypted cache passphrase
- GODT-1158: Opt out of on-disk cache in settings
- GODT-1158: free space in cache
- GODT-1158: make tests compile
- GODT-1158: optional compression
- GODT-1158: cache custom location
- GODT-1158: basic capacity checker
- GODT-1158: cache free space config
- GODT-1158: only unlock cache if pmapi client is unlocked as well
- GODT-1158: simple background sync worker
- GODT-1158: set size/bodystructure when caching message
- GODT-1158: limit store db update blocking with semaphore
- GODT-1158: dumb 10-semaphore (see the sketch after this list)
- GODT-1158: properly handle delete; remove bad bodystructure handling
- GODT-1158: hacky fix for caching after logout... baaaaad
- GODT-1158: cache worker
- GODT-1158: compute body structure lazily
- GODT-1158: cache size in store
- GODT-1158: notify cacher when adding to store
- GODT-1158: 15 second store cache watcher
- GODT-1158: enable cacher
- GODT-1158: better cache worker starting/stopping
- GODT-1158: limit cacher to less concurrency than disk cache
- GODT-1158: message builder prio + pchan pkg
- GODT-1158: fix pchan, use in message builder
- GODT-1158: no sem in cacher (rely on message builder prio)
- GODT-1158: raise priority of existing jobs when requested
- GODT-1158: pending messages in on-disk cache
- GODT-1158: WIP just a note about deleting messages from disk cache
- GODT-1158: pending wait when trying to write
- GODT-1158: pending.add to return bool
- GODT-1225: Headers in bodystructure are stored as bytes.
- GODT-1158: fixing header caching
- GODT-1158: don't cache in background
- GODT-1158: all concurrency set in settings
- GODT-1158: worker pools inside message builder
- GODT-1158: fix linter issues
- GODT-1158: remove completed builds from builder
- GODT-1158: remove builder pool
- GODT-1158: cacher defer job done properly
- GODT-1158: fix linter
- GODT-1299: Continue with bodystructure build if deserialization failed
- GODT-1324: Delete messages from the cache when they are deleted on the server
- GODT-1158: refactor cache tests
- GODT-1158: move builder to app/bridge
- GODT-1306: Migrate cache on disk when location is changed (and delete when disabled)
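The history above throttles concurrent store database updates with a plain counting semaphore ("limit store db update blocking with semaphore", "dumb 10-semaphore"). The sketch below shows only the general Go pattern a buffered channel gives you for that kind of limit; the names and the limit of 10 are illustrative and are not Bridge APIs.

package main

import (
    "fmt"
    "sync"
)

// sem is a counting semaphore: at most cap(sem) goroutines hold a slot at once.
type sem chan struct{}

func (s sem) acquire() { s <- struct{}{} }
func (s sem) release() { <-s }

func main() {
    // Allow at most 10 concurrent "store updates", mirroring the 10-slot idea.
    updates := sem(make(chan struct{}, 10))

    var wg sync.WaitGroup
    for i := 0; i < 100; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            updates.acquire()
            defer updates.release()
            fmt.Println("updating store for message", id) // placeholder for the real DB write
        }(i)
    }
    wg.Wait()
}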
@@ -37,21 +37,13 @@ import (
"time"

"github.com/ProtonMail/proton-bridge/internal/bridge"
"github.com/ProtonMail/proton-bridge/internal/config/settings"
"github.com/ProtonMail/proton-bridge/internal/events"
"github.com/ProtonMail/proton-bridge/pkg/listener"
"github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/emersion/go-imap"
goIMAPBackend "github.com/emersion/go-imap/backend"
)

const (
// NOTE: Each fetch worker has its own set of attach workers so there can be up to 20*5=100 API requests at once.
// This is a reasonable limit to not overwhelm API while still maintaining as much parallelism as possible.
fetchWorkers = 20 // In how many workers to fetch message (group list on IMAP).
attachWorkers = 5 // In how many workers to fetch attachments (for one message).
buildWorkers = 20 // In how many workers to build messages.
)

type panicHandler interface {
HandlePanic()
}
@@ -61,26 +53,32 @@ type imapBackend struct {
bridge bridger
updates *imapUpdates
eventListener listener.Listener
listWorkers int

users map[string]*imapUser
usersLocker sync.Locker

builder *message.Builder

imapCache map[string]map[string]string
imapCachePath string
imapCacheLock *sync.RWMutex
}

type settingsProvider interface {
GetInt(string) int
}

// NewIMAPBackend returns struct implementing go-imap/backend interface.
func NewIMAPBackend(
panicHandler panicHandler,
eventListener listener.Listener,
cache cacheProvider,
setting settingsProvider,
bridge *bridge.Bridge,
) *imapBackend { //nolint[golint]
bridgeWrap := newBridgeWrap(bridge)
backend := newIMAPBackend(panicHandler, cache, bridgeWrap, eventListener)

imapWorkers := setting.GetInt(settings.IMAPWorkers)
backend := newIMAPBackend(panicHandler, cache, bridgeWrap, eventListener, imapWorkers)

go backend.monitorDisconnectedUsers()

@@ -92,6 +90,7 @@ func newIMAPBackend(
cache cacheProvider,
bridge bridger,
eventListener listener.Listener,
listWorkers int,
) *imapBackend {
return &imapBackend{
panicHandler: panicHandler,
@@ -102,10 +101,9 @@ func newIMAPBackend(
users: map[string]*imapUser{},
usersLocker: &sync.Mutex{},

builder: message.NewBuilder(fetchWorkers, attachWorkers, buildWorkers),

imapCachePath: cache.GetIMAPCachePath(),
imapCacheLock: &sync.RWMutex{},
listWorkers: listWorkers,
}
}
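The NOTE in the hunk above caps concurrency at fetchWorkers*attachWorkers = 20*5 = 100 in-flight API requests, because every fetch worker owns its own set of attachment workers. The sketch below only illustrates that nesting; fetchAll, fetchOne and the placeholder work inside them are invented for the example and are not Bridge code.

package main

import "sync"

const (
    fetchWorkers  = 20
    attachWorkers = 5
)

// fetchAll fans message IDs out to fetchWorkers goroutines.
func fetchAll(msgIDs []string) {
    jobs := make(chan string)
    var wg sync.WaitGroup

    for w := 0; w < fetchWorkers; w++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for id := range jobs {
                fetchOne(id)
            }
        }()
    }

    for _, id := range msgIDs {
        jobs <- id
    }
    close(jobs)
    wg.Wait()
}

// fetchOne downloads one message; its attachments use at most attachWorkers slots,
// so the whole pool tops out at fetchWorkers*attachWorkers concurrent requests.
func fetchOne(msgID string) {
    attachments := []string{"a", "b", "c"} // placeholder attachment IDs
    slots := make(chan struct{}, attachWorkers)
    var wg sync.WaitGroup

    for _, att := range attachments {
        wg.Add(1)
        slots <- struct{}{}
        go func(att string) {
            defer wg.Done()
            defer func() { <-slots }()
            _ = msgID + "/" + att // placeholder for the actual API download
        }(att)
    }
    wg.Wait()
}

func main() { fetchAll([]string{"m1", "m2", "m3"}) }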
151 internal/imap/cache/cache.go (vendored)
@@ -1,151 +0,0 @@
// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.

package cache

import (
"bytes"
"sort"
"sync"
"time"

pkgMsg "github.com/ProtonMail/proton-bridge/pkg/message"
)

type key struct {
ID string
Timestamp int64
Size int
}

type oldestFirst []key

func (s oldestFirst) Len() int { return len(s) }
func (s oldestFirst) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s oldestFirst) Less(i, j int) bool { return s[i].Timestamp < s[j].Timestamp }

type cachedMessage struct {
key
data []byte
structure pkgMsg.BodyStructure
}

//nolint[gochecknoglobals]
var (
cacheTimeLimit = int64(1 * 60 * 60 * 1000) // milliseconds
cacheSizeLimit = 100 * 1000 * 1000 // B - MUST be larger than email max size limit (~ 25 MB)
mailCache = make(map[string]cachedMessage)

// cacheMutex takes care of one single operation, whereas buildMutex takes
// care of the whole action doing multiple operations. buildMutex will protect
// you from asking server or decrypting or building the same message more
// than once. When first request to build the message comes, it will block
// all other build requests. When the first one is done, all others are
// handled by cache, not doing anything twice. With cacheMutex we are safe
// only to not mess up with the cache, but we could end up downloading and
// building message twice.
cacheMutex = &sync.Mutex{}
buildMutex = &sync.Mutex{}
buildLocks = map[string]interface{}{}
)

func (m *cachedMessage) isValidOrDel() bool {
if m.key.Timestamp+cacheTimeLimit < timestamp() {
delete(mailCache, m.key.ID)
return false
}
return true
}

func timestamp() int64 {
return time.Now().UnixNano() / int64(time.Millisecond)
}

func Clear() {
mailCache = make(map[string]cachedMessage)
}

// BuildLock locks per message level, not on global level.
// Multiple different messages can be building at once.
func BuildLock(messageID string) {
for {
buildMutex.Lock()
if _, ok := buildLocks[messageID]; ok { // if locked, wait
buildMutex.Unlock()
time.Sleep(10 * time.Millisecond)
} else { // if unlocked, lock it
buildLocks[messageID] = struct{}{}
buildMutex.Unlock()
return
}
}
}

func BuildUnlock(messageID string) {
buildMutex.Lock()
defer buildMutex.Unlock()
delete(buildLocks, messageID)
}

func LoadMail(mID string) (reader *bytes.Reader, structure *pkgMsg.BodyStructure) {
reader = &bytes.Reader{}
cacheMutex.Lock()
defer cacheMutex.Unlock()
if message, ok := mailCache[mID]; ok && message.isValidOrDel() {
reader = bytes.NewReader(message.data)
structure = &message.structure

// Update timestamp to keep emails which are used often.
message.Timestamp = timestamp()
}
return
}

func SaveMail(mID string, msg []byte, structure *pkgMsg.BodyStructure) {
cacheMutex.Lock()
defer cacheMutex.Unlock()

newMessage := cachedMessage{
key: key{
ID: mID,
Timestamp: timestamp(),
Size: len(msg),
},
data: msg,
structure: *structure,
}

// Remove old and reduce size.
totalSize := 0
messageList := []key{}
for _, message := range mailCache {
if message.isValidOrDel() {
messageList = append(messageList, message.key)
totalSize += message.key.Size
}
}
sort.Sort(oldestFirst(messageList))
var oldest key
for totalSize+newMessage.key.Size >= cacheSizeLimit {
oldest, messageList = messageList[0], messageList[1:]
delete(mailCache, oldest.ID)
totalSize -= oldest.Size
}

// Write new.
mailCache[mID] = newMessage
}
98 internal/imap/cache/cache_test.go (vendored)
@@ -1,98 +0,0 @@
// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.

package cache

import (
"fmt"
"testing"
"time"

pkgMsg "github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/stretchr/testify/require"
)

var bs = &pkgMsg.BodyStructure{} //nolint[gochecknoglobals]
const testUID = "testmsg"

func TestSaveAndLoad(t *testing.T) {
msg := []byte("Test message")

SaveMail(testUID, msg, bs)
require.Equal(t, mailCache[testUID].data, msg)

reader, _ := LoadMail(testUID)
require.Equal(t, reader.Len(), len(msg))
stored := make([]byte, len(msg))
_, _ = reader.Read(stored)
require.Equal(t, stored, msg)
}

func TestMissing(t *testing.T) {
reader, _ := LoadMail("non-existing")
require.Equal(t, reader.Len(), 0)
}

func TestClearOld(t *testing.T) {
cacheTimeLimit = 10
msg := []byte("Test message")
SaveMail(testUID, msg, bs)
time.Sleep(100 * time.Millisecond)

reader, _ := LoadMail(testUID)
require.Equal(t, reader.Len(), 0)
}

func TestClearBig(t *testing.T) {
r := require.New(t)
wantMessage := []byte("Test message")

wantCacheSize := 3
nTestMessages := wantCacheSize * wantCacheSize
cacheSizeLimit = wantCacheSize*len(wantMessage) + 1
cacheTimeLimit = int64(1 << 20) // be sure the message will survive

// It should never have more than nSize items.
for i := 0; i < nTestMessages; i++ {
time.Sleep(1 * time.Millisecond)
SaveMail(fmt.Sprintf("%s%d", testUID, i), wantMessage, bs)
r.LessOrEqual(len(mailCache), wantCacheSize, "cache too big when %d", i)
}

// Check that the oldest are deleted first.
for i := 0; i < nTestMessages; i++ {
iUID := fmt.Sprintf("%s%d", testUID, i)
reader, _ := LoadMail(iUID)
mail := mailCache[iUID]

if i < (nTestMessages - wantCacheSize) {
r.Zero(reader.Len(), "LoadMail should return empty, but have %s for %s time %d ", string(mail.data), iUID, mail.key.Timestamp)
} else {
stored := make([]byte, len(wantMessage))
_, err := reader.Read(stored)
r.NoError(err)
r.Equal(wantMessage, stored, "LoadMail returned wrong message: %s for %s time %d", stored, iUID, mail.key.Timestamp)
}
}
}

func TestConcurency(t *testing.T) {
msg := []byte("Test message")
for i := 0; i < 10; i++ {
go SaveMail(fmt.Sprintf("%s%d", testUID, i), msg, bs)
}
}
@@ -37,12 +37,10 @@ type imapMailbox struct {
storeUser storeUserProvider
storeAddress storeAddressProvider
storeMailbox storeMailboxProvider

builder *message.Builder
}

// newIMAPMailbox returns struct implementing go-imap/mailbox interface.
func newIMAPMailbox(panicHandler panicHandler, user *imapUser, storeMailbox storeMailboxProvider, builder *message.Builder) *imapMailbox {
func newIMAPMailbox(panicHandler panicHandler, user *imapUser, storeMailbox storeMailboxProvider) *imapMailbox {
return &imapMailbox{
panicHandler: panicHandler,
user: user,
@@ -56,8 +54,6 @@ func newIMAPMailbox(panicHandler panicHandler, user *imapUser, storeMailbox stor
storeUser: user.storeUser,
storeAddress: user.storeAddress,
storeMailbox: storeMailbox,

builder: builder,
}
}

@@ -19,21 +19,13 @@ package imap

import (
"bytes"
"context"

"github.com/ProtonMail/proton-bridge/internal/imap/cache"
"github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
"github.com/emersion/go-imap"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)

func (im *imapMailbox) getMessage(
storeMessage storeMessageProvider,
items []imap.FetchItem,
msgBuildCountHistogram *msgBuildCountHistogram,
) (msg *imap.Message, err error) {
func (im *imapMailbox) getMessage(storeMessage storeMessageProvider, items []imap.FetchItem) (msg *imap.Message, err error) {
msglog := im.log.WithField("msgID", storeMessage.ID())
msglog.Trace("Getting message")

@@ -69,9 +61,12 @@ func (im *imapMailbox) getMessage(
// There is no point having message older than RFC itself, it's not possible.
msg.InternalDate = message.SanitizeMessageDate(m.Time)
case imap.FetchRFC822Size:
if msg.Size, err = im.getSize(storeMessage); err != nil {
size, err := storeMessage.GetRFC822Size()
if err != nil {
return nil, err
}

msg.Size = size
case imap.FetchUid:
if msg.Uid, err = storeMessage.UID(); err != nil {
return nil, err
@@ -79,7 +74,7 @@ func (im *imapMailbox) getMessage(
case imap.FetchAll, imap.FetchFast, imap.FetchFull, imap.FetchRFC822, imap.FetchRFC822Header, imap.FetchRFC822Text:
fallthrough // this is list of defined items by go-imap, but items can be also sections generated from requests
default:
if err = im.getLiteralForSection(item, msg, storeMessage, msgBuildCountHistogram); err != nil {
if err = im.getLiteralForSection(item, msg, storeMessage); err != nil {
return
}
}
@@ -88,35 +83,7 @@ func (im *imapMailbox) getMessage(
return msg, err
}

// getSize returns cached size or it will build the message, save the size in
// DB and then returns the size after build.
//
// We are storing size in DB as part of pmapi messages metada. The size
// attribute on the server represents size of encrypted body. The value is
// cleared in Bridge and the final decrypted size (including header, attachment
// and MIME structure) is computed after building the message.
func (im *imapMailbox) getSize(storeMessage storeMessageProvider) (uint32, error) {
m := storeMessage.Message()
if m.Size <= 0 {
im.log.WithField("msgID", m.ID).Debug("Size unknown - downloading body")
// We are sure the size is not a problem right now. Clients
// might not first check sizes of all messages so we couldn't
// be sure if seeing 1st or 2nd sync is all right or not.
// Therefore, it's better to exclude getting size from the
// counting and see build count as real message build.
if _, _, err := im.getBodyAndStructure(storeMessage, nil); err != nil {
return 0, err
}
}
return uint32(m.Size), nil
}

func (im *imapMailbox) getLiteralForSection(
itemSection imap.FetchItem,
msg *imap.Message,
storeMessage storeMessageProvider,
msgBuildCountHistogram *msgBuildCountHistogram,
) error {
func (im *imapMailbox) getLiteralForSection(itemSection imap.FetchItem, msg *imap.Message, storeMessage storeMessageProvider) error {
section, err := imap.ParseBodySectionName(itemSection)
if err != nil {
log.WithError(err).Warn("Failed to parse body section name; part will be skipped")
@@ -124,7 +91,7 @@ func (im *imapMailbox) getLiteralForSection(
}

var literal imap.Literal
if literal, err = im.getMessageBodySection(storeMessage, section, msgBuildCountHistogram); err != nil {
if literal, err = im.getMessageBodySection(storeMessage, section); err != nil {
return err
}

@@ -149,88 +116,25 @@ func (im *imapMailbox) getBodyStructure(storeMessage storeMessageProvider) (bs *
// be sure if seeing 1st or 2nd sync is all right or not.
// Therefore, it's better to exclude first body structure fetch
// from the counting and see build count as real message build.
if bs, _, err = im.getBodyAndStructure(storeMessage, nil); err != nil {
if bs, _, err = im.getBodyAndStructure(storeMessage); err != nil {
return
}
}
return
}

func (im *imapMailbox) getBodyAndStructure(
storeMessage storeMessageProvider, msgBuildCountHistogram *msgBuildCountHistogram,
) (
structure *message.BodyStructure, bodyReader *bytes.Reader, err error,
) {
m := storeMessage.Message()
id := im.storeUser.UserID() + m.ID
cache.BuildLock(id)
defer cache.BuildUnlock(id)
bodyReader, structure = cache.LoadMail(id)

// return the message which was found in cache
if bodyReader.Len() != 0 && structure != nil {
return structure, bodyReader, nil
func (im *imapMailbox) getBodyAndStructure(storeMessage storeMessageProvider) (*message.BodyStructure, *bytes.Reader, error) {
rfc822, err := storeMessage.GetRFC822()
if err != nil {
return nil, nil, err
}

structure, body, err := im.buildMessage(m)
bodyReader = bytes.NewReader(body)
size := int64(len(body))
l := im.log.WithField("newSize", size).WithField("msgID", m.ID)

if err != nil || structure == nil || size == 0 {
l.WithField("hasStructure", structure != nil).Warn("Failed to build message")
return structure, bodyReader, err
structure, err := storeMessage.GetBodyStructure()
if err != nil {
return nil, nil, err
}

// Save the size, body structure and header even for messages which
// were unable to decrypt. Hence they doesn't have to be computed every
// time.
m.Size = size
cacheMessageInStore(storeMessage, structure, body, l)

if msgBuildCountHistogram != nil {
times, errCount := storeMessage.IncreaseBuildCount()
if errCount != nil {
l.WithError(errCount).Warn("Cannot increase build count")
}
msgBuildCountHistogram.add(times)
}

// Drafts can change therefore we don't want to cache them.
if !isMessageInDraftFolder(m) {
cache.SaveMail(id, body, structure)
}

return structure, bodyReader, err
}

func cacheMessageInStore(storeMessage storeMessageProvider, structure *message.BodyStructure, body []byte, l *logrus.Entry) {
m := storeMessage.Message()
if errSize := storeMessage.SetSize(m.Size); errSize != nil {
l.WithError(errSize).Warn("Cannot update size while building")
}
if structure != nil && !isMessageInDraftFolder(m) {
if errStruct := storeMessage.SetBodyStructure(structure); errStruct != nil {
l.WithError(errStruct).Warn("Cannot update bodystructure while building")
}
}
header, errHead := structure.GetMailHeaderBytes(bytes.NewReader(body))
if errHead == nil && len(header) != 0 {
if errStore := storeMessage.SetHeader(header); errStore != nil {
l.WithError(errStore).Warn("Cannot update header in store")
}
} else {
l.WithError(errHead).Warn("Cannot get header bytes from structure")
}
}

func isMessageInDraftFolder(m *pmapi.Message) bool {
for _, labelID := range m.LabelIDs {
if labelID == pmapi.DraftLabel {
return true
}
}
return false
return structure, bytes.NewReader(rfc822), nil
}

// This will download message (or read from cache) and pick up the section,
@@ -246,11 +150,7 @@ func isMessageInDraftFolder(m *pmapi.Message) bool {
// For all other cases it is necessary to download and decrypt the message
// and drop the header which was obtained from cache. The header will
// will be stored in DB once successfully built. Check `getBodyAndStructure`.
func (im *imapMailbox) getMessageBodySection(
storeMessage storeMessageProvider,
section *imap.BodySectionName,
msgBuildCountHistogram *msgBuildCountHistogram,
) (imap.Literal, error) {
func (im *imapMailbox) getMessageBodySection(storeMessage storeMessageProvider, section *imap.BodySectionName) (imap.Literal, error) {
var header []byte
var response []byte

@@ -260,7 +160,7 @@ func (im *imapMailbox) getMessageBodySection(
if isMainHeaderRequested && storeMessage.IsFullHeaderCached() {
header = storeMessage.GetHeader()
} else {
structure, bodyReader, err := im.getBodyAndStructure(storeMessage, msgBuildCountHistogram)
structure, bodyReader, err := im.getBodyAndStructure(storeMessage)
if err != nil {
return nil, err
}
@@ -276,7 +176,7 @@ func (im *imapMailbox) getMessageBodySection(
case section.Specifier == imap.MIMESpecifier: // The MIME part specifier refers to the [MIME-IMB] header for this part.
fallthrough
case section.Specifier == imap.HeaderSpecifier:
header, err = structure.GetSectionHeaderBytes(bodyReader, section.Path)
header, err = structure.GetSectionHeaderBytes(section.Path)
default:
err = errors.New("Unknown specifier " + string(section.Specifier))
}
@@ -293,30 +193,3 @@ func (im *imapMailbox) getMessageBodySection(
// Trim any output if requested.
return bytes.NewBuffer(section.ExtractPartial(response)), nil
}

// buildMessage from PM to IMAP.
func (im *imapMailbox) buildMessage(m *pmapi.Message) (*message.BodyStructure, []byte, error) {
body, err := im.builder.NewJobWithOptions(
context.Background(),
im.user.client(),
m.ID,
message.JobOptions{
IgnoreDecryptionErrors: true, // Whether to ignore decryption errors and create a "custom message" instead.
SanitizeDate: true, // Whether to replace all dates before 1970 with RFC822's birthdate.
AddInternalID: true, // Whether to include MessageID as X-Pm-Internal-Id.
AddExternalID: true, // Whether to include ExternalID as X-Pm-External-Id.
AddMessageDate: true, // Whether to include message time as X-Pm-Date.
AddMessageIDReference: true, // Whether to include the MessageID in References.
},
).GetResult()
if err != nil {
return nil, nil, err
}

structure, err := message.NewBodyStructure(bytes.NewReader(body))
if err != nil {
return nil, nil, err
}

return structure, body, nil
}

@@ -479,11 +479,16 @@ func (im *imapMailbox) SearchMessages(isUID bool, criteria *imap.SearchCriteria)
}

// Filter by size (only if size was already calculated).
if m.Size > 0 {
if criteria.Larger != 0 && m.Size <= int64(criteria.Larger) {
size, err := storeMessage.GetRFC822Size()
if err != nil {
return nil, err
}

if size > 0 {
if criteria.Larger != 0 && int64(size) <= int64(criteria.Larger) {
continue
}
if criteria.Smaller != 0 && m.Size >= int64(criteria.Smaller) {
if criteria.Smaller != 0 && int64(size) >= int64(criteria.Smaller) {
continue
}
}
@@ -513,13 +518,12 @@ func (im *imapMailbox) SearchMessages(isUID bool, criteria *imap.SearchCriteria)
//
// Messages must be sent to msgResponse. When the function returns, msgResponse must be closed.
func (im *imapMailbox) ListMessages(isUID bool, seqSet *imap.SeqSet, items []imap.FetchItem, msgResponse chan<- *imap.Message) error {
msgBuildCountHistogram := newMsgBuildCountHistogram()
return im.logCommand(func() error {
return im.listMessages(isUID, seqSet, items, msgResponse, msgBuildCountHistogram)
}, "FETCH", isUID, seqSet, items, msgBuildCountHistogram)
return im.listMessages(isUID, seqSet, items, msgResponse)
}, "FETCH", isUID, seqSet, items)
}

func (im *imapMailbox) listMessages(isUID bool, seqSet *imap.SeqSet, items []imap.FetchItem, msgResponse chan<- *imap.Message, msgBuildCountHistogram *msgBuildCountHistogram) (err error) { //nolint[funlen]
func (im *imapMailbox) listMessages(isUID bool, seqSet *imap.SeqSet, items []imap.FetchItem, msgResponse chan<- *imap.Message) (err error) { //nolint[funlen]
defer func() {
close(msgResponse)
if err != nil {
@@ -564,7 +568,7 @@ func (im *imapMailbox) listMessages(isUID bool, seqSet *imap.SeqSet, items []ima
return nil, err
}

msg, err := im.getMessage(storeMessage, items, msgBuildCountHistogram)
msg, err := im.getMessage(storeMessage, items)
if err != nil {
err = fmt.Errorf("list message build: %v", err)
l.WithField("metaID", storeMessage.ID()).Error(err)
@@ -594,7 +598,7 @@ func (im *imapMailbox) listMessages(isUID bool, seqSet *imap.SeqSet, items []ima
return nil
}

err = parallel.RunParallel(fetchWorkers, input, processCallback, collectCallback)
err = parallel.RunParallel(im.user.backend.listWorkers, input, processCallback, collectCallback)
if err != nil {
return err
}

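With this change the FETCH path no longer rebuilds messages on demand; it reads the literal and body structure back from the store via GetRFC822, GetRFC822Size and GetBodyStructure, which the new on-disk cache serves. The sketch below only shows the rough shape of such a cache keyed by message ID; the diskCache type and its file layout are invented for illustration, and the real cache additionally handles encryption with a randomized passphrase, optional compression, pending writes and free-space limits (see the commit list above).

package main

import (
    "os"
    "path/filepath"
)

type diskCache struct{ dir string }

func newDiskCache(dir string) (*diskCache, error) {
    if err := os.MkdirAll(dir, 0o700); err != nil {
        return nil, err
    }
    return &diskCache{dir: dir}, nil
}

func (c *diskCache) path(messageID string) string {
    return filepath.Join(c.dir, messageID)
}

// Set stores the full RFC822 literal for a message.
func (c *diskCache) Set(messageID string, literal []byte) error {
    return os.WriteFile(c.path(messageID), literal, 0o600)
}

// Get returns the cached literal, or an error if it was never cached.
func (c *diskCache) Get(messageID string) ([]byte, error) {
    return os.ReadFile(c.path(messageID))
}

// Delete removes a message, e.g. after it is deleted on the server.
func (c *diskCache) Delete(messageID string) error {
    return os.Remove(c.path(messageID))
}

func main() {
    cache, err := newDiskCache(filepath.Join(os.TempDir(), "bridge-cache-demo"))
    if err != nil {
        panic(err)
    }
    _ = cache.Set("msg-1", []byte("From: a@b.c\r\n\r\nHello"))
    literal, _ := cache.Get("msg-1")
    _ = literal
    _ = cache.Delete("msg-1")
}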
@@ -1,65 +0,0 @@
// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.

package imap

import (
"fmt"
"sync"
)

// msgBuildCountHistogram is used to analyse and log the number of repetitive
// downloads of requested messages per one fetch. The number of builds per each
// messageID is stored in persistent database. The msgBuildCountHistogram will
// take this number for each message in ongoing fetch and create histogram of
// repeats.
//
// Example: During `fetch 1:300` there were
// - 100 messages were downloaded first time
// - 100 messages were downloaded second time
// - 99 messages were downloaded 10th times
// - 1 messages were downloaded 100th times.
type msgBuildCountHistogram struct {
// Key represents how many times message was build.
// Value stores how many messages are build X times based on the key.
counts map[uint32]uint32
lock sync.Locker
}

func newMsgBuildCountHistogram() *msgBuildCountHistogram {
return &msgBuildCountHistogram{
counts: map[uint32]uint32{},
lock: &sync.Mutex{},
}
}

func (c *msgBuildCountHistogram) String() string {
res := ""
for nRebuild, counts := range c.counts {
if res != "" {
res += ", "
}
res += fmt.Sprintf("[%d]:%d", nRebuild, counts)
}
return res
}

func (c *msgBuildCountHistogram) add(nRebuild uint32) {
c.lock.Lock()
defer c.lock.Unlock()
c.counts[nRebuild]++
}
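For reference, the removed histogram would have summarised the example distribution from its own comment (100 messages built once, 100 built twice, 99 built ten times, 1 built a hundred times) as entries like [1]:100, [2]:100, [10]:99, [100]:1, in random map order. The snippet below re-creates just enough of the deleted type to show that String() output; it is a standalone demo, not part of the diff.

package main

import (
    "fmt"
    "sync"
)

// Re-creation of the removed type, only to demonstrate its String() output.
type msgBuildCountHistogram struct {
    counts map[uint32]uint32
    lock   sync.Locker
}

func (c *msgBuildCountHistogram) add(n uint32) {
    c.lock.Lock()
    defer c.lock.Unlock()
    c.counts[n]++
}

func (c *msgBuildCountHistogram) String() string {
    res := ""
    for nRebuild, count := range c.counts {
        if res != "" {
            res += ", "
        }
        res += fmt.Sprintf("[%d]:%d", nRebuild, count)
    }
    return res
}

func main() {
    h := &msgBuildCountHistogram{counts: map[uint32]uint32{}, lock: &sync.Mutex{}}
    for i := 0; i < 100; i++ {
        h.add(1) // 100 messages built for the first time
    }
    for i := 0; i < 100; i++ {
        h.add(2) // 100 messages built twice
    }
    for i := 0; i < 99; i++ {
        h.add(10) // 99 messages built ten times
    }
    h.add(100) // one message built a hundred times

    // Prints something like: [1]:100, [2]:100, [10]:99, [100]:1 (order varies).
    fmt.Println(h.String())
}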
@@ -80,7 +80,6 @@ type storeMailboxProvider interface {
GetDelimiter() string

GetMessage(apiID string) (storeMessageProvider, error)
FetchMessage(apiID string) (storeMessageProvider, error)
LabelMessages(apiID []string) error
UnlabelMessages(apiID []string) error
MarkMessagesRead(apiID []string) error
@@ -100,14 +99,12 @@ type storeMessageProvider interface {
Message() *pmapi.Message
IsMarkedDeleted() bool

SetSize(int64) error
SetHeader([]byte) error
GetHeader() []byte
GetRFC822() ([]byte, error)
GetRFC822Size() (uint32, error)
GetMIMEHeader() textproto.MIMEHeader
IsFullHeaderCached() bool
SetBodyStructure(*pkgMsg.BodyStructure) error
GetBodyStructure() (*pkgMsg.BodyStructure, error)
IncreaseBuildCount() (uint32, error)
}

type storeUserWrap struct {
@@ -165,7 +162,3 @@ func newStoreMailboxWrap(mailbox *store.Mailbox) *storeMailboxWrap {
func (s *storeMailboxWrap) GetMessage(apiID string) (storeMessageProvider, error) {
return s.Mailbox.GetMessage(apiID)
}

func (s *storeMailboxWrap) FetchMessage(apiID string) (storeMessageProvider, error) {
return s.Mailbox.FetchMessage(apiID)
}

@@ -135,7 +135,7 @@ func (iu *imapUser) ListMailboxes(showOnlySubcribed bool) ([]goIMAPBackend.Mailb
if showOnlySubcribed && !iu.isSubscribed(storeMailbox.LabelID()) {
continue
}
mailbox := newIMAPMailbox(iu.panicHandler, iu, storeMailbox, iu.backend.builder)
mailbox := newIMAPMailbox(iu.panicHandler, iu, storeMailbox)
mailboxes = append(mailboxes, mailbox)
}

@@ -167,7 +167,7 @@ func (iu *imapUser) GetMailbox(name string) (mb goIMAPBackend.Mailbox, err error
return
}

return newIMAPMailbox(iu.panicHandler, iu, storeMailbox, iu.backend.builder), nil
return newIMAPMailbox(iu.panicHandler, iu, storeMailbox), nil
}

// CreateMailbox creates a new mailbox.