Mirror of https://github.com/ProtonMail/proton-bridge.git, synced 2025-12-17 23:56:56 +00:00
GODT-1158: Store full message bodies on disk
- GODT-1158: simple on-disk cache in store
- GODT-1158: better member naming in event loop
- GODT-1158: create on-disk cache during bridge setup
- GODT-1158: better job options
- GODT-1158: rename GetLiteral to GetRFC822
- GODT-1158: rename events -> currentEvents
- GODT-1158: unlock cache per-user
- GODT-1158: clean up cache after logout
- GODT-1158: randomized encrypted cache passphrase
- GODT-1158: Opt out of on-disk cache in settings
- GODT-1158: free space in cache
- GODT-1158: make tests compile
- GODT-1158: optional compression
- GODT-1158: cache custom location
- GODT-1158: basic capacity checker
- GODT-1158: cache free space config
- GODT-1158: only unlock cache if pmapi client is unlocked as well
- GODT-1158: simple background sync worker
- GODT-1158: set size/bodystructure when caching message
- GODT-1158: limit store db update blocking with semaphore
- GODT-1158: dumb 10-semaphore
- GODT-1158: properly handle delete; remove bad bodystructure handling
- GODT-1158: hacky fix for caching after logout... baaaaad
- GODT-1158: cache worker
- GODT-1158: compute body structure lazily
- GODT-1158: cache size in store
- GODT-1158: notify cacher when adding to store
- GODT-1158: 15 second store cache watcher
- GODT-1158: enable cacher
- GODT-1158: better cache worker starting/stopping
- GODT-1158: limit cacher to less concurrency than disk cache
- GODT-1158: message builder prio + pchan pkg
- GODT-1158: fix pchan, use in message builder
- GODT-1158: no sem in cacher (rely on message builder prio)
- GODT-1158: raise priority of existing jobs when requested
- GODT-1158: pending messages in on-disk cache
- GODT-1158: WIP just a note about deleting messages from disk cache
- GODT-1158: pending wait when trying to write
- GODT-1158: pending.add to return bool
- GODT-1225: Headers in bodystructure are stored as bytes.
- GODT-1158: fixing header caching
- GODT-1158: don't cache in background
- GODT-1158: all concurrency set in settings
- GODT-1158: worker pools inside message builder
- GODT-1158: fix linter issues
- GODT-1158: remove completed builds from builder
- GODT-1158: remove builder pool
- GODT-1158: cacher defer job done properly
- GODT-1158: fix linter
- GODT-1299: Continue with bodystructure build if deserialization failed
- GODT-1324: Delete messages from the cache when they are deleted on the server
- GODT-1158: refactor cache tests
- GODT-1158: move builder to app/bridge
- GODT-1306: Migrate cache on disk when location is changed (and delete when disabled)
@ -18,99 +18,113 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
||||
"github.com/ProtonMail/proton-bridge/pkg/message"
|
||||
"github.com/sirupsen/logrus"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// Cache caches the last event IDs for all accounts (there should be only one instance).
|
||||
type Cache struct {
|
||||
// cache is map from userID => key (such as last event) => value (such as event ID).
|
||||
cache map[string]map[string]string
|
||||
path string
|
||||
lock *sync.RWMutex
|
||||
}
|
||||
const passphraseKey = "passphrase"
|
||||
|
||||
// NewCache constructs a new cache at the given path.
|
||||
func NewCache(path string) *Cache {
|
||||
return &Cache{
|
||||
path: path,
|
||||
lock: &sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Cache) getEventID(userID string) string {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.loadCache(); err != nil {
|
||||
log.WithError(err).Warn("Problem to load store cache")
|
||||
}
|
||||
|
||||
if c.cache == nil {
|
||||
c.cache = map[string]map[string]string{}
|
||||
}
|
||||
if c.cache[userID] == nil {
|
||||
c.cache[userID] = map[string]string{}
|
||||
}
|
||||
|
||||
return c.cache[userID]["events"]
|
||||
}
|
||||
|
||||
func (c *Cache) setEventID(userID, eventID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.cache[userID] == nil {
|
||||
c.cache[userID] = map[string]string{}
|
||||
}
|
||||
c.cache[userID]["events"] = eventID
|
||||
|
||||
return c.saveCache()
|
||||
}
|
||||
|
||||
func (c *Cache) loadCache() error {
|
||||
if c.cache != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Open(c.path)
|
||||
// UnlockCache unlocks the cache for the user with the given keyring.
|
||||
func (store *Store) UnlockCache(kr *crypto.KeyRing) error {
|
||||
passphrase, err := store.getCachePassphrase()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close() //nolint[errcheck]
|
||||
|
||||
return json.NewDecoder(f).Decode(&c.cache)
|
||||
}
|
||||
if passphrase == nil {
|
||||
if passphrase, err = crypto.RandomToken(32); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Cache) saveCache() error {
|
||||
if c.cache == nil {
|
||||
return errors.New("events: cannot save cache: cache is nil")
|
||||
enc, err := kr.Encrypt(crypto.NewPlainMessage(passphrase), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := store.setCachePassphrase(enc.GetBinary()); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
dec, err := kr.Decrypt(crypto.NewPGPMessage(passphrase), nil, crypto.GetUnixTime())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
passphrase = dec.GetBinary()
|
||||
}
|
||||
|
||||
f, err := os.Create(c.path)
|
||||
if err := store.cache.Unlock(store.user.ID(), passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store.cacher.start()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (store *Store) getCachePassphrase() ([]byte, error) {
|
||||
var passphrase []byte
|
||||
|
||||
if err := store.db.View(func(tx *bolt.Tx) error {
|
||||
passphrase = tx.Bucket(cachePassphraseBucket).Get([]byte(passphraseKey))
|
||||
return nil
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return passphrase, nil
|
||||
}
|
||||
|
||||
func (store *Store) setCachePassphrase(passphrase []byte) error {
|
||||
return store.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket(cachePassphraseBucket).Put([]byte(passphraseKey), passphrase)
|
||||
})
|
||||
}
|
||||
|
||||
func (store *Store) clearCachePassphrase() error {
|
||||
return store.db.Update(func(tx *bolt.Tx) error {
|
||||
return tx.Bucket(cachePassphraseBucket).Delete([]byte(passphraseKey))
|
||||
})
|
||||
}
|
||||
|
||||
func (store *Store) getCachedMessage(messageID string) ([]byte, error) {
|
||||
if store.cache.Has(store.user.ID(), messageID) {
|
||||
return store.cache.Get(store.user.ID(), messageID)
|
||||
}
|
||||
|
||||
job, done := store.newBuildJob(messageID, message.ForegroundPriority)
|
||||
defer done()
|
||||
|
||||
literal, err := job.GetResult()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// NOTE(GODT-1158): No need to block until cache has been set; do this async?
|
||||
if err := store.cache.Set(store.user.ID(), messageID, literal); err != nil {
|
||||
logrus.WithError(err).Error("Failed to cache message")
|
||||
}
|
||||
|
||||
return literal, nil
|
||||
}
|
||||
|
||||
// IsCached returns whether the given message already exists in the cache.
|
||||
func (store *Store) IsCached(messageID string) bool {
|
||||
return store.cache.Has(store.user.ID(), messageID)
|
||||
}
|
||||
|
||||
// BuildAndCacheMessage builds the given message with background priority and puts it in the cache.
|
||||
func (store *Store) BuildAndCacheMessage(messageID string) error {
|
||||
job, done := store.newBuildJob(messageID, message.BackgroundPriority)
|
||||
defer done()
|
||||
|
||||
literal, err := job.GetResult()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close() //nolint[errcheck]
|
||||
|
||||
return json.NewEncoder(f).Encode(c.cache)
|
||||
}
|
||||
|
||||
func (c *Cache) clearCacheUser(userID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.cache == nil {
|
||||
log.WithField("user", userID).Warning("Cannot clear user from cache: cache is nil")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithField("user", userID).Trace("Removing user from event loop cache")
|
||||
|
||||
delete(c.cache, userID)
|
||||
|
||||
return c.saveCache()
|
||||
return store.cache.Set(store.user.ID(), messageID, literal)
|
||||
}
|
||||
|
||||
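Condensed from the added lines of the hunk above (the removed lines belong to the old event-ID cache, which reappears below as the new internal/store/events.go), the unlock flow reads as follows. This is a reading sketch assembled from the diff, not the literal patch:

func (store *Store) UnlockCache(kr *crypto.KeyRing) error {
	// Load the PGP-encrypted cache passphrase from the store's bolt DB.
	passphrase, err := store.getCachePassphrase()
	if err != nil {
		return err
	}

	if passphrase == nil {
		// First unlock: generate a random 32-byte passphrase, encrypt it
		// with the user's keyring and persist only the ciphertext.
		if passphrase, err = crypto.RandomToken(32); err != nil {
			return err
		}

		enc, err := kr.Encrypt(crypto.NewPlainMessage(passphrase), nil)
		if err != nil {
			return err
		}

		if err := store.setCachePassphrase(enc.GetBinary()); err != nil {
			return err
		}
	} else {
		// Later unlocks: decrypt the stored passphrase with the keyring.
		dec, err := kr.Decrypt(crypto.NewPGPMessage(passphrase), nil, crypto.GetUnixTime())
		if err != nil {
			return err
		}

		passphrase = dec.GetBinary()
	}

	// Unlock this user's slot in the message cache and start the background cacher.
	if err := store.cache.Unlock(store.user.ID(), passphrase); err != nil {
		return err
	}

	store.cacher.start()

	return nil
}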
73 internal/store/cache/cache_test.go vendored Normal file
@ -0,0 +1,73 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestOnDiskCacheNoCompression(t *testing.T) {
|
||||
cache, err := NewOnDiskCache(t.TempDir(), &NoopCompressor{}, Options{ConcurrentRead: runtime.NumCPU(), ConcurrentWrite: runtime.NumCPU()})
|
||||
require.NoError(t, err)
|
||||
|
||||
testCache(t, cache)
|
||||
}
|
||||
|
||||
func TestOnDiskCacheGZipCompression(t *testing.T) {
|
||||
cache, err := NewOnDiskCache(t.TempDir(), &GZipCompressor{}, Options{ConcurrentRead: runtime.NumCPU(), ConcurrentWrite: runtime.NumCPU()})
|
||||
require.NoError(t, err)
|
||||
|
||||
testCache(t, cache)
|
||||
}
|
||||
|
||||
func TestInMemoryCache(t *testing.T) {
|
||||
testCache(t, NewInMemoryCache(1<<20))
|
||||
}
|
||||
|
||||
func testCache(t *testing.T, cache Cache) {
|
||||
assert.NoError(t, cache.Unlock("userID1", []byte("my secret passphrase")))
|
||||
assert.NoError(t, cache.Unlock("userID2", []byte("my other passphrase")))
|
||||
|
||||
getSetCachedMessage(t, cache, "userID1", "messageID1", "some secret")
|
||||
assert.True(t, cache.Has("userID1", "messageID1"))
|
||||
|
||||
getSetCachedMessage(t, cache, "userID2", "messageID2", "some other secret")
|
||||
assert.True(t, cache.Has("userID2", "messageID2"))
|
||||
|
||||
assert.NoError(t, cache.Rem("userID1", "messageID1"))
|
||||
assert.False(t, cache.Has("userID1", "messageID1"))
|
||||
|
||||
assert.NoError(t, cache.Rem("userID2", "messageID2"))
|
||||
assert.False(t, cache.Has("userID2", "messageID2"))
|
||||
|
||||
assert.NoError(t, cache.Delete("userID1"))
|
||||
assert.NoError(t, cache.Delete("userID2"))
|
||||
}
|
||||
|
||||
func getSetCachedMessage(t *testing.T, cache Cache, userID, messageID, secret string) {
|
||||
assert.NoError(t, cache.Set(userID, messageID, []byte(secret)))
|
||||
|
||||
data, err := cache.Get(userID, messageID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, []byte(secret), data)
|
||||
}
|
||||
33 internal/store/cache/compressor.go vendored Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
type Compressor interface {
|
||||
Compress([]byte) ([]byte, error)
|
||||
Decompress([]byte) ([]byte, error)
|
||||
}
|
||||
|
||||
type NoopCompressor struct{}
|
||||
|
||||
func (NoopCompressor) Compress(dec []byte) ([]byte, error) {
|
||||
return dec, nil
|
||||
}
|
||||
|
||||
func (NoopCompressor) Decompress(cmp []byte) ([]byte, error) {
|
||||
return cmp, nil
|
||||
}
|
||||
60 internal/store/cache/compressor_gzip.go vendored Normal file
@ -0,0 +1,60 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
)
|
||||
|
||||
type GZipCompressor struct{}
|
||||
|
||||
func (GZipCompressor) Compress(dec []byte) ([]byte, error) {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
zw := gzip.NewWriter(buf)
|
||||
|
||||
if _, err := zw.Write(dec); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := zw.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (GZipCompressor) Decompress(cmp []byte) ([]byte, error) {
|
||||
zr, err := gzip.NewReader(bytes.NewReader(cmp))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
if _, err := buf.ReadFrom(zr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := zr.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
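As a quick illustration of the round-trip property both compressors must satisfy (the real coverage comes from testCache in cache_test.go above), a sketch written as if it sat next to the other tests in the cache package; the test name and payload are made up:

func TestGZipCompressorRoundTrip(t *testing.T) {
	payload := []byte("Subject: hello\r\n\r\nfull RFC822 literal") // illustrative payload

	var zip GZipCompressor

	cmp, err := zip.Compress(payload)
	require.NoError(t, err)

	dec, err := zip.Decompress(cmp)
	require.NoError(t, err)

	// Decompressing the compressed blob must give back the original bytes.
	require.Equal(t, payload, dec)
}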
244 internal/store/cache/disk.go vendored Normal file
@ -0,0 +1,244 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/pkg/semaphore"
|
||||
"github.com/ricochet2200/go-disk-usage/du"
|
||||
)
|
||||
|
||||
var ErrLowSpace = errors.New("not enough free space left on device")
|
||||
|
||||
type onDiskCache struct {
|
||||
path string
|
||||
opts Options
|
||||
|
||||
gcm map[string]cipher.AEAD
|
||||
cmp Compressor
|
||||
rsem, wsem semaphore.Semaphore
|
||||
pending *pending
|
||||
|
||||
diskSize uint64
|
||||
diskFree uint64
|
||||
once *sync.Once
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func NewOnDiskCache(path string, cmp Compressor, opts Options) (Cache, error) {
|
||||
if err := os.MkdirAll(path, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
usage := du.NewDiskUsage(path)
|
||||
|
||||
// NOTE(GODT-1158): use Available() or Free()?
|
||||
return &onDiskCache{
|
||||
path: path,
|
||||
opts: opts,
|
||||
|
||||
gcm: make(map[string]cipher.AEAD),
|
||||
cmp: cmp,
|
||||
rsem: semaphore.New(opts.ConcurrentRead),
|
||||
wsem: semaphore.New(opts.ConcurrentWrite),
|
||||
pending: newPending(),
|
||||
|
||||
diskSize: usage.Size(),
|
||||
diskFree: usage.Available(),
|
||||
once: &sync.Once{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *onDiskCache) Unlock(userID string, passphrase []byte) error {
|
||||
hash := sha256.New()
|
||||
|
||||
if _, err := hash.Write(passphrase); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
aes, err := aes.NewCipher(hash.Sum(nil))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gcm, err := cipher.NewGCM(aes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(c.getUserPath(userID), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.gcm[userID] = gcm
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *onDiskCache) Delete(userID string) error {
|
||||
defer c.update()
|
||||
|
||||
return os.RemoveAll(c.getUserPath(userID))
|
||||
}
|
||||
|
||||
// Has returns whether the given message exists in the cache.
|
||||
func (c *onDiskCache) Has(userID, messageID string) bool {
|
||||
c.pending.wait(c.getMessagePath(userID, messageID))
|
||||
|
||||
c.rsem.Lock()
|
||||
defer c.rsem.Unlock()
|
||||
|
||||
_, err := os.Stat(c.getMessagePath(userID, messageID))
|
||||
|
||||
switch {
|
||||
case err == nil:
|
||||
return true
|
||||
|
||||
case os.IsNotExist(err):
|
||||
return false
|
||||
|
||||
default:
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *onDiskCache) Get(userID, messageID string) ([]byte, error) {
|
||||
enc, err := c.readFile(c.getMessagePath(userID, messageID))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmp, err := c.gcm[userID].Open(nil, enc[:c.gcm[userID].NonceSize()], enc[c.gcm[userID].NonceSize():], nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.cmp.Decompress(cmp)
|
||||
}
|
||||
|
||||
func (c *onDiskCache) Set(userID, messageID string, literal []byte) error {
|
||||
nonce := make([]byte, c.gcm[userID].NonceSize())
|
||||
|
||||
if _, err := rand.Read(nonce); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cmp, err := c.cmp.Compress(literal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE(GODT-1158): How to properly handle low space? Don't return error, that's bad. Instead send event?
|
||||
if !c.hasSpace(len(cmp)) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return c.writeFile(c.getMessagePath(userID, messageID), c.gcm[userID].Seal(nonce, nonce, cmp, nil))
|
||||
}
|
||||
|
||||
func (c *onDiskCache) Rem(userID, messageID string) error {
|
||||
defer c.update()
|
||||
|
||||
return os.Remove(c.getMessagePath(userID, messageID))
|
||||
}
|
||||
|
||||
func (c *onDiskCache) readFile(path string) ([]byte, error) {
|
||||
c.rsem.Lock()
|
||||
defer c.rsem.Unlock()
|
||||
|
||||
// Wait before reading in case the file is currently being written.
|
||||
c.pending.wait(path)
|
||||
|
||||
return ioutil.ReadFile(filepath.Clean(path))
|
||||
}
|
||||
|
||||
func (c *onDiskCache) writeFile(path string, b []byte) error {
|
||||
c.wsem.Lock()
|
||||
defer c.wsem.Unlock()
|
||||
|
||||
// Mark the file as currently being written.
|
||||
// If it's already being written, wait for it to be done and return nil.
|
||||
// NOTE(GODT-1158): Let's hope it succeeded...
|
||||
if ok := c.pending.add(path); !ok {
|
||||
c.pending.wait(path)
|
||||
return nil
|
||||
}
|
||||
defer c.pending.done(path)
|
||||
|
||||
// Reduce the approximate free space (update it exactly later).
|
||||
c.lock.Lock()
|
||||
c.diskFree -= uint64(len(b))
|
||||
c.lock.Unlock()
|
||||
|
||||
// Update the diskFree eventually.
|
||||
defer c.update()
|
||||
|
||||
// NOTE(GODT-1158): What happens when this fails? Should be fixed eventually.
|
||||
return ioutil.WriteFile(filepath.Clean(path), b, 0600)
|
||||
}
|
||||
|
||||
func (c *onDiskCache) hasSpace(size int) bool {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.opts.MinFreeAbs > 0 {
|
||||
if c.diskFree-uint64(size) < c.opts.MinFreeAbs {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if c.opts.MinFreeRat > 0 {
|
||||
if float64(c.diskFree-uint64(size))/float64(c.diskSize) < c.opts.MinFreeRat {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *onDiskCache) update() {
|
||||
go func() {
|
||||
c.once.Do(func() {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
// Update the free space.
|
||||
c.diskFree = du.NewDiskUsage(c.path).Available()
|
||||
|
||||
// Reset the Once object (so we can update again).
|
||||
c.once = &sync.Once{}
|
||||
})
|
||||
}()
|
||||
}
|
||||
|
||||
func (c *onDiskCache) getUserPath(userID string) string {
|
||||
return filepath.Join(c.path, getHash(userID))
|
||||
}
|
||||
|
||||
func (c *onDiskCache) getMessagePath(userID, messageID string) string {
|
||||
return filepath.Join(c.getUserPath(userID), getHash(messageID))
|
||||
}
|
||||
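Taken together, Unlock, Set and Get above define the on-disk record layout: each message lives at <cachePath>/<hex(sha256(userID))>/<hex(sha256(messageID))>, and the file body is the GCM nonce followed by the AES-256-GCM ciphertext (key = SHA-256 of the cache passphrase) of the compressed literal. A standalone decryption of such a file would therefore look roughly like this; decryptCachedFile is a hypothetical helper, not part of the patch, and assumes the same crypto/aes, crypto/cipher and crypto/sha256 imports as disk.go:

func decryptCachedFile(passphrase, blob []byte, cmp Compressor) ([]byte, error) {
	// Same key derivation as Unlock: AES-256 key = SHA-256(passphrase).
	key := sha256.Sum256(passphrase)

	block, err := aes.NewCipher(key[:])
	if err != nil {
		return nil, err
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	// The nonce is stored as the file's prefix, exactly as Set seals it.
	compressed, err := gcm.Open(nil, blob[:gcm.NonceSize()], blob[gcm.NonceSize():], nil)
	if err != nil {
		return nil, err
	}

	return cmp.Decompress(compressed)
}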
33 internal/store/cache/hash.go vendored Normal file
@ -0,0 +1,33 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
)
|
||||
|
||||
func getHash(name string) string {
|
||||
hash := sha256.New()
|
||||
|
||||
if _, err := hash.Write([]byte(name)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return hex.EncodeToString(hash.Sum(nil))
|
||||
}
|
||||
104 internal/store/cache/memory.go vendored Normal file
@ -0,0 +1,104 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type inMemoryCache struct {
|
||||
lock sync.RWMutex
|
||||
data map[string]map[string][]byte
|
||||
size, limit int
|
||||
}
|
||||
|
||||
// NewInMemoryCache creates a new in-memory cache which stores up to the given number of bytes of cached data.
|
||||
// NOTE(GODT-1158): Make this threadsafe.
|
||||
func NewInMemoryCache(limit int) Cache {
|
||||
return &inMemoryCache{
|
||||
data: make(map[string]map[string][]byte),
|
||||
limit: limit,
|
||||
}
|
||||
}
|
||||
|
||||
func (c *inMemoryCache) Unlock(userID string, passphrase []byte) error {
|
||||
c.data[userID] = make(map[string][]byte)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *inMemoryCache) Delete(userID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
for _, message := range c.data[userID] {
|
||||
c.size -= len(message)
|
||||
}
|
||||
|
||||
delete(c.data, userID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Has returns whether the given message exists in the cache.
|
||||
func (c *inMemoryCache) Has(userID, messageID string) bool {
|
||||
if _, err := c.Get(userID, messageID); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *inMemoryCache) Get(userID, messageID string) ([]byte, error) {
|
||||
c.lock.RLock()
|
||||
defer c.lock.RUnlock()
|
||||
|
||||
literal, ok := c.data[userID][messageID]
|
||||
if !ok {
|
||||
return nil, errors.New("no such message in cache")
|
||||
}
|
||||
|
||||
return literal, nil
|
||||
}
|
||||
|
||||
// NOTE(GODT-1158): What to actually do when memory limit is reached? Replace something existing? Return error? Drop silently?
|
||||
// NOTE(GODT-1158): Pull in cache-rotating feature from old IMAP cache.
|
||||
func (c *inMemoryCache) Set(userID, messageID string, literal []byte) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.size+len(literal) > c.limit {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.size += len(literal)
|
||||
c.data[userID][messageID] = literal
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *inMemoryCache) Rem(userID, messageID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
c.size -= len(c.data[userID][messageID])
|
||||
|
||||
delete(c.data[userID], messageID)
|
||||
|
||||
return nil
|
||||
}
|
||||
25 internal/store/cache/options.go vendored Normal file
@ -0,0 +1,25 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
type Options struct {
|
||||
MinFreeAbs uint64
|
||||
MinFreeRat float64
|
||||
ConcurrentRead int
|
||||
ConcurrentWrite int
|
||||
}
|
||||
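For context, hasSpace in disk.go above skips a write of n bytes when diskFree-n would fall below MinFreeAbs, or when (diskFree-n)/diskSize would fall below MinFreeRat. A sketch with illustrative values (not defaults taken from the code), written as if inside a function in the cache package with runtime imported:

opts := Options{
	MinFreeAbs:      250 << 20,        // stop caching once free space would drop below ~250 MiB (illustrative)
	MinFreeRat:      0.02,             // ...or below 2% of the disk (illustrative)
	ConcurrentRead:  runtime.NumCPU(), // mirrors the values used in cache_test.go above
	ConcurrentWrite: runtime.NumCPU(),
}

Leaving either threshold at zero disables that particular check, since hasSpace only tests MinFreeAbs and MinFreeRat when they are greater than zero.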
61 internal/store/cache/pending.go vendored Normal file
@ -0,0 +1,61 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import "sync"
|
||||
|
||||
type pending struct {
|
||||
lock sync.Mutex
|
||||
path map[string]chan struct{}
|
||||
}
|
||||
|
||||
func newPending() *pending {
|
||||
return &pending{path: make(map[string]chan struct{})}
|
||||
}
|
||||
|
||||
func (p *pending) add(path string) bool {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
if _, ok := p.path[path]; ok {
|
||||
return false
|
||||
}
|
||||
|
||||
p.path[path] = make(chan struct{})
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *pending) wait(path string) {
|
||||
p.lock.Lock()
|
||||
ch, ok := p.path[path]
|
||||
p.lock.Unlock()
|
||||
|
||||
if ok {
|
||||
<-ch
|
||||
}
|
||||
}
|
||||
|
||||
func (p *pending) done(path string) {
|
||||
p.lock.Lock()
|
||||
defer p.lock.Unlock()
|
||||
|
||||
defer close(p.path[path])
|
||||
|
||||
delete(p.path, path)
|
||||
}
|
||||
51 internal/store/cache/pending_test.go vendored Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestPending(t *testing.T) {
|
||||
pending := newPending()
|
||||
|
||||
pending.add("1")
|
||||
pending.add("2")
|
||||
pending.add("3")
|
||||
|
||||
resCh := make(chan string)
|
||||
|
||||
go func() { pending.wait("1"); resCh <- "1" }()
|
||||
go func() { pending.wait("2"); resCh <- "2" }()
|
||||
go func() { pending.wait("3"); resCh <- "3" }()
|
||||
|
||||
pending.done("1")
|
||||
assert.Equal(t, "1", <-resCh)
|
||||
|
||||
pending.done("2")
|
||||
assert.Equal(t, "2", <-resCh)
|
||||
|
||||
pending.done("3")
|
||||
assert.Equal(t, "3", <-resCh)
|
||||
}
|
||||
|
||||
func TestPendingUnknown(t *testing.T) {
|
||||
newPending().wait("this is not currently being waited")
|
||||
}
|
||||
28 internal/store/cache/types.go vendored Normal file
@ -0,0 +1,28 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cache
|
||||
|
||||
type Cache interface {
|
||||
Unlock(userID string, passphrase []byte) error
|
||||
Delete(userID string) error
|
||||
|
||||
Has(userID, messageID string) bool
|
||||
Get(userID, messageID string) ([]byte, error)
|
||||
Set(userID, messageID string, literal []byte) error
|
||||
Rem(userID, messageID string) error
|
||||
}
|
||||
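The testCache helper in cache_test.go above exercises exactly this lifecycle; condensed into a sketch (the helper name, IDs and passphrase are illustrative, not part of the patch):

func cacheLifecycle(c Cache) error {
	// Unlock prepares the per-user slot; the on-disk implementation also
	// derives the AES-GCM key from the passphrase here.
	if err := c.Unlock("userID", []byte("some passphrase")); err != nil {
		return err
	}

	// Store and read back a built RFC822 literal.
	if err := c.Set("userID", "messageID", []byte("literal")); err != nil {
		return err
	}

	if c.Has("userID", "messageID") {
		if _, err := c.Get("userID", "messageID"); err != nil {
			return err
		}
	}

	// Rem drops a single message; Delete wipes the whole user (used after logout).
	if err := c.Rem("userID", "messageID"); err != nil {
		return err
	}

	return c.Delete("userID")
}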
63 internal/store/cache_watcher.go Normal file
@ -0,0 +1,63 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package store
|
||||
|
||||
import "time"
|
||||
|
||||
func (store *Store) StartWatcher() {
|
||||
store.done = make(chan struct{})
|
||||
|
||||
go func() {
|
||||
ticker := time.NewTicker(3 * time.Minute)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-ticker.C:
|
||||
// NOTE(GODT-1158): Race condition here? What if DB was already closed?
|
||||
messageIDs, err := store.getAllMessageIDs()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, messageID := range messageIDs {
|
||||
if !store.IsCached(messageID) {
|
||||
store.cacher.newJob(messageID)
|
||||
}
|
||||
}
|
||||
|
||||
case <-store.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (store *Store) stopWatcher() {
|
||||
if store.done == nil {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
default:
|
||||
close(store.done)
|
||||
|
||||
case <-store.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
104 internal/store/cache_worker.go Normal file
@ -0,0 +1,104 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type Cacher struct {
|
||||
storer Storer
|
||||
jobs chan string
|
||||
done chan struct{}
|
||||
started bool
|
||||
wg *sync.WaitGroup
|
||||
}
|
||||
|
||||
type Storer interface {
|
||||
IsCached(messageID string) bool
|
||||
BuildAndCacheMessage(messageID string) error
|
||||
}
|
||||
|
||||
func newCacher(storer Storer) *Cacher {
|
||||
return &Cacher{
|
||||
storer: storer,
|
||||
jobs: make(chan string),
|
||||
done: make(chan struct{}),
|
||||
wg: &sync.WaitGroup{},
|
||||
}
|
||||
}
|
||||
|
||||
// newJob sends a new job to the cacher if it's running.
|
||||
func (cacher *Cacher) newJob(messageID string) {
|
||||
if !cacher.started {
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-cacher.done:
|
||||
return
|
||||
|
||||
default:
|
||||
if !cacher.storer.IsCached(messageID) {
|
||||
cacher.wg.Add(1)
|
||||
go func() { cacher.jobs <- messageID }()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cacher *Cacher) start() {
|
||||
cacher.started = true
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case messageID := <-cacher.jobs:
|
||||
go cacher.handleJob(messageID)
|
||||
|
||||
case <-cacher.done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (cacher *Cacher) handleJob(messageID string) {
|
||||
defer cacher.wg.Done()
|
||||
|
||||
if err := cacher.storer.BuildAndCacheMessage(messageID); err != nil {
|
||||
logrus.WithError(err).Error("Failed to build and cache message")
|
||||
} else {
|
||||
logrus.WithField("messageID", messageID).Trace("Message cached")
|
||||
}
|
||||
}
|
||||
|
||||
func (cacher *Cacher) stop() {
|
||||
cacher.started = false
|
||||
|
||||
cacher.wg.Wait()
|
||||
|
||||
select {
|
||||
case <-cacher.done:
|
||||
return
|
||||
|
||||
default:
|
||||
close(cacher.done)
|
||||
}
|
||||
}
|
||||
103 internal/store/cache_worker_test.go Normal file
@ -0,0 +1,103 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
storemocks "github.com/ProtonMail/proton-bridge/internal/store/mocks"
|
||||
"github.com/golang/mock/gomock"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func withTestCacher(t *testing.T, doTest func(storer *storemocks.MockStorer, cacher *Cacher)) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
// Mock storer used to build/cache messages.
|
||||
storer := storemocks.NewMockStorer(ctrl)
|
||||
|
||||
// Create a new cacher pointing to the fake store.
|
||||
cacher := newCacher(storer)
|
||||
|
||||
// Start the cacher and wait for it to stop.
|
||||
cacher.start()
|
||||
defer cacher.stop()
|
||||
|
||||
doTest(storer, cacher)
|
||||
}
|
||||
|
||||
func TestCacher(t *testing.T) {
|
||||
// If the message is not yet cached, we should expect to try to build and cache it.
|
||||
withTestCacher(t, func(storer *storemocks.MockStorer, cacher *Cacher) {
|
||||
storer.EXPECT().IsCached("messageID").Return(false)
|
||||
storer.EXPECT().BuildAndCacheMessage("messageID").Return(nil)
|
||||
cacher.newJob("messageID")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacherAlreadyCached(t *testing.T) {
|
||||
// If the message is already cached, we should not try to build it.
|
||||
withTestCacher(t, func(storer *storemocks.MockStorer, cacher *Cacher) {
|
||||
storer.EXPECT().IsCached("messageID").Return(true)
|
||||
cacher.newJob("messageID")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacherFail(t *testing.T) {
|
||||
// If building the message fails, we should not try to cache it.
|
||||
withTestCacher(t, func(storer *storemocks.MockStorer, cacher *Cacher) {
|
||||
storer.EXPECT().IsCached("messageID").Return(false)
|
||||
storer.EXPECT().BuildAndCacheMessage("messageID").Return(errors.New("failed to build message"))
|
||||
cacher.newJob("messageID")
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacherStop(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
// Mock storer used to build/cache messages.
|
||||
storer := storemocks.NewMockStorer(ctrl)
|
||||
|
||||
// Create a new cacher pointing to the fake store.
|
||||
cacher := newCacher(storer)
|
||||
|
||||
// Start the cacher.
|
||||
cacher.start()
|
||||
|
||||
// Send a job -- this should succeed.
|
||||
storer.EXPECT().IsCached("messageID").Return(false)
|
||||
storer.EXPECT().BuildAndCacheMessage("messageID").Return(nil)
|
||||
cacher.newJob("messageID")
|
||||
|
||||
// Stop the cacher.
|
||||
cacher.stop()
|
||||
|
||||
// Send more jobs -- these should all be dropped.
|
||||
cacher.newJob("messageID2")
|
||||
cacher.newJob("messageID3")
|
||||
cacher.newJob("messageID4")
|
||||
cacher.newJob("messageID5")
|
||||
|
||||
// Stopping the cacher multiple times is safe.
|
||||
cacher.stop()
|
||||
cacher.stop()
|
||||
cacher.stop()
|
||||
cacher.stop()
|
||||
}
|
||||
@ -34,7 +34,7 @@ func TestNotifyChangeCreateOrUpdateMessage(t *testing.T) {
|
||||
m.changeNotifier.EXPECT().UpdateMessage(addr1, "All Mail", uint32(1), uint32(1), gomock.Any(), false)
|
||||
m.changeNotifier.EXPECT().UpdateMessage(addr1, "All Mail", uint32(2), uint32(2), gomock.Any(), false)
|
||||
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
m.store.SetChangeNotifier(m.changeNotifier)
|
||||
|
||||
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})
|
||||
@ -49,7 +49,7 @@ func TestNotifyChangeCreateOrUpdateMessages(t *testing.T) {
|
||||
m.changeNotifier.EXPECT().UpdateMessage(addr1, "All Mail", uint32(1), uint32(1), gomock.Any(), false)
|
||||
m.changeNotifier.EXPECT().UpdateMessage(addr1, "All Mail", uint32(2), uint32(2), gomock.Any(), false)
|
||||
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
m.store.SetChangeNotifier(m.changeNotifier)
|
||||
|
||||
msg1 := getTestMessage("msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})
|
||||
@ -61,7 +61,7 @@ func TestNotifyChangeDeleteMessage(t *testing.T) {
|
||||
m, clear := initMocks(t)
|
||||
defer clear()
|
||||
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
|
||||
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})
|
||||
insertMessage(t, m, "msg2", "Test message 2", addrID1, false, []string{pmapi.AllMailLabel})
|
||||
|
||||
@ -38,7 +38,7 @@ const (
|
||||
)
|
||||
|
||||
type eventLoop struct {
|
||||
cache *Cache
|
||||
currentEvents *Events
|
||||
currentEventID string
|
||||
currentEvent *pmapi.Event
|
||||
pollCh chan chan struct{}
|
||||
@ -51,26 +51,26 @@ type eventLoop struct {
|
||||
|
||||
log *logrus.Entry
|
||||
|
||||
store *Store
|
||||
user BridgeUser
|
||||
events listener.Listener
|
||||
store *Store
|
||||
user BridgeUser
|
||||
listener listener.Listener
|
||||
}
|
||||
|
||||
func newEventLoop(cache *Cache, store *Store, user BridgeUser, events listener.Listener) *eventLoop {
|
||||
func newEventLoop(currentEvents *Events, store *Store, user BridgeUser, listener listener.Listener) *eventLoop {
|
||||
eventLog := log.WithField("userID", user.ID())
|
||||
eventLog.Trace("Creating new event loop")
|
||||
|
||||
return &eventLoop{
|
||||
cache: cache,
|
||||
currentEventID: cache.getEventID(user.ID()),
|
||||
currentEvents: currentEvents,
|
||||
currentEventID: currentEvents.getEventID(user.ID()),
|
||||
pollCh: make(chan chan struct{}),
|
||||
isRunning: false,
|
||||
|
||||
log: eventLog,
|
||||
|
||||
store: store,
|
||||
user: user,
|
||||
events: events,
|
||||
store: store,
|
||||
user: user,
|
||||
listener: listener,
|
||||
}
|
||||
}
|
||||
|
||||
@ -89,7 +89,7 @@ func (loop *eventLoop) setFirstEventID() (err error) {
|
||||
|
||||
loop.currentEventID = event.EventID
|
||||
|
||||
if err = loop.cache.setEventID(loop.user.ID(), loop.currentEventID); err != nil {
|
||||
if err = loop.currentEvents.setEventID(loop.user.ID(), loop.currentEventID); err != nil {
|
||||
loop.log.WithError(err).Error("Could not set latest event ID in user cache")
|
||||
return
|
||||
}
|
||||
@ -229,7 +229,7 @@ func (loop *eventLoop) processNextEvent() (more bool, err error) { // nolint[fun
|
||||
|
||||
if err != nil && isFdCloseToULimit() {
|
||||
l.Warn("Ulimit reached")
|
||||
loop.events.Emit(bridgeEvents.RestartBridgeEvent, "")
|
||||
loop.listener.Emit(bridgeEvents.RestartBridgeEvent, "")
|
||||
err = nil
|
||||
}
|
||||
|
||||
@ -291,7 +291,7 @@ func (loop *eventLoop) processNextEvent() (more bool, err error) { // nolint[fun
|
||||
// This allows the event loop to continue to function (unless the cache was broken
|
||||
// and bridge stopped, in which case it will start from the old event ID anyway).
|
||||
loop.currentEventID = event.EventID
|
||||
if err = loop.cache.setEventID(loop.user.ID(), event.EventID); err != nil {
|
||||
if err = loop.currentEvents.setEventID(loop.user.ID(), event.EventID); err != nil {
|
||||
return false, errors.Wrap(err, "failed to save event ID to cache")
|
||||
}
|
||||
}
|
||||
@ -371,7 +371,7 @@ func (loop *eventLoop) processAddresses(log *logrus.Entry, addressEvents []*pmap
|
||||
switch addressEvent.Action {
|
||||
case pmapi.EventCreate:
|
||||
log.WithField("email", addressEvent.Address.Email).Debug("Address was created")
|
||||
loop.events.Emit(bridgeEvents.AddressChangedEvent, loop.user.GetPrimaryAddress())
|
||||
loop.listener.Emit(bridgeEvents.AddressChangedEvent, loop.user.GetPrimaryAddress())
|
||||
|
||||
case pmapi.EventUpdate:
|
||||
oldAddress := oldList.ByID(addressEvent.ID)
|
||||
@ -383,7 +383,7 @@ func (loop *eventLoop) processAddresses(log *logrus.Entry, addressEvents []*pmap
|
||||
email := oldAddress.Email
|
||||
log.WithField("email", email).Debug("Address was updated")
|
||||
if addressEvent.Address.Receive != oldAddress.Receive {
|
||||
loop.events.Emit(bridgeEvents.AddressChangedLogoutEvent, email)
|
||||
loop.listener.Emit(bridgeEvents.AddressChangedLogoutEvent, email)
|
||||
}
|
||||
|
||||
case pmapi.EventDelete:
|
||||
@ -396,7 +396,7 @@ func (loop *eventLoop) processAddresses(log *logrus.Entry, addressEvents []*pmap
|
||||
email := oldAddress.Email
|
||||
log.WithField("email", email).Debug("Address was deleted")
|
||||
loop.user.CloseConnection(email)
|
||||
loop.events.Emit(bridgeEvents.AddressChangedLogoutEvent, email)
|
||||
loop.listener.Emit(bridgeEvents.AddressChangedLogoutEvent, email)
|
||||
case pmapi.EventUpdateFlags:
|
||||
log.Error("EventUpdateFlags for address event is uknown operation")
|
||||
}
|
||||
|
||||
@ -53,7 +53,7 @@ func TestEventLoopProcessMoreEvents(t *testing.T) {
|
||||
More: false,
|
||||
}, nil),
|
||||
)
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
|
||||
// Event loop runs in goroutine started during store creation (newStoreNoEvents).
|
||||
// Force to run the next event.
|
||||
@ -78,7 +78,7 @@ func TestEventLoopUpdateMessageFromLoop(t *testing.T) {
|
||||
subject := "old subject"
|
||||
newSubject := "new subject"
|
||||
|
||||
m.newStoreNoEvents(true, &pmapi.Message{
|
||||
m.newStoreNoEvents(t, true, &pmapi.Message{
|
||||
ID: "msg1",
|
||||
Subject: subject,
|
||||
})
|
||||
@ -106,7 +106,7 @@ func TestEventLoopDeletionNotPaused(t *testing.T) {
|
||||
m, clear := initMocks(t)
|
||||
defer clear()
|
||||
|
||||
m.newStoreNoEvents(true, &pmapi.Message{
|
||||
m.newStoreNoEvents(t, true, &pmapi.Message{
|
||||
ID: "msg1",
|
||||
Subject: "subject",
|
||||
LabelIDs: []string{"label"},
|
||||
@ -133,7 +133,7 @@ func TestEventLoopDeletionPaused(t *testing.T) {
|
||||
m, clear := initMocks(t)
|
||||
defer clear()
|
||||
|
||||
m.newStoreNoEvents(true, &pmapi.Message{
|
||||
m.newStoreNoEvents(t, true, &pmapi.Message{
|
||||
ID: "msg1",
|
||||
Subject: "subject",
|
||||
LabelIDs: []string{"label"},
|
||||
|
||||
116 internal/store/events.go Normal file
@ -0,0 +1,116 @@
|
||||
// Copyright (c) 2021 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package store
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Events caches the last event IDs for all accounts (there should be only one instance).
|
||||
type Events struct {
|
||||
// eventMap is map from userID => key (such as last event) => value (such as event ID).
|
||||
eventMap map[string]map[string]string
|
||||
path string
|
||||
lock *sync.RWMutex
|
||||
}
|
||||
|
||||
// NewEvents constructs a new event cache at the given path.
|
||||
func NewEvents(path string) *Events {
|
||||
return &Events{
|
||||
path: path,
|
||||
lock: &sync.RWMutex{},
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Events) getEventID(userID string) string {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if err := c.loadEvents(); err != nil {
|
||||
log.WithError(err).Warn("Problem to load store events")
|
||||
}
|
||||
|
||||
if c.eventMap == nil {
|
||||
c.eventMap = map[string]map[string]string{}
|
||||
}
|
||||
if c.eventMap[userID] == nil {
|
||||
c.eventMap[userID] = map[string]string{}
|
||||
}
|
||||
|
||||
return c.eventMap[userID]["events"]
|
||||
}
|
||||
|
||||
func (c *Events) setEventID(userID, eventID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.eventMap[userID] == nil {
|
||||
c.eventMap[userID] = map[string]string{}
|
||||
}
|
||||
c.eventMap[userID]["events"] = eventID
|
||||
|
||||
return c.saveEvents()
|
||||
}
|
||||
|
||||
func (c *Events) loadEvents() error {
|
||||
if c.eventMap != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
f, err := os.Open(c.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close() //nolint[errcheck]
|
||||
|
||||
return json.NewDecoder(f).Decode(&c.eventMap)
|
||||
}
|
||||
|
||||
func (c *Events) saveEvents() error {
|
||||
if c.eventMap == nil {
|
||||
return errors.New("events: cannot save events: events map is nil")
|
||||
}
|
||||
|
||||
f, err := os.Create(c.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close() //nolint[errcheck]
|
||||
|
||||
return json.NewEncoder(f).Encode(c.eventMap)
|
||||
}
|
||||
|
||||
func (c *Events) clearUserEvents(userID string) error {
|
||||
c.lock.Lock()
|
||||
defer c.lock.Unlock()
|
||||
|
||||
if c.eventMap == nil {
|
||||
log.WithField("user", userID).Warning("Cannot clear user events: event map is nil")
|
||||
return nil
|
||||
}
|
||||
|
||||
log.WithField("user", userID).Trace("Removing user events from event loop")
|
||||
|
||||
delete(c.eventMap, userID)
|
||||
|
||||
return c.saveEvents()
|
||||
}
|
||||
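For orientation, the event loop earlier in this diff is the consumer of this type (see newEventLoop, setFirstEventID and processNextEvent). A minimal usage sketch; the helper name, path and IDs are illustrative:

func exampleEventsUsage() {
	currentEvents := NewEvents("/path/to/user_events.json") // illustrative path

	// The event loop reads the last processed event ID on startup
	// ("" when nothing has been stored for this user yet)...
	lastEventID := currentEvents.getEventID("userID")
	_ = lastEventID

	// ...and persists the new ID after each processed event.
	if err := currentEvents.setEventID("userID", "newEventID"); err != nil {
		log.WithError(err).Error("Could not save event ID")
	}
}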
@ -107,7 +107,7 @@ func checkCounts(t testing.TB, wantCounts []*pmapi.MessagesCount, haveStore *Sto
|
||||
func TestMailboxCountRemove(t *testing.T) {
|
||||
m, clear := initMocks(t)
|
||||
defer clear()
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
|
||||
testCounts := []*pmapi.MessagesCount{
|
||||
{LabelID: "label1", Total: 100, Unread: 0},
|
||||
|
||||
@ -35,7 +35,7 @@ func TestGetSequenceNumberAndGetUID(t *testing.T) {
|
||||
m, clear := initMocks(t)
|
||||
defer clear()
|
||||
|
||||
m.newStoreNoEvents(true)
|
||||
m.newStoreNoEvents(t, true)
|
||||
|
||||
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel, pmapi.InboxLabel})
|
||||
insertMessage(t, m, "msg2", "Test message 2", addrID1, false, []string{pmapi.AllMailLabel, pmapi.ArchiveLabel})
|
||||
@@ -80,7 +80,7 @@ func TestGetUIDByHeader(t *testing.T) { //nolint[funlen]
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)

tstMsg := getTestMessage("msg1", "Without external ID", addrID1, false, []string{pmapi.AllMailLabel, pmapi.SentLabel})
require.Nil(t, m.store.createOrUpdateMessageEvent(tstMsg))

@@ -67,40 +67,19 @@ func (message *Message) Message() *pmapi.Message {
return message.msg
}

// IsMarkedDeleted returns true if message is marked as deleted for specific
// mailbox.
// IsMarkedDeleted returns true if message is marked as deleted for specific mailbox.
func (message *Message) IsMarkedDeleted() bool {
isMarkedAsDeleted := false
err := message.storeMailbox.db().View(func(tx *bolt.Tx) error {
var isMarkedAsDeleted bool

if err := message.storeMailbox.db().View(func(tx *bolt.Tx) error {
isMarkedAsDeleted = message.storeMailbox.txGetDeletedIDsBucket(tx).Get([]byte(message.msg.ID)) != nil
return nil
})
if err != nil {
}); err != nil {
message.storeMailbox.log.WithError(err).Error("Not able to retrieve deleted mark, assuming false.")
return false
}
return isMarkedAsDeleted
}

// SetSize updates the information about size of decrypted message which can be
// used for IMAP. This should not trigger any IMAP update.
// NOTE: The size from the server corresponds to pure body bytes. Hence it
// should not be used. The correct size has to be calculated from decrypted and
// built message.
func (message *Message) SetSize(size int64) error {
message.msg.Size = size
txUpdate := func(tx *bolt.Tx) error {
stored, err := message.store.txGetMessage(tx, message.msg.ID)
if err != nil {
return err
}
stored.Size = size
return message.store.txPutMessage(
tx.Bucket(metadataBucket),
stored,
)
}
return message.store.db.Update(txUpdate)
return isMarkedAsDeleted
}

// SetContentTypeAndHeader updates the information about content type and
@@ -112,7 +91,7 @@ func (message *Message) SetSize(size int64) error {
func (message *Message) SetContentTypeAndHeader(mimeType string, header mail.Header) error {
message.msg.MIMEType = mimeType
message.msg.Header = header
txUpdate := func(tx *bolt.Tx) error {
return message.store.db.Update(func(tx *bolt.Tx) error {
stored, err := message.store.txGetMessage(tx, message.msg.ID)
if err != nil {
return err
@@ -123,34 +102,26 @@ func (message *Message) SetContentTypeAndHeader(mimeType string, header mail.Hea
tx.Bucket(metadataBucket),
stored,
)
}
return message.store.db.Update(txUpdate)
}

// SetHeader checks header can be parsed and if yes it stores header bytes in
// database.
func (message *Message) SetHeader(header []byte) error {
_, err := textproto.NewReader(bufio.NewReader(bytes.NewReader(header))).ReadMIMEHeader()
if err != nil {
return err
}
return message.store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(headersBucket).Put([]byte(message.ID()), header)
})
}

// IsFullHeaderCached will check that valid full header is stored in DB.
func (message *Message) IsFullHeaderCached() bool {
header, err := message.getRawHeader()
return err == nil && header != nil
}

func (message *Message) getRawHeader() (raw []byte, err error) {
err = message.store.db.View(func(tx *bolt.Tx) error {
raw = tx.Bucket(headersBucket).Get([]byte(message.ID()))
var raw []byte
err := message.store.db.View(func(tx *bolt.Tx) error {
raw = tx.Bucket(bodystructureBucket).Get([]byte(message.ID()))
return nil
})
return
return err == nil && raw != nil
}

func (message *Message) getRawHeader() ([]byte, error) {
bs, err := message.GetBodyStructure()
if err != nil {
return nil, err
}

return bs.GetMailHeaderBytes()
}

// GetHeader will return cached header from DB.
@@ -178,44 +149,79 @@ func (message *Message) GetMIMEHeader() textproto.MIMEHeader {
return header
}

// SetBodyStructure stores serialized body structure in database.
func (message *Message) SetBodyStructure(bs *pkgMsg.BodyStructure) error {
txUpdate := func(tx *bolt.Tx) error {
return message.store.txPutBodyStructure(
tx.Bucket(bodystructureBucket),
message.ID(), bs,
)
}
return message.store.db.Update(txUpdate)
}
// GetBodyStructure returns the message's body structure.
// It checks first if it's in the store. If it is, it returns it from store,
// otherwise it computes it from the message cache (and saves the result to the store).
func (message *Message) GetBodyStructure() (*pkgMsg.BodyStructure, error) {
var raw []byte

// GetBodyStructure deserializes body structure from database. If body structure
// is not in database it returns nil error and nil body structure. If error
// occurs it returns nil body structure.
func (message *Message) GetBodyStructure() (bs *pkgMsg.BodyStructure, err error) {
txRead := func(tx *bolt.Tx) error {
bs, err = message.store.txGetBodyStructure(
tx.Bucket(bodystructureBucket),
message.ID(),
)
return err
}
if err = message.store.db.View(txRead); err != nil {
if err := message.store.db.View(func(tx *bolt.Tx) error {
raw = tx.Bucket(bodystructureBucket).Get([]byte(message.ID()))
return nil
}); err != nil {
return nil, err
}

if len(raw) > 0 {
// If not possible to deserialize just continue with build.
if bs, err := pkgMsg.DeserializeBodyStructure(raw); err == nil {
return bs, nil
}
}

literal, err := message.store.getCachedMessage(message.ID())
if err != nil {
return nil, err
}

bs, err := pkgMsg.NewBodyStructure(bytes.NewReader(literal))
if err != nil {
return nil, err
}

if raw, err = bs.Serialize(); err != nil {
return nil, err
}

if err := message.store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(bodystructureBucket).Put([]byte(message.ID()), raw)
}); err != nil {
return nil, err
}

return bs, nil
}

func (message *Message) IncreaseBuildCount() (times uint32, err error) {
txUpdate := func(tx *bolt.Tx) error {
times, err = message.store.txIncreaseMsgBuildCount(
tx.Bucket(msgBuildCountBucket),
message.ID(),
)
return err
}
if err = message.store.db.Update(txUpdate); err != nil {
// GetRFC822 returns the raw message literal.
func (message *Message) GetRFC822() ([]byte, error) {
return message.store.getCachedMessage(message.ID())
}

// GetRFC822Size returns the size of the raw message literal.
func (message *Message) GetRFC822Size() (uint32, error) {
var raw []byte

if err := message.store.db.View(func(tx *bolt.Tx) error {
raw = tx.Bucket(sizeBucket).Get([]byte(message.ID()))
return nil
}); err != nil {
return 0, err
}
return times, nil

if len(raw) > 0 {
return btoi(raw), nil
}

literal, err := message.store.getCachedMessage(message.ID())
if err != nil {
return 0, err
}

if err := message.store.db.Update(func(tx *bolt.Tx) error {
return tx.Bucket(sizeBucket).Put([]byte(message.ID()), itob(uint32(len(literal))))
}); err != nil {
return 0, err
}

return uint32(len(literal)), nil
}
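
Both GetBodyStructure and GetRFC822Size above follow the same read-through pattern: look the serialized value up in a bolt bucket first and, on a miss, rebuild it from the cached message literal and persist it for next time. The following is a minimal, self-contained sketch of that pattern against a bbolt database; the itob/btoi helpers are assumed here to be big-endian uint32 encoders, and loadLiteral stands in for store.getCachedMessage, so the names are illustrative rather than the bridge's own.

package main

import (
	"encoding/binary"
	"log"

	bolt "go.etcd.io/bbolt"
)

var sizeBucket = []byte("size")

// itob/btoi are assumed big-endian uint32 encoders, mirroring the helpers referenced above.
func itob(v uint32) []byte {
	b := make([]byte, 4)
	binary.BigEndian.PutUint32(b, v)
	return b
}

func btoi(b []byte) uint32 { return binary.BigEndian.Uint32(b) }

// getRFC822Size returns the cached size for messageID, computing and storing
// it from the literal returned by loadLiteral when it is not cached yet.
func getRFC822Size(db *bolt.DB, messageID string, loadLiteral func(string) ([]byte, error)) (uint32, error) {
	var raw []byte

	if err := db.View(func(tx *bolt.Tx) error {
		raw = tx.Bucket(sizeBucket).Get([]byte(messageID))
		return nil
	}); err != nil {
		return 0, err
	}

	if len(raw) > 0 {
		return btoi(raw), nil
	}

	literal, err := loadLiteral(messageID)
	if err != nil {
		return 0, err
	}

	size := uint32(len(literal))

	if err := db.Update(func(tx *bolt.Tx) error {
		return tx.Bucket(sizeBucket).Put([]byte(messageID), itob(size))
	}); err != nil {
		return 0, err
	}

	return size, nil
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Create the bucket up front, as the store does when it opens its database.
	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(sizeBucket)
		return err
	}); err != nil {
		log.Fatal(err)
	}

	size, err := getRFC822Size(db, "msg1", func(string) ([]byte, error) {
		return []byte("From: a@b.c\r\n\r\nbody"), nil
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("cached size:", size)
}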

@@ -1,5 +1,5 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: github.com/ProtonMail/proton-bridge/internal/store (interfaces: PanicHandler,BridgeUser,ChangeNotifier)
// Source: github.com/ProtonMail/proton-bridge/internal/store (interfaces: PanicHandler,BridgeUser,ChangeNotifier,Storer)

// Package mocks is a generated GoMock package.
package mocks
@@ -318,3 +318,54 @@ func (mr *MockChangeNotifierMockRecorder) UpdateMessage(arg0, arg1, arg2, arg3,
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMessage", reflect.TypeOf((*MockChangeNotifier)(nil).UpdateMessage), arg0, arg1, arg2, arg3, arg4, arg5)
}

// MockStorer is a mock of Storer interface.
type MockStorer struct {
ctrl *gomock.Controller
recorder *MockStorerMockRecorder
}

// MockStorerMockRecorder is the mock recorder for MockStorer.
type MockStorerMockRecorder struct {
mock *MockStorer
}

// NewMockStorer creates a new mock instance.
func NewMockStorer(ctrl *gomock.Controller) *MockStorer {
mock := &MockStorer{ctrl: ctrl}
mock.recorder = &MockStorerMockRecorder{mock}
return mock
}

// EXPECT returns an object that allows the caller to indicate expected use.
func (m *MockStorer) EXPECT() *MockStorerMockRecorder {
return m.recorder
}

// BuildAndCacheMessage mocks base method.
func (m *MockStorer) BuildAndCacheMessage(arg0 string) error {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "BuildAndCacheMessage", arg0)
ret0, _ := ret[0].(error)
return ret0
}

// BuildAndCacheMessage indicates an expected call of BuildAndCacheMessage.
func (mr *MockStorerMockRecorder) BuildAndCacheMessage(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildAndCacheMessage", reflect.TypeOf((*MockStorer)(nil).BuildAndCacheMessage), arg0)
}

// IsCached mocks base method.
func (m *MockStorer) IsCached(arg0 string) bool {
m.ctrl.T.Helper()
ret := m.ctrl.Call(m, "IsCached", arg0)
ret0, _ := ret[0].(bool)
return ret0
}

// IsCached indicates an expected call of IsCached.
func (mr *MockStorerMockRecorder) IsCached(arg0 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsCached", reflect.TypeOf((*MockStorer)(nil).IsCached), arg0)
}
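
A sketch of how the generated MockStorer above might be wired into a test: expectations are registered on the mock and then exercised. The test body and its assertions are illustrative; only the mock constructor and the two methods shown in this diff are used.

package mocks_test

import (
	"testing"

	storemocks "github.com/ProtonMail/proton-bridge/internal/store/mocks"
	"github.com/golang/mock/gomock"
)

func TestCacherUsesStorer(t *testing.T) {
	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	storer := storemocks.NewMockStorer(ctrl)

	// Expect the message to be reported as not cached, then built exactly once.
	storer.EXPECT().IsCached("msgID").Return(false)
	storer.EXPECT().BuildAndCacheMessage("msgID").Return(nil)

	if !storer.IsCached("msgID") {
		if err := storer.BuildAndCacheMessage("msgID"); err != nil {
			t.Fatal(err)
		}
	}
}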

@@ -26,8 +26,11 @@ import (
"time"

"github.com/ProtonMail/proton-bridge/internal/sentry"
"github.com/ProtonMail/proton-bridge/internal/store/cache"
"github.com/ProtonMail/proton-bridge/pkg/listener"
"github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
"github.com/ProtonMail/proton-bridge/pkg/pool"
"github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -52,19 +55,21 @@ var (

// Database structure:
// * metadata
//   * {messageID} -> message data (subject, from, to, time, body size, ...)
//   * {messageID} -> message data (subject, from, to, time, ...)
// * headers
//   * {messageID} -> header bytes
// * bodystructure
//   * {messageID} -> message body structure
// * msgbuildcount
//   * {messageID} -> uint32 number of message builds to track re-sync issues
// * size
//   * {messageID} -> uint32 value
// * counts
//   * {mailboxID} -> mailboxCounts: totalOnAPI, unreadOnAPI, labelName, labelColor, labelIsExclusive
// * address_info
//   * {index} -> {address, addressID}
// * address_mode
//   * mode -> string split or combined
// * cache_passphrase
//   * passphrase -> cache passphrase (pgp encrypted message)
// * mailboxes_version
//   * version -> uint32 value
// * sync_state
@@ -79,19 +84,20 @@ var (
//   * {messageID} -> uint32 imapUID
// * deleted_ids (can be missing or have no keys)
//   * {messageID} -> true
metadataBucket = []byte("metadata") //nolint[gochecknoglobals]
headersBucket = []byte("headers") //nolint[gochecknoglobals]
bodystructureBucket = []byte("bodystructure") //nolint[gochecknoglobals]
msgBuildCountBucket = []byte("msgbuildcount") //nolint[gochecknoglobals]
countsBucket = []byte("counts") //nolint[gochecknoglobals]
addressInfoBucket = []byte("address_info") //nolint[gochecknoglobals]
addressModeBucket = []byte("address_mode") //nolint[gochecknoglobals]
syncStateBucket = []byte("sync_state") //nolint[gochecknoglobals]
mailboxesBucket = []byte("mailboxes") //nolint[gochecknoglobals]
imapIDsBucket = []byte("imap_ids") //nolint[gochecknoglobals]
apiIDsBucket = []byte("api_ids") //nolint[gochecknoglobals]
deletedIDsBucket = []byte("deleted_ids") //nolint[gochecknoglobals]
mboxVersionBucket = []byte("mailboxes_version") //nolint[gochecknoglobals]
metadataBucket = []byte("metadata") //nolint[gochecknoglobals]
headersBucket = []byte("headers") //nolint[gochecknoglobals]
bodystructureBucket = []byte("bodystructure") //nolint[gochecknoglobals]
sizeBucket = []byte("size") //nolint[gochecknoglobals]
countsBucket = []byte("counts") //nolint[gochecknoglobals]
addressInfoBucket = []byte("address_info") //nolint[gochecknoglobals]
addressModeBucket = []byte("address_mode") //nolint[gochecknoglobals]
cachePassphraseBucket = []byte("cache_passphrase") //nolint[gochecknoglobals]
syncStateBucket = []byte("sync_state") //nolint[gochecknoglobals]
mailboxesBucket = []byte("mailboxes") //nolint[gochecknoglobals]
imapIDsBucket = []byte("imap_ids") //nolint[gochecknoglobals]
apiIDsBucket = []byte("api_ids") //nolint[gochecknoglobals]
deletedIDsBucket = []byte("deleted_ids") //nolint[gochecknoglobals]
mboxVersionBucket = []byte("mailboxes_version") //nolint[gochecknoglobals]

// ErrNoSuchAPIID when mailbox does not have API ID.
ErrNoSuchAPIID = errors.New("no such api id") //nolint[gochecknoglobals]
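
The bucket names declared above are created up front when the store database is opened (openBoltDatabase further down lists them all). A minimal sketch of that bbolt idiom, with an abbreviated, illustrative bucket list:

package main

import (
	"log"

	bolt "go.etcd.io/bbolt"
)

// openStoreDB opens the file and creates every bucket up front so later
// transactions can assume the buckets exist. Bucket names are a subset of
// those declared above, for illustration only.
func openStoreDB(path string) (*bolt.DB, error) {
	db, err := bolt.Open(path, 0600, nil)
	if err != nil {
		return nil, err
	}

	buckets := [][]byte{
		[]byte("metadata"),
		[]byte("bodystructure"),
		[]byte("size"),
		[]byte("cache_passphrase"),
	}

	if err := db.Update(func(tx *bolt.Tx) error {
		for _, name := range buckets {
			if _, err := tx.CreateBucketIfNotExists(name); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		_ = db.Close()
		return nil, err
	}

	return db, nil
}

func main() {
	db, err := openStoreDB("store-example.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}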

@@ -117,18 +123,23 @@ func exposeContextForSMTP() context.Context {
type Store struct {
sentryReporter *sentry.Reporter
panicHandler PanicHandler
eventLoop *eventLoop
user BridgeUser
eventLoop *eventLoop
currentEvents *Events

log *logrus.Entry

cache *Cache
filePath string
db *bolt.DB
lock *sync.RWMutex
addresses map[string]*Address
notifier ChangeNotifier

builder *message.Builder
cache cache.Cache
cacher *Cacher
done chan struct{}

isSyncRunning bool
syncCooldown cooldown
addressMode addressMode
@@ -139,12 +150,14 @@ func New( // nolint[funlen]
sentryReporter *sentry.Reporter,
panicHandler PanicHandler,
user BridgeUser,
events listener.Listener,
listener listener.Listener,
cache cache.Cache,
builder *message.Builder,
path string,
cache *Cache,
currentEvents *Events,
) (store *Store, err error) {
if user == nil || events == nil || cache == nil {
return nil, fmt.Errorf("missing parameters - user: %v, events: %v, cache: %v", user, events, cache)
if user == nil || listener == nil || currentEvents == nil {
return nil, fmt.Errorf("missing parameters - user: %v, listener: %v, currentEvents: %v", user, listener, currentEvents)
}

l := log.WithField("user", user.ID())
@@ -160,21 +173,29 @@ func New( // nolint[funlen]

bdb, err := openBoltDatabase(path)
if err != nil {
err = errors.Wrap(err, "failed to open store database")
return
return nil, errors.Wrap(err, "failed to open store database")
}

store = &Store{
sentryReporter: sentryReporter,
panicHandler: panicHandler,
user: user,
cache: cache,
filePath: path,
db: bdb,
lock: &sync.RWMutex{},
log: l,
currentEvents: currentEvents,

log: l,

filePath: path,
db: bdb,
lock: &sync.RWMutex{},

builder: builder,
cache: cache,
}

// Create a new cacher. It's not started yet.
// NOTE(GODT-1158): I hate this circular dependency store->cacher->store :(
store.cacher = newCacher(store)

// Minimal increase is event pollInterval, doubles every failed retry up to 5 minutes.
store.syncCooldown.setExponentialWait(pollInterval, 2, 5*time.Minute)
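
The cooldown type itself is not shown in this hunk; the following is only an illustrative sketch of the wait schedule the comment above describes, starting at pollInterval, doubling on every failed retry and capping at five minutes. The pollInterval value below is a placeholder, not the store package's actual constant.

package main

import (
	"fmt"
	"time"
)

// exponentialWait returns the wait after the given number of failed retries.
func exponentialWait(initial time.Duration, factor float64, max time.Duration, retries int) time.Duration {
	wait := initial
	for i := 0; i < retries; i++ {
		wait = time.Duration(float64(wait) * factor)
		if wait > max {
			return max
		}
	}
	return wait
}

func main() {
	pollInterval := 30 * time.Second // illustrative value only
	for retries := 0; retries <= 5; retries++ {
		fmt.Println(retries, exponentialWait(pollInterval, 2, 5*time.Minute, retries))
	}
}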

@@ -188,7 +209,7 @@ func New( // nolint[funlen]
}

if user.IsConnected() {
store.eventLoop = newEventLoop(cache, store, user, events)
store.eventLoop = newEventLoop(currentEvents, store, user, listener)
go func() {
defer store.panicHandler.HandlePanic()
store.eventLoop.start()
@@ -216,10 +237,11 @@ func openBoltDatabase(filePath string) (db *bolt.DB, err error) {
metadataBucket,
headersBucket,
bodystructureBucket,
msgBuildCountBucket,
sizeBucket,
countsBucket,
addressInfoBucket,
addressModeBucket,
cachePassphraseBucket,
syncStateBucket,
mailboxesBucket,
mboxVersionBucket,
@@ -365,6 +387,24 @@ func (store *Store) addAddress(address, addressID string, labels []*pmapi.Label)
return
}

// newBuildJob returns a new build job for the given message using the store's message builder.
func (store *Store) newBuildJob(messageID string, priority int) (*message.Job, pool.DoneFunc) {
return store.builder.NewJobWithOptions(
context.Background(),
store.client(),
messageID,
message.JobOptions{
IgnoreDecryptionErrors: true, // Whether to ignore decryption errors and create a "custom message" instead.
SanitizeDate: true, // Whether to replace all dates before 1970 with RFC822's birthdate.
AddInternalID: true, // Whether to include MessageID as X-Pm-Internal-Id.
AddExternalID: true, // Whether to include ExternalID as X-Pm-External-Id.
AddMessageDate: true, // Whether to include message time as X-Pm-Date.
AddMessageIDReference: true, // Whether to include the MessageID in References.
},
priority,
)
}

// Close stops the event loop and closes the database to free the file.
func (store *Store) Close() error {
store.lock.Lock()
@@ -381,12 +421,21 @@ func (store *Store) CloseEventLoop() {
}

func (store *Store) close() error {
// Stop the watcher first before closing the database.
store.stopWatcher()

// Stop the cacher.
store.cacher.stop()

// Stop the event loop.
store.CloseEventLoop()

// Close the database.
return store.db.Close()
}

// Remove closes and removes the database file and clears the cache file.
func (store *Store) Remove() (err error) {
func (store *Store) Remove() error {
store.lock.Lock()
defer store.lock.Unlock()

@@ -394,22 +443,34 @@ func (store *Store) Remove() (err error) {

var result *multierror.Error

if err = store.close(); err != nil {
if err := store.close(); err != nil {
result = multierror.Append(result, errors.Wrap(err, "failed to close store"))
}

if err = RemoveStore(store.cache, store.filePath, store.user.ID()); err != nil {
if err := RemoveStore(store.currentEvents, store.filePath, store.user.ID()); err != nil {
result = multierror.Append(result, errors.Wrap(err, "failed to remove store"))
}

if err := store.RemoveCache(); err != nil {
result = multierror.Append(result, errors.Wrap(err, "failed to remove cache"))
}

return result.ErrorOrNil()
}

func (store *Store) RemoveCache() error {
if err := store.clearCachePassphrase(); err != nil {
logrus.WithError(err).Error("Failed to clear cache passphrase")
}

return store.cache.Delete(store.user.ID())
}

// RemoveStore removes the database file and clears the cache file.
func RemoveStore(cache *Cache, path, userID string) error {
func RemoveStore(currentEvents *Events, path, userID string) error {
var result *multierror.Error

if err := cache.clearCacheUser(userID); err != nil {
if err := currentEvents.clearUserEvents(userID); err != nil {
result = multierror.Append(result, errors.Wrap(err, "failed to clear event loop user cache"))
}
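
Remove and RemoveStore above accumulate failures with go-multierror instead of stopping at the first one, so every cleanup step gets a chance to run. A minimal sketch of that pattern, with illustrative step names:

package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// cleanup runs every step, collects the failures, and returns them as one
// error (or nil if everything succeeded).
func cleanup(steps map[string]func() error) error {
	var result *multierror.Error

	for name, step := range steps {
		if err := step(); err != nil {
			result = multierror.Append(result, fmt.Errorf("%s: %w", name, err))
		}
	}

	return result.ErrorOrNil()
}

func main() {
	err := cleanup(map[string]func() error{
		"close store":  func() error { return nil },
		"remove cache": func() error { return errors.New("cache file busy") },
	})
	fmt.Println(err)
}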

@@ -23,13 +23,17 @@ import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
"time"

"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/ProtonMail/proton-bridge/internal/store/cache"
storemocks "github.com/ProtonMail/proton-bridge/internal/store/mocks"
"github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
pmapimocks "github.com/ProtonMail/proton-bridge/pkg/pmapi/mocks"
tests "github.com/ProtonMail/proton-bridge/test"
"github.com/golang/mock/gomock"

"github.com/stretchr/testify/require"
@@ -139,7 +143,7 @@ type mocksForStore struct {
store *Store

tmpDir string
cache *Cache
cache *Events
}

func initMocks(tb testing.TB) (*mocksForStore, func()) {
@@ -162,7 +166,7 @@ func initMocks(tb testing.TB) (*mocksForStore, func()) {
require.NoError(tb, err)

cacheFile := filepath.Join(mocks.tmpDir, "cache.json")
mocks.cache = NewCache(cacheFile)
mocks.cache = NewEvents(cacheFile)

return mocks, func() {
if err := recover(); err != nil {
@@ -176,13 +180,14 @@ func initMocks(tb testing.TB) (*mocksForStore, func()) {
}
}

func (mocks *mocksForStore) newStoreNoEvents(combinedMode bool, msgs ...*pmapi.Message) { //nolint[unparam]
func (mocks *mocksForStore) newStoreNoEvents(t *testing.T, combinedMode bool, msgs ...*pmapi.Message) { //nolint[unparam]
mocks.user.EXPECT().ID().Return("userID").AnyTimes()
mocks.user.EXPECT().IsConnected().Return(true)
mocks.user.EXPECT().IsCombinedAddressMode().Return(combinedMode)

mocks.user.EXPECT().GetClient().AnyTimes().Return(mocks.client)

mocks.client.EXPECT().GetUserKeyRing().Return(tests.MakeKeyRing(t), nil).AnyTimes()
mocks.client.EXPECT().Addresses().Return(pmapi.AddressList{
{ID: addrID1, Email: addr1, Type: pmapi.OriginalAddress, Receive: true},
{ID: addrID2, Email: addr2, Type: pmapi.AliasAddress, Receive: true},
@@ -213,6 +218,8 @@ func (mocks *mocksForStore) newStoreNoEvents(combinedMode bool, msgs ...*pmapi.M
mocks.panicHandler,
mocks.user,
mocks.events,
cache.NewInMemoryCache(1<<20),
message.NewBuilder(runtime.NumCPU(), runtime.NumCPU()),
filepath.Join(mocks.tmpDir, "mailbox-test.db"),
mocks.cache,
)

@@ -27,7 +27,6 @@ import (
"strings"

"github.com/ProtonMail/gopenpgp/v2/crypto"
pkgMsg "github.com/ProtonMail/proton-bridge/pkg/message"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -154,11 +153,6 @@ func (store *Store) checkDraftTotalSize(message *pmapi.Message, attachments []*d
return false, err
}

msgSize := message.Size
if msgSize == 0 {
msgSize = int64(len(message.Body))
}

var attSize int64
for _, att := range attachments {
b, err := ioutil.ReadAll(att.encReader)
@@ -169,7 +163,7 @@ func (store *Store) checkDraftTotalSize(message *pmapi.Message, attachments []*d
att.encReader = bytes.NewBuffer(b)
}

return msgSize+attSize <= maxUpload, nil
return int64(len(message.Body))+attSize <= maxUpload, nil
}

func (store *Store) getDraftAction(message *pmapi.Message) int {
@@ -237,39 +231,6 @@ func (store *Store) txPutMessage(metaBucket *bolt.Bucket, onlyMeta *pmapi.Messag
return nil
}

func (store *Store) txPutBodyStructure(bsBucket *bolt.Bucket, msgID string, bs *pkgMsg.BodyStructure) error {
raw, err := bs.Serialize()
if err != nil {
return err
}
err = bsBucket.Put([]byte(msgID), raw)
if err != nil {
return errors.Wrap(err, "cannot put bodystructure bucket")
}
return nil
}

func (store *Store) txGetBodyStructure(bsBucket *bolt.Bucket, msgID string) (*pkgMsg.BodyStructure, error) {
raw := bsBucket.Get([]byte(msgID))
if len(raw) == 0 {
return nil, nil
}
return pkgMsg.DeserializeBodyStructure(raw)
}

func (store *Store) txIncreaseMsgBuildCount(b *bolt.Bucket, msgID string) (uint32, error) {
key := []byte(msgID)
count := uint32(0)

raw := b.Get(key)
if raw != nil {
count = btoi(raw)
}

count++
return count, b.Put(key, itob(count))
}

// createOrUpdateMessageEvent is helper to create only one message with
// createOrUpdateMessagesEvent.
func (store *Store) createOrUpdateMessageEvent(msg *pmapi.Message) error {
@@ -287,7 +248,7 @@ func (store *Store) createOrUpdateMessagesEvent(msgs []*pmapi.Message) error { /
b := tx.Bucket(metadataBucket)
for _, msg := range msgs {
clearNonMetadata(msg)
txUpdateMetadaFromDB(b, msg, store.log)
txUpdateMetadataFromDB(b, msg, store.log)
}
return nil
})
@@ -341,6 +302,11 @@ func (store *Store) createOrUpdateMessagesEvent(msgs []*pmapi.Message) error { /
return err
}

// Notify the cacher that it should start caching messages.
for _, msg := range msgs {
store.cacher.newJob(msg.ID)
}

return nil
}
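
The Cacher itself is not part of this hunk; the sketch below only illustrates the shape of the hand-off used above, where the event handler queues message IDs and a background worker builds and caches them one by one. The worker is plain Go, the Storer interface mirrors the methods mocked earlier in this commit, and everything else (names, buffer size, fake storer) is illustrative.

package main

import (
	"fmt"
	"sync"
)

// Storer mirrors the two methods mocked earlier: check whether a message is
// already cached and build/cache it when it is not.
type Storer interface {
	IsCached(messageID string) bool
	BuildAndCacheMessage(messageID string) error
}

type cacher struct {
	storer Storer
	jobs   chan string
	wg     sync.WaitGroup
}

func newCacher(storer Storer) *cacher {
	c := &cacher{storer: storer, jobs: make(chan string, 100)}
	c.wg.Add(1)
	go c.work()
	return c
}

// newJob enqueues a message for background caching, mirroring store.cacher.newJob above.
func (c *cacher) newJob(messageID string) { c.jobs <- messageID }

func (c *cacher) work() {
	defer c.wg.Done()
	for id := range c.jobs {
		if c.storer.IsCached(id) {
			continue
		}
		if err := c.storer.BuildAndCacheMessage(id); err != nil {
			fmt.Println("failed to cache", id, err)
		}
	}
}

func (c *cacher) stop() {
	close(c.jobs)
	c.wg.Wait()
}

// fakeStorer is a stand-in used only to make the sketch runnable.
type fakeStorer struct{ cached map[string]bool }

func (f *fakeStorer) IsCached(id string) bool { return f.cached[id] }
func (f *fakeStorer) BuildAndCacheMessage(id string) error {
	f.cached[id] = true
	return nil
}

func main() {
	c := newCacher(&fakeStorer{cached: map[string]bool{}})
	for _, id := range []string{"msg1", "msg2"} {
		c.newJob(id)
	}
	c.stop()
}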

@@ -351,16 +317,12 @@ func clearNonMetadata(onlyMeta *pmapi.Message) {
onlyMeta.Attachments = nil
}

// txUpdateMetadaFromDB changes the the onlyMeta data.
// txUpdateMetadataFromDB changes the the onlyMeta data.
// If there is stored message in metaBucket the size, header and MIMEType are
// not changed if already set. To change these:
// * size must be updated by Message.SetSize
// * contentType and header must be updated by Message.SetContentTypeAndHeader.
func txUpdateMetadaFromDB(metaBucket *bolt.Bucket, onlyMeta *pmapi.Message, log *logrus.Entry) {
// Size attribute on the server is counting encrypted data. We need to compute
// "real" size of decrypted data. Negative values will be processed during fetch.
onlyMeta.Size = -1

func txUpdateMetadataFromDB(metaBucket *bolt.Bucket, onlyMeta *pmapi.Message, log *logrus.Entry) {
msgb := metaBucket.Get([]byte(onlyMeta.ID))
if msgb == nil {
return
@@ -378,8 +340,7 @@ func txUpdateMetadaFromDB(metaBucket *bolt.Bucket, onlyMeta *pmapi.Message, log
return
}

// Keep already calculated size and content type.
onlyMeta.Size = stored.Size
// Keep content type.
onlyMeta.MIMEType = stored.MIMEType
if stored.Header != "" && stored.Header != "(No Header)" {
tmpMsg, err := mail.ReadMessage(
@@ -401,6 +362,12 @@ func (store *Store) deleteMessageEvent(apiID string) error {

// deleteMessagesEvent deletes the message from metadata and all mailbox buckets.
func (store *Store) deleteMessagesEvent(apiIDs []string) error {
for _, messageID := range apiIDs {
if err := store.cache.Rem(store.UserID(), messageID); err != nil {
logrus.WithError(err).Error("Failed to remove message from cache")
}
}

return store.db.Update(func(tx *bolt.Tx) error {
for _, apiID := range apiIDs {
if err := tx.Bucket(metadataBucket).Delete([]byte(apiID)); err != nil {

@@ -33,7 +33,7 @@ func TestGetAllMessageIDs(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)

insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel, pmapi.InboxLabel})
insertMessage(t, m, "msg2", "Test message 2", addrID1, false, []string{pmapi.AllMailLabel, pmapi.ArchiveLabel})
@@ -47,7 +47,7 @@ func TestGetMessageFromDB(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})

tests := []struct{ msgID, wantErr string }{
@@ -72,7 +72,7 @@ func TestCreateOrUpdateMessageMetadata(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})

msg, err := m.store.getMessageFromDB("msg1")
@@ -81,12 +81,10 @@ func TestCreateOrUpdateMessageMetadata(t *testing.T) {
// Check non-meta and calculated data are cleared/empty.
a.Equal(t, "", msg.Body)
a.Equal(t, []*pmapi.Attachment(nil), msg.Attachments)
a.Equal(t, int64(-1), msg.Size)
a.Equal(t, "", msg.MIMEType)
a.Equal(t, make(mail.Header), msg.Header)

// Change the calculated data.
wantSize := int64(42)
wantMIMEType := "plain-text"
wantHeader := mail.Header{
"Key": []string{"value"},
@@ -94,13 +92,11 @@ func TestCreateOrUpdateMessageMetadata(t *testing.T) {

storeMsg, err := m.store.addresses[addrID1].mailboxes[pmapi.AllMailLabel].GetMessage("msg1")
require.Nil(t, err)
require.Nil(t, storeMsg.SetSize(wantSize))
require.Nil(t, storeMsg.SetContentTypeAndHeader(wantMIMEType, wantHeader))

// Check calculated data.
msg, err = m.store.getMessageFromDB("msg1")
require.Nil(t, err)
a.Equal(t, wantSize, msg.Size)
a.Equal(t, wantMIMEType, msg.MIMEType)
a.Equal(t, wantHeader, msg.Header)

@@ -109,7 +105,6 @@ func TestCreateOrUpdateMessageMetadata(t *testing.T) {

msg, err = m.store.getMessageFromDB("msg1")
require.Nil(t, err)
a.Equal(t, wantSize, msg.Size)
a.Equal(t, wantMIMEType, msg.MIMEType)
a.Equal(t, wantHeader, msg.Header)
}
@@ -118,7 +113,7 @@ func TestDeleteMessage(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel})
insertMessage(t, m, "msg2", "Test message 2", addrID1, false, []string{pmapi.AllMailLabel})

@@ -129,8 +124,7 @@ func TestDeleteMessage(t *testing.T) {
}

func insertMessage(t *testing.T, m *mocksForStore, id, subject, sender string, unread bool, labelIDs []string) { //nolint[unparam]
msg := getTestMessage(id, subject, sender, unread, labelIDs)
require.Nil(t, m.store.createOrUpdateMessageEvent(msg))
require.Nil(t, m.store.createOrUpdateMessageEvent(getTestMessage(id, subject, sender, unread, labelIDs)))
}

func getTestMessage(id, subject, sender string, unread bool, labelIDs []string) *pmapi.Message {
@@ -142,7 +136,6 @@ func getTestMessage(id, subject, sender string, unread bool, labelIDs []string)
Sender: address,
ToList: []*mail.Address{address},
LabelIDs: labelIDs,
Size: 12345,
Body: "body of message",
Attachments: []*pmapi.Attachment{{
ID: "attachment1",
@@ -162,7 +155,7 @@ func TestCreateDraftCheckMessageSize(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(false)
m.newStoreNoEvents(t, false)
m.client.EXPECT().CurrentUser(gomock.Any()).Return(&pmapi.User{
MaxUpload: 100, // Decrypted message 5 chars, encrypted 500+.
}, nil)
@@ -181,7 +174,7 @@ func TestCreateDraftCheckMessageWithAttachmentSize(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(false)
m.newStoreNoEvents(t, false)
m.client.EXPECT().CurrentUser(gomock.Any()).Return(&pmapi.User{
MaxUpload: 800, // Decrypted message 5 chars + 5 chars of attachment, encrypted 500+ + 300+.
}, nil)

@@ -30,7 +30,7 @@ func TestLoadSaveSyncState(t *testing.T) {
m, clear := initMocks(t)
defer clear()

m.newStoreNoEvents(true)
m.newStoreNoEvents(t, true)
insertMessage(t, m, "msg1", "Test message 1", addrID1, false, []string{pmapi.AllMailLabel, pmapi.InboxLabel})
insertMessage(t, m, "msg2", "Test message 2", addrID1, false, []string{pmapi.AllMailLabel, pmapi.InboxLabel})