GODT-1356 GODT-1302: Cache on disk concurrency and API retries

- GODT-1302: Change maximum resty retries from 0 to 30 (see the sketch after this list)
- GODT-1302: Make sure we are closing the GetAttachment io.ReadCloser on error
- GODT-1356: Do not use attachmentPool - it was useless anyway
- GODT-1356: Increase the cache watcher interval to 10 min
- GODT-1356: Start the cache watcher right after startup (do not wait for the first 10 min)
- GODT-1356: Limit the number of buildJobs (memory allocation) in BuildAndCacheMessage
- Other: Pass the context from job options (message builder) to the fetcher (for both messages and attachments)
- Other: BuildJob contains the same function as the returned buildDone (proper map locking)
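The two GODT-1302 changes are small enough to sketch. The snippet below is a minimal illustration under assumptions, not the bridge's actual code: it assumes the go-resty v2 client used by the API layer, getAttachment is a hypothetical stand-in for the real attachment fetcher, and the backoff durations are illustrative (only the retry count of 30 comes from this commit).

package sketch

import (
	"io"
	"time"

	"github.com/go-resty/resty/v2"
)

// newClient sketches the retry change: a retry count of 0 meant any
// transient API failure surfaced immediately; 30 retries with backoff
// lets the client ride out short outages.
func newClient() *resty.Client {
	return resty.New().
		SetRetryCount(30).
		SetRetryWaitTime(500 * time.Millisecond). // illustrative backoff
		SetRetryMaxWaitTime(10 * time.Second)
}

// readAttachment sketches the io.ReadCloser fix: defer the Close right
// after a successful open so the body is also released when a later
// step fails.
func readAttachment(getAttachment func() (io.ReadCloser, error)) ([]byte, error) {
	rc, err := getAttachment()
	if err != nil {
		return nil, err
	}
	defer rc.Close() // runs on every return path below, success or error

	return io.ReadAll(rc)
}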
Jakub
2021-10-19 17:08:12 +02:00
parent db7ead3901
commit a93a8e7be9
8 changed files with 118 additions and 48 deletions

View File

@@ -32,6 +32,7 @@ import (
"github.com/ProtonMail/proton-bridge/internal/frontend/types"
"github.com/ProtonMail/proton-bridge/internal/imap"
"github.com/ProtonMail/proton-bridge/internal/smtp"
"github.com/ProtonMail/proton-bridge/internal/store"
"github.com/ProtonMail/proton-bridge/internal/store/cache"
"github.com/ProtonMail/proton-bridge/internal/updater"
"github.com/ProtonMail/proton-bridge/pkg/message"
@@ -74,7 +75,7 @@ func run(b *base.Base, c *cli.Context) error { // nolint[funlen]
return err
}
cache, err := loadCache(b)
cache, err := loadMessageCache(b)
if err != nil {
return err
}
@@ -247,16 +248,20 @@ func checkAndHandleUpdate(u types.Updater, f frontend.Frontend, autoUpdate bool)
f.NotifySilentUpdateInstalled()
}
// NOTE(GODT-1158): How big should in-memory cache be?
// NOTE(GODT-1158): How to handle cache location migration if user changes custom path?
func loadCache(b *base.Base) (cache.Cache, error) {
func loadMessageCache(b *base.Base) (cache.Cache, error) {
if !b.Settings.GetBool(settings.CacheEnabledKey) {
return cache.NewInMemoryCache(100 * (1 << 20)), nil
// The in-memory cache size was estimated from past empirical
// usage and set to 100MB.
// NOTE: This value must not be less than the maximal size of
// one email (~30MB).
return cache.NewInMemoryCache(100 << 20), nil
}
var compressor cache.Compressor
// NOTE(GODT-1158): If user changes compression setting we have to nuke the cache.
// NOTE(GODT-1158): Changing compression is currently not an option
// available to the user, but if the user changes the compression
// setting we have to nuke the cache.
if b.Settings.GetBool(settings.CacheCompressionKey) {
compressor = &cache.GZipCompressor{}
} else {
@@ -269,10 +274,15 @@ func loadCache(b *base.Base) (cache.Cache, error) {
path = customPath
} else {
path = b.Cache.GetDefaultMessageCacheDir()
// Store path so it will allways persist if default location will be changed in new version.
// Store the path so it will always persist if the default
// location is changed in a new version.
b.Settings.Set(settings.CacheLocationKey, path)
}
// To prevent memory peaks we set the maximal write concurrency for
// store build jobs.
store.SetBuildAndCacheJobLimit(b.Settings.GetInt(settings.CacheConcurrencyWrite))
return cache.NewOnDiskCache(path, compressor, cache.Options{
MinFreeAbs: uint64(b.Settings.GetInt(settings.CacheMinFreeAbsKey)),
MinFreeRat: b.Settings.GetFloat64(settings.CacheMinFreeRatKey),
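Two notes on this hunk. For reference, 100 << 20 above is 100 * 2^20 = 104,857,600 bytes (100 MiB), equivalent to the old 100 * (1 << 20) spelling. The MinFreeAbs/MinFreeRat options on which the hunk is truncated configure when the on-disk cache must stop growing; below is a hedged sketch of how such a free-space gate could look, not the bridge's actual implementation (the statfs call is Linux-specific and the helper name is made up).

package sketch

import "golang.org/x/sys/unix"

// hasSpace reports whether a cache write is allowed under limits like
// cache.Options above: free space must stay above an absolute floor
// (minFreeAbs bytes) and above a fraction of the disk (minFreeRat).
func hasSpace(path string, minFreeAbs uint64, minFreeRat float64) (bool, error) {
	var st unix.Statfs_t
	if err := unix.Statfs(path, &st); err != nil {
		return false, err
	}

	free := st.Bavail * uint64(st.Bsize)  // bytes available to unprivileged users
	total := st.Blocks * uint64(st.Bsize) // total filesystem size in bytes

	return free >= minFreeAbs && float64(free)/float64(total) >= minFreeRat, nil
}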

View File

@@ -89,6 +89,24 @@ func (store *Store) clearCachePassphrase() error {
})
}
// buildAndCacheJobs limits the number of parallel background build jobs
// by using a buffered channel. While the channel blocks, the goroutine
// is running but the download has not started yet, so no space needs to
// be allocated. Once other jobs finish, the blocked job can continue.
// The bottleneck is `store.cache.Set`, which can take some time to write
// all downloaded bytes. Therefore, it is not effective to start fetching
// and building more messages than the maximum number of possible
// parallel cache writers.
//
// The default buildAndCacheJobs capacity is 16; it can be changed by
// SetBuildAndCacheJobLimit.
var (
buildAndCacheJobs = make(chan struct{}, 16) //nolint[gochecknoglobals]
)
func SetBuildAndCacheJobLimit(maxJobs int) {
buildAndCacheJobs = make(chan struct{}, maxJobs)
}
func (store *Store) getCachedMessage(messageID string) ([]byte, error) {
if store.cache.Has(store.user.ID(), messageID) {
return store.cache.Get(store.user.ID(), messageID)
@@ -118,6 +136,9 @@ func (store *Store) IsCached(messageID string) bool {
// BuildAndCacheMessage builds the given message with background priority
// and puts it in the cache.
func (store *Store) BuildAndCacheMessage(messageID string) error {
buildAndCacheJobs <- struct{}{}
defer func() { <-buildAndCacheJobs }()
job, done := store.newBuildJob(messageID, message.BackgroundPriority)
defer done()
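The acquire/release pair above is the standard Go counting-semaphore idiom: a buffered channel whose capacity bounds concurrency. Here is a self-contained demo of the same shape (the capacity of 4 and the 20 jobs are arbitrary demo values):

package main

import (
	"fmt"
	"sync"
)

// jobs is a counting semaphore: its capacity bounds how many goroutines
// may hold a slot at once. Acquire by sending, release by receiving.
var jobs = make(chan struct{}, 4)

func buildAndCache(id int) {
	jobs <- struct{}{}        // blocks while all 4 slots are taken
	defer func() { <-jobs }() // frees the slot when done

	fmt.Println("building message", id) // stands in for fetch + build + cache.Set
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 20; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			buildAndCache(id)
		}(i)
	}
	wg.Wait()
}

Because a slot is acquired before any fetching begins, a blocked goroutine costs almost nothing; memory for a message is only allocated once a slot is held, which is exactly the peak this commit caps.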

View File

@@ -23,26 +23,27 @@ func (store *Store) StartWatcher() {
store.done = make(chan struct{})
go func() {
ticker := time.NewTicker(3 * time.Minute)
ticker := time.NewTicker(10 * time.Minute)
defer ticker.Stop()
for {
// NOTE(GODT-1158): Race condition here? What if DB was already closed?
messageIDs, err := store.getAllMessageIDs()
if err != nil {
return
}
for _, messageID := range messageIDs {
if !store.IsCached(messageID) {
store.cacher.newJob(messageID)
}
}
select {
case <-ticker.C:
// NOTE(GODT-1158): Race condition here? What if DB was already closed?
messageIDs, err := store.getAllMessageIDs()
if err != nil {
return
}
for _, messageID := range messageIDs {
if !store.IsCached(messageID) {
store.cacher.newJob(messageID)
}
}
case <-store.done:
return
case <-ticker.C:
continue
}
}
}()
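The watcher change combines the two GODT-1356 bullets from the commit message: a 10-minute interval, with the first scan running immediately instead of after the first tick. A hedged sketch of that shape, with scanOnce standing in for the message-ID walk above:

package sketch

import "time"

// startWatcher runs scanOnce immediately (instead of waiting out the
// first 10-minute interval) and then once per tick until done closes.
func startWatcher(scanOnce func(), done <-chan struct{}) {
	go func() {
		ticker := time.NewTicker(10 * time.Minute)
		defer ticker.Stop()

		scanOnce() // first pass right away

		for {
			select {
			case <-ticker.C:
				scanOnce()
			case <-done:
				return
			}
		}
	}()
}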