GODT-1356 GODT-1302: Cache on disk concurrency and API retries

- GODT-1302: Change maximum resty retries from 0 to 30
- GODT-1302: Make sure we are closing the GetAttachment io.ReadCloser on error
- GODT-1356: Do not use attachmentPool - it was useless anyway
- GODT-1356: Increase cache watcher interval to 10 min
- GODT-1356: Start cache watcher right after start (do not wait for the first 10 min)
- GODT-1356: Limit the number of buildJobs (memory allocation) in BuildAndCacheMessage
- Other: Pass context from job options (message builder) to fetcher (both message and attachments)
- Other: BuildJob contains the same function as the returned buildDone (proper map locking)
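The GODT-1302 changes are not visible in the hunks below, so here is a minimal Go sketch of the two patterns those bullets describe. It assumes the go-resty v2 client; newClient, readAttachment, and fetch are illustrative names, not code from the bridge:

package sketch

import (
	"io"

	"github.com/go-resty/resty/v2"
)

// newClient raises resty's retry count from the default of 0 to 30,
// in the spirit of the GODT-1302 change (assumed go-resty v2 API).
func newClient() *resty.Client {
	return resty.New().SetRetryCount(30)
}

// readAttachment shows the close-on-error pattern for an io.ReadCloser
// returned by a hypothetical attachment fetch: the deferred Close runs
// on the error path as well as the success path.
func readAttachment(fetch func() (io.ReadCloser, error)) ([]byte, error) {
	rc, err := fetch()
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	return io.ReadAll(rc)
}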
@@ -89,6 +89,24 @@ func (store *Store) clearCachePassphrase() error {
 	})
 }
 
+// buildAndCacheJobs limits the number of parallel background build jobs by
+// using a buffered channel. While the channel is blocking, the goroutine is
+// running but the download has not started yet, hence no space needs to be
+// allocated. Once other instances finish, the job can continue. The
+// bottleneck is `store.cache.Set`, which can take some time to write all
+// downloaded bytes. Therefore, it is not effective to start fetching and
+// building the message for more than the maximum number of possible parallel
+// cache writers.
+//
+// The default buildAndCacheJobs value is 16; it can be changed by SetBuildAndCacheJobLimit.
+var (
+	buildAndCacheJobs = make(chan struct{}, 16) //nolint[gochecknoglobals]
+)
+
+func SetBuildAndCacheJobLimit(maxJobs int) {
+	buildAndCacheJobs = make(chan struct{}, maxJobs)
+}
+
 func (store *Store) getCachedMessage(messageID string) ([]byte, error) {
 	if store.cache.Has(store.user.ID(), messageID) {
 		return store.cache.Get(store.user.ID(), messageID)
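As a standalone illustration of the limiter introduced above, here is a runnable sketch of the same buffered-channel-as-semaphore pattern; jobs and buildAndCache are illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

// jobs plays the role of buildAndCacheJobs: a buffered channel whose
// capacity caps the number of concurrent workers.
var jobs = make(chan struct{}, 4)

func buildAndCache(id int) {
	jobs <- struct{}{}        // blocks while all 4 slots are taken
	defer func() { <-jobs }() // frees the slot when the job finishes

	time.Sleep(50 * time.Millisecond) // stand-in for the slow cache write
	fmt.Println("cached message", id)
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 16; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			buildAndCache(i) // at most 4 of these run at once
		}(i)
	}
	wg.Wait()
}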
@@ -118,6 +136,9 @@ func (store *Store) IsCached(messageID string) bool {
 // BuildAndCacheMessage builds the given message and puts it in the cache.
 // It builds with background priority.
 func (store *Store) BuildAndCacheMessage(messageID string) error {
+	buildAndCacheJobs <- struct{}{}
+	defer func() { <-buildAndCacheJobs }()
+
 	job, done := store.newBuildJob(messageID, message.BackgroundPriority)
 	defer done()
 
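One caveat worth noting (an observation on the shown code, not a documented constraint): SetBuildAndCacheJobLimit replaces the channel wholesale, so it only looks safe to call before any build jobs are in flight; a job that acquired a slot on the old channel would try to release it by receiving from the new, empty one.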
@@ -23,26 +23,27 @@ func (store *Store) StartWatcher() {
 	store.done = make(chan struct{})
 
 	go func() {
-		ticker := time.NewTicker(3 * time.Minute)
+		ticker := time.NewTicker(10 * time.Minute)
 		defer ticker.Stop()
 
 		for {
+			// NOTE(GODT-1158): Race condition here? What if DB was already closed?
+			messageIDs, err := store.getAllMessageIDs()
+			if err != nil {
+				return
+			}
+
+			for _, messageID := range messageIDs {
+				if !store.IsCached(messageID) {
+					store.cacher.newJob(messageID)
+				}
+			}
+
 			select {
-			case <-ticker.C:
-				// NOTE(GODT-1158): Race condition here? What if DB was already closed?
-				messageIDs, err := store.getAllMessageIDs()
-				if err != nil {
-					return
-				}
-
-				for _, messageID := range messageIDs {
-					if !store.IsCached(messageID) {
-						store.cacher.newJob(messageID)
-					}
-				}
-
 			case <-store.done:
 				return
+			case <-ticker.C:
+				continue
 			}
 		}
 	}()
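The reordering above makes the first scan happen immediately, with the ticker only pacing subsequent passes. A stripped-down, runnable sketch of the same loop shape, with watch and done as illustrative names:

package main

import (
	"fmt"
	"sync"
	"time"
)

func watch(done <-chan struct{}) {
	ticker := time.NewTicker(10 * time.Minute)
	defer ticker.Stop()

	for {
		// Work runs on loop entry, before the first tick.
		fmt.Println("scan for uncached messages")

		select {
		case <-done:
			return // shutdown wins over the next tick
		case <-ticker.C:
			continue // next pass after the interval
		}
	}
}

func main() {
	done := make(chan struct{})
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		watch(done)
	}()

	time.Sleep(100 * time.Millisecond) // let the first pass run
	close(done)
	wg.Wait()
}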