Files
proton-bridge/internal/imap/backend.go
James Houlahan 6bd0739013 GODT-1158: Store full message bodies on disk
- GODT-1158: simple on-disk cache in store
- GODT-1158: better member naming in event loop
- GODT-1158: create on-disk cache during bridge setup
- GODT-1158: better job options
- GODT-1158: rename GetLiteral to GetRFC822
- GODT-1158: rename events -> currentEvents
- GODT-1158: unlock cache per-user
- GODT-1158: clean up cache after logout
- GODT-1158: randomized encrypted cache passphrase
- GODT-1158: Opt out of on-disk cache in settings
- GODT-1158: free space in cache
- GODT-1158: make tests compile
- GODT-1158: optional compression
- GODT-1158: cache custom location
- GODT-1158: basic capacity checker
- GODT-1158: cache free space config
- GODT-1158: only unlock cache if pmapi client is unlocked as well
- GODT-1158: simple background sync worker
- GODT-1158: set size/bodystructure when caching message
- GODT-1158: limit store db update blocking with semaphore
- GODT-1158: dumb 10-semaphore
- GODT-1158: properly handle delete; remove bad bodystructure handling
- GODT-1158: hacky fix for caching after logout... baaaaad
- GODT-1158: cache worker
- GODT-1158: compute body structure lazily
- GODT-1158: cache size in store
- GODT-1158: notify cacher when adding to store
- GODT-1158: 15 second store cache watcher
- GODT-1158: enable cacher
- GODT-1158: better cache worker starting/stopping
- GODT-1158: limit cacher to less concurrency than disk cache
- GODT-1158: message builder prio + pchan pkg
- GODT-1158: fix pchan, use in message builder
- GODT-1158: no sem in cacher (rely on message builder prio)
- GODT-1158: raise priority of existing jobs when requested
- GODT-1158: pending messages in on-disk cache
- GODT-1158: WIP just a note about deleting messages from disk cache
- GODT-1158: pending wait when trying to write
- GODT-1158: pending.add to return bool
- GODT-1225: Headers in bodystructure are stored as bytes.
- GODT-1158: fixing header caching
- GODT-1158: don't cache in background
- GODT-1158: all concurrency set in settings
- GODT-1158: worker pools inside message builder
- GODT-1158: fix linter issues
- GODT-1158: remove completed builds from builder
- GODT-1158: remove builder pool
- GODT-1158: cacher defer job done properly
- GODT-1158: fix linter
- GODT-1299: Continue with bodystructure build if deserialization failed
- GODT-1324: Delete messages from the cache when they are deleted on the server
- GODT-1158: refactor cache tests
- GODT-1158: move builder to app/bridge
- GODT-1306: Migrate cache on disk when location is changed (and delete when disabled)
2021-11-30 10:12:36 +01:00

222 lines
6.9 KiB
Go

// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.

// Package imap provides the IMAP server of the Bridge.
//
// Methods are called by the go-imap library in parallel.
// Additional parallelism is achieved while handling each IMAP request.
//
// For example, ListMessages internally uses `fetchWorkers` workers to resolve each requested item.
// When IMAP clients request message literals (or parts thereof), we sometimes need to build RFC822 message literals.
// To do this, we pass build jobs to the message builder, which internally manages its own parallelism.
// Summary:
// - each IMAP fetch request is handled in parallel,
// - within each IMAP fetch request, individual items are handled by a pool of `fetchWorkers` workers,
// - within each worker, build jobs are posted to the message builder,
// - the message builder handles build jobs using its own, independent worker pool,
// The builder will handle jobs in parallel up to its own internal limit. This prevents it from overwhelming API.
package imap
import (
"strings"
"sync"
"time"
"github.com/ProtonMail/proton-bridge/internal/bridge"
"github.com/ProtonMail/proton-bridge/internal/config/settings"
"github.com/ProtonMail/proton-bridge/internal/events"
"github.com/ProtonMail/proton-bridge/pkg/listener"
"github.com/emersion/go-imap"
goIMAPBackend "github.com/emersion/go-imap/backend"
)
// panicHandler recovers from panics in goroutines spawned by go-imap.
// HandlePanic is deferred at the top of every go-imap callback (see Login
// and Updates below) because the library invokes them in parallel.
type panicHandler interface {
	HandlePanic()
}
// imapBackend implements the go-imap/backend interface and tracks the
// IMAP users that are currently known to this bridge instance.
type imapBackend struct {
	panicHandler  panicHandler      // deferred in each go-imap callback to recover panics
	bridge        bridger           // access to bridge users (see createUser)
	updates       *imapUpdates      // IDLE update channel, wired to stores on Login
	eventListener listener.Listener // source of CloseConnectionEvent (see monitorDisconnectedUsers)
	listWorkers   int               // worker count read from settings.IMAPWorkers

	users       map[string]*imapUser // keyed by lowercase address (primary address in combined mode)
	usersLocker sync.Locker          // guards users; held by getUser/createUser/deleteUser

	// imapCache fields are not touched in this part of the file;
	// presumably used by cache-handling methods elsewhere — verify there.
	imapCache     map[string]map[string]string
	imapCachePath string
	imapCacheLock *sync.RWMutex
}
// settingsProvider is the minimal view of the settings store needed here:
// only integer lookups (used for settings.IMAPWorkers in NewIMAPBackend).
type settingsProvider interface {
	GetInt(string) int
}
// NewIMAPBackend returns struct implementing go-imap/backend interface.
//
// It wraps the concrete bridge in a bridger, reads the worker count from
// settings, and starts the background goroutine that evicts users whose
// connections were closed.
func NewIMAPBackend(
	panicHandler panicHandler,
	eventListener listener.Listener,
	cache cacheProvider,
	setting settingsProvider,
	bridge *bridge.Bridge,
) *imapBackend { //nolint[golint]
	backend := newIMAPBackend(
		panicHandler,
		cache,
		newBridgeWrap(bridge),
		eventListener,
		setting.GetInt(settings.IMAPWorkers),
	)

	// Runs for the lifetime of the process, removing disconnected users.
	go backend.monitorDisconnectedUsers()

	return backend
}
// newIMAPBackend assembles the backend from already-wrapped dependencies.
// The caller is responsible for starting monitorDisconnectedUsers.
func newIMAPBackend(
	panicHandler panicHandler,
	cache cacheProvider,
	bridge bridger,
	eventListener listener.Listener,
	listWorkers int,
) *imapBackend {
	ib := &imapBackend{
		panicHandler:  panicHandler,
		bridge:        bridge,
		eventListener: eventListener,
		listWorkers:   listWorkers,

		updates: newIMAPUpdates(),

		users:       make(map[string]*imapUser),
		usersLocker: &sync.Mutex{},

		imapCachePath: cache.GetIMAPCachePath(),
		imapCacheLock: &sync.RWMutex{},
	}

	return ib
}
// getUser returns the cached imapUser for the given address, creating it
// on first use. Lookup is case-insensitive: the address is lowercased
// before being used as the map key.
func (ib *imapBackend) getUser(address string) (*imapUser, error) {
	ib.usersLocker.Lock()
	defer ib.usersLocker.Unlock()

	key := strings.ToLower(address)

	if existing, ok := ib.users[key]; ok {
		return existing, nil
	}

	// createUser requires a lowercase address; key is already lowered.
	return ib.createUser(key)
}
// createUser builds and registers a new imapUser.
// The address MUST already be lowercase; the usersLocker must be held.
func (ib *imapBackend) createUser(address string) (*imapUser, error) {
	log.WithField("address", address).Debug("Creating new IMAP user")

	bridgeUser, err := ib.bridge.GetUser(address)
	if err != nil {
		return nil, err
	}

	// In combined address mode every alias resolves to the primary address
	// so that all valid addresses share one IMAP user.
	if bridgeUser.IsCombinedAddressMode() {
		address = strings.ToLower(bridgeUser.GetPrimaryAddress())

		if combined, ok := ib.users[address]; ok {
			return combined, nil
		}
	}

	// The address ID is kept so that all IMAP connections for this address
	// can be closed properly later (clients log in with an address only).
	addressID, err := bridgeUser.GetAddressID(address)
	if err != nil {
		return nil, err
	}

	created, err := newIMAPUser(ib.panicHandler, ib, bridgeUser, addressID, address)
	if err != nil {
		return nil, err
	}

	ib.users[address] = created

	return created, nil
}
// deleteUser drops the user for the given address from the users map.
// Deleting a missing key is a no-op, so calling this twice is harmless.
func (ib *imapBackend) deleteUser(address string) {
	log.WithField("address", address).Debug("Deleting IMAP user")

	ib.usersLocker.Lock()
	defer ib.usersLocker.Unlock()

	key := strings.ToLower(address)

	delete(ib.users, key)
}
// Login authenticates a user.
func (ib *imapBackend) Login(_ *imap.ConnInfo, username, password string) (goIMAPBackend.User, error) {
	// Called from go-imap in goroutines - we need to handle panics for each function.
	defer ib.panicHandler.HandlePanic()

	iu, err := ib.getUser(username)
	if err != nil {
		log.WithError(err).Warn("Cannot get user")
		return nil, err
	}

	if checkErr := iu.user.CheckBridgeLogin(password); checkErr != nil {
		log.WithError(checkErr).Error("Could not check bridge password")

		if logoutErr := iu.Logout(); logoutErr != nil {
			log.WithError(logoutErr).Warn("Could not logout user after unsuccessful login check")
		}

		// Apple Mail sometimes generates a lot of requests very quickly.
		// It's therefore good to have a timeout after a bad login so that we can slow
		// those requests down a little bit.
		time.Sleep(10 * time.Second)

		return nil, checkErr
	}

	// The update channel should be nil until we try to login to IMAP for the first time
	// so that it doesn't make bridge slow for users who are only using bridge for SMTP
	// (otherwise the store will be locked for 1 sec per email during synchronization).
	if store := iu.user.GetStore(); store != nil {
		store.SetChangeNotifier(ib.updates)
	}

	return iu, nil
}
// Updates returns a channel of updates for IMAP IDLE extension.
func (ib *imapBackend) Updates() <-chan goIMAPBackend.Update {
	// Called from go-imap in goroutines - we need to handle panics for each function.
	defer ib.panicHandler.HandlePanic()

	updateCh := ib.updates.ch

	return updateCh
}
// CreateMessageLimit returns the maximum message size accepted on append.
// Returning nil signals to go-imap that no limit is enforced here.
func (ib *imapBackend) CreateMessageLimit() *uint32 {
	return nil
}
// monitorDisconnectedUsers drops users from the backend whenever a close
// connection event arrives for their address. It blocks on the event
// channel and therefore runs for the lifetime of the process (started as
// a goroutine by NewIMAPBackend); the channel is never closed here.
func (ib *imapBackend) monitorDisconnectedUsers() {
	addressCh := make(chan string)

	ib.eventListener.Add(events.CloseConnectionEvent, addressCh)

	for address := range addressCh {
		// Dropping the user ensures future IMAP login attempts use the
		// latest bridge user (it might have been removed and re-added, so
		// we want the new bridge user object).
		ib.deleteUser(address)
	}
}