forked from Silverfish/proton-bridge
chore: merge branch release/perth_narrows to devel
@ -81,7 +81,7 @@ const (
|
||||
appUsage = "Proton Mail IMAP and SMTP Bridge"
|
||||
)
|
||||
|
||||
func New() *cli.App { //nolint:funlen
|
||||
func New() *cli.App {
|
||||
app := cli.NewApp()
|
||||
|
||||
app.Name = constants.FullAppName
|
||||
@ -156,7 +156,7 @@ func New() *cli.App { //nolint:funlen
|
||||
return app
|
||||
}
|
||||
|
||||
func run(c *cli.Context) error { //nolint:funlen
|
||||
func run(c *cli.Context) error {
|
||||
// Seed the default RNG from the math/rand package.
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
|
||||
@ -170,7 +170,7 @@ func run(c *cli.Context) error { //nolint:funlen
|
||||
identifier := useragent.New()
|
||||
|
||||
// Create a new Sentry client that will be used to report crashes etc.
|
||||
reporter := sentry.NewReporter(constants.FullAppName, constants.Version, identifier)
|
||||
reporter := sentry.NewReporter(constants.FullAppName, identifier)
|
||||
|
||||
// Determine the exe that should be used to restart/autostart the app.
|
||||
// By default, this is the launcher, if used. Otherwise, we try to get
|
||||
@ -208,9 +208,14 @@ func run(c *cli.Context) error { //nolint:funlen
|
||||
}
|
||||
|
||||
// Ensure we are the only instance running.
|
||||
return withSingleInstance(locations, version, func() error {
|
||||
settings, err := locations.ProvideSettingsPath()
|
||||
if err != nil {
|
||||
logrus.WithError(err).Error("Failed to get settings path")
|
||||
}
|
||||
|
||||
return withSingleInstance(settings, locations.GetLockFile(), version, func() error {
|
||||
// Unlock the encrypted vault.
|
||||
return WithVault(locations, func(vault *vault.Vault, insecure, corrupt bool) error {
|
||||
return WithVault(locations, func(v *vault.Vault, insecure, corrupt bool) error {
|
||||
// Report insecure vault.
|
||||
if insecure {
|
||||
_ = reporter.ReportMessageWithContext("Vault is insecure", map[string]interface{}{})
|
||||
@ -221,27 +226,39 @@ func run(c *cli.Context) error { //nolint:funlen
|
||||
_ = reporter.ReportMessageWithContext("Vault is corrupt", map[string]interface{}{})
|
||||
}
|
||||
|
||||
if !vault.Migrated() {
|
||||
// Force re-sync if last version <= 3.0.12 due to changes in the gluon cache format.
|
||||
if lastVersion := v.GetLastVersion(); lastVersion != nil {
|
||||
versionWithLZ4Cache := semver.MustParse("3.0.13")
|
||||
if lastVersion.LessThan(versionWithLZ4Cache) {
|
||||
if err := v.ForUser(1, func(user *vault.User) error {
|
||||
return user.ClearSyncStatus()
|
||||
}); err != nil {
|
||||
logrus.WithError(err).Error("Failed to force resync on user")
|
||||
}
|
||||
}
|
||||
}
|
||||
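The hunk above gates a forced mailbox re-sync on the last run version: anything older than 3.0.13 predates the LZ4 gluon cache format and must rebuild its cache. A minimal sketch of that version gate using the same Masterminds semver package (the clear-sync callback from the real code is omitted):

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

// needsResync reports whether a full re-sync should be forced: any run whose
// last recorded version predates the 3.0.13 cache format must rebuild its
// local gluon cache.
func needsResync(lastVersion *semver.Version) bool {
	if lastVersion == nil {
		return false // fresh install, nothing to migrate
	}

	versionWithLZ4Cache := semver.MustParse("3.0.13")

	return lastVersion.LessThan(versionWithLZ4Cache)
}

func main() {
	fmt.Println(needsResync(semver.MustParse("3.0.12"))) // true
	fmt.Println(needsResync(semver.MustParse("3.0.13"))) // false
}
```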
|
||||
if !v.Migrated() {
|
||||
// Migrate old settings into the vault.
|
||||
if err := migrateOldSettings(vault); err != nil {
|
||||
if err := migrateOldSettings(v); err != nil {
|
||||
logrus.WithError(err).Error("Failed to migrate old settings")
|
||||
}
|
||||
|
||||
// Migrate old accounts into the vault.
|
||||
if err := migrateOldAccounts(locations, vault); err != nil {
|
||||
if err := migrateOldAccounts(locations, v); err != nil {
|
||||
logrus.WithError(err).Error("Failed to migrate old accounts")
|
||||
}
|
||||
|
||||
// The vault has been migrated.
|
||||
if err := vault.SetMigrated(); err != nil {
|
||||
if err := v.SetMigrated(); err != nil {
|
||||
logrus.WithError(err).Error("Failed to mark vault as migrated")
|
||||
}
|
||||
}
|
||||
|
||||
// Load the cookies from the vault.
|
||||
return withCookieJar(vault, func(cookieJar http.CookieJar) error {
|
||||
return withCookieJar(v, func(cookieJar http.CookieJar) error {
|
||||
// Create a new bridge instance.
|
||||
return withBridge(c, exe, locations, version, identifier, crashHandler, reporter, vault, cookieJar, func(b *bridge.Bridge, eventCh <-chan events.Event) error {
|
||||
return withBridge(c, exe, locations, version, identifier, crashHandler, reporter, v, cookieJar, func(b *bridge.Bridge, eventCh <-chan events.Event) error {
|
||||
if insecure {
|
||||
logrus.Warn("The vault key could not be retrieved; the vault will not be encrypted")
|
||||
b.PushError(bridge.ErrVaultInsecure)
|
||||
@ -266,15 +283,15 @@ func run(c *cli.Context) error { //nolint:funlen
|
||||
}
|
||||
|
||||
// If there's another instance already running, try to raise it and exit.
|
||||
func withSingleInstance(locations *locations.Locations, version *semver.Version, fn func() error) error {
|
||||
func withSingleInstance(settingPath, lockFile string, version *semver.Version, fn func() error) error {
|
||||
logrus.Debug("Checking for other instances")
|
||||
defer logrus.Debug("Single instance stopped")
|
||||
|
||||
lock, err := checkSingleInstance(locations.GetLockFile(), version)
|
||||
lock, err := checkSingleInstance(settingPath, lockFile, version)
|
||||
if err != nil {
|
||||
logrus.Info("Another instance is already running; raising it")
|
||||
|
||||
if ok := focus.TryRaise(); !ok {
|
||||
if ok := focus.TryRaise(settingPath); !ok {
|
||||
return fmt.Errorf("another instance is already running but it could not be raised")
|
||||
}
|
||||
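withSingleInstance now receives the settings path so the focus client can find the running instance's gRPC port. The real code relies on the singleinstance package and the focus service; below is a minimal, self-contained sketch of the overall control flow, with a plain O_EXCL lock file and a tryRaise callback standing in for those pieces:

```go
package main

import (
	"fmt"
	"os"
)

// withSingleInstanceSketch illustrates the flow used above: try to take an
// exclusive lock file; if that fails, assume another instance is running,
// try to raise it, and bail out. Otherwise run fn and release the lock.
func withSingleInstanceSketch(lockFile string, tryRaise func() bool, fn func() error) error {
	lock, err := os.OpenFile(lockFile, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o600)
	if err != nil {
		// Lock already held: another instance is probably running.
		if ok := tryRaise(); !ok {
			return fmt.Errorf("another instance is already running but it could not be raised")
		}

		return nil
	}

	defer func() {
		_ = lock.Close()
		_ = os.Remove(lockFile)
	}()

	return fn()
}
```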
|
||||
|
||||
@ -23,6 +23,7 @@ import (
|
||||
"runtime"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/go-autostart"
|
||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/bridge"
|
||||
@ -46,7 +47,7 @@ const vaultSecretName = "bridge-vault-key"
|
||||
var deleteOldGoIMAPFiles bool //nolint:gochecknoglobals
|
||||
|
||||
// withBridge creates and tears down the bridge.
|
||||
func withBridge( //nolint:funlen
|
||||
func withBridge(
|
||||
c *cli.Context,
|
||||
exe string,
|
||||
locations *locations.Locations,
|
||||
@ -110,6 +111,7 @@ func withBridge( //nolint:funlen
|
||||
// Crash and report stuff
|
||||
crashHandler,
|
||||
reporter,
|
||||
imap.DefaultEpochUIDValidityGenerator(),
|
||||
|
||||
// The logging stuff.
|
||||
c.String(flagLogIMAP) == "client" || c.String(flagLogIMAP) == "all",
|
||||
|
||||
@ -187,7 +187,6 @@ func migrateOldAccount(userID string, store *credentials.Store, v *vault.Vault)
|
||||
return nil
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func migratePrefsToVault(vault *vault.Vault, b []byte) error {
|
||||
var prefs struct {
|
||||
IMAPPort int `json:"user_port_imap,,string"`
|
||||
@ -265,14 +264,6 @@ func migratePrefsToVault(vault *vault.Vault, b []byte) error {
|
||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate show all mail: %w", err))
|
||||
}
|
||||
|
||||
if err := vault.SetSyncWorkers(prefs.FetchWorkers); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate sync workers: %w", err))
|
||||
}
|
||||
|
||||
if err := vault.SetSyncAttPool(prefs.AttachmentWorkers); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate sync attachment pool: %w", err))
|
||||
}
|
||||
|
||||
if err := vault.SetCookies([]byte(prefs.Cookies)); err != nil {
|
||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate cookies: %w", err))
|
||||
}
|
||||
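The legacy prefs.json migrated above stores numeric settings as JSON strings, hence the `,string` option in the struct tags (the doubled comma in `user_port_imap,,string` is just an empty option and behaves the same). A small stand-alone example of decoding such a string-encoded integer, with an illustrative value:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// oldPrefs mirrors the shape of the legacy prefs file: the port is serialized
// as a JSON string, and the ",string" option tells encoding/json to decode it
// into an int.
type oldPrefs struct {
	IMAPPort int `json:"user_port_imap,string"`
}

func main() {
	var prefs oldPrefs

	raw := []byte(`{"user_port_imap": "1143"}`)
	if err := json.Unmarshal(raw, &prefs); err != nil {
		panic(err)
	}

	fmt.Println(prefs.IMAPPort) // 1143
}
```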
|
||||
@ -68,8 +68,6 @@ func TestMigratePrefsToVault(t *testing.T) {
|
||||
require.True(t, vault.GetAutostart())
|
||||
|
||||
// Check that the other app settings have been migrated.
|
||||
require.Equal(t, 16, vault.SyncWorkers())
|
||||
require.Equal(t, 16, vault.SyncAttPool())
|
||||
require.False(t, vault.GetProxyAllowed())
|
||||
require.False(t, vault.GetShowAllMail())
|
||||
|
||||
|
||||
@ -34,7 +34,7 @@ import (
|
||||
//
|
||||
// On macOS and Linux, if the already-running instance is older than this one,
// the old instance is killed and this new bridge continues (i.e. no error is returned).
|
||||
func checkSingleInstance(lockFilePath string, curVersion *semver.Version) (*os.File, error) {
|
||||
func checkSingleInstance(settingPath, lockFilePath string, curVersion *semver.Version) (*os.File, error) {
|
||||
if lock, err := singleinstance.CreateLockFile(lockFilePath); err == nil {
|
||||
logrus.WithField("path", lockFilePath).Debug("Created lock file; no other instance is running")
|
||||
return lock, nil
|
||||
@ -44,7 +44,7 @@ func checkSingleInstance(lockFilePath string, curVersion *semver.Version) (*os.F
|
||||
|
||||
// We couldn't create the lock file, so another instance is probably running.
|
||||
// Check if it's an older version of the app.
|
||||
lastVersion, ok := focus.TryVersion()
|
||||
lastVersion, ok := focus.TryVersion(settingPath)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to determine version of running instance")
|
||||
}
|
||||
|
||||
@ -32,14 +32,12 @@ func defaultAPIOptions(
|
||||
version *semver.Version,
|
||||
cookieJar http.CookieJar,
|
||||
transport http.RoundTripper,
|
||||
poolSize int,
|
||||
) []proton.Option {
|
||||
return []proton.Option{
|
||||
proton.WithHostURL(apiURL),
|
||||
proton.WithAppVersion(constants.AppVersion(version.Original())),
|
||||
proton.WithCookieJar(cookieJar),
|
||||
proton.WithTransport(transport),
|
||||
proton.WithAttPoolSize(poolSize),
|
||||
proton.WithLogger(logrus.StandardLogger()),
|
||||
}
|
||||
}
|
||||
|
||||
@ -32,7 +32,6 @@ func newAPIOptions(
|
||||
version *semver.Version,
|
||||
cookieJar http.CookieJar,
|
||||
transport http.RoundTripper,
|
||||
poolSize int,
|
||||
) []proton.Option {
|
||||
return defaultAPIOptions(apiURL, version, cookieJar, transport, poolSize)
|
||||
return defaultAPIOptions(apiURL, version, cookieJar, transport)
|
||||
}
|
||||
|
||||
@ -33,9 +33,8 @@ func newAPIOptions(
|
||||
version *semver.Version,
|
||||
cookieJar http.CookieJar,
|
||||
transport http.RoundTripper,
|
||||
poolSize int,
|
||||
) []proton.Option {
|
||||
opt := defaultAPIOptions(apiURL, version, cookieJar, transport, poolSize)
|
||||
opt := defaultAPIOptions(apiURL, version, cookieJar, transport)
|
||||
|
||||
if host := os.Getenv("BRIDGE_API_HOST"); host != "" {
|
||||
opt = append(opt, proton.WithHostURL(host))
|
||||
|
||||
@ -31,6 +31,7 @@ import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/gluon"
|
||||
imapEvents "github.com/ProtonMail/gluon/events"
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/gluon/reporter"
|
||||
"github.com/ProtonMail/gluon/watcher"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
@ -124,10 +125,12 @@ type Bridge struct {
|
||||
|
||||
// goUpdate triggers a check/install of updates.
|
||||
goUpdate func()
|
||||
|
||||
uidValidityGenerator imap.UIDValidityGenerator
|
||||
}
|
||||
|
||||
// New creates a new bridge.
|
||||
func New( //nolint:funlen
|
||||
func New(
|
||||
locator Locator, // the locator to provide paths to store data
|
||||
vault *vault.Vault, // the bridge's encrypted data store
|
||||
autostarter Autostarter, // the autostarter to manage autostart settings
|
||||
@ -142,12 +145,13 @@ func New( //nolint:funlen
|
||||
proxyCtl ProxyController, // the DoH controller
|
||||
crashHandler async.PanicHandler,
|
||||
reporter reporter.Reporter,
|
||||
uidValidityGenerator imap.UIDValidityGenerator,
|
||||
|
||||
logIMAPClient, logIMAPServer bool, // whether to log IMAP client/server activity
|
||||
logSMTP bool, // whether to log SMTP activity
|
||||
) (*Bridge, <-chan events.Event, error) {
|
||||
// api is the user's API manager.
|
||||
api := proton.New(newAPIOptions(apiURL, curVersion, cookieJar, roundTripper, vault.SyncAttPool())...)
|
||||
api := proton.New(newAPIOptions(apiURL, curVersion, cookieJar, roundTripper)...)
|
||||
|
||||
// tasks holds all the bridge's background tasks.
|
||||
tasks := async.NewGroup(context.Background(), crashHandler)
|
||||
@ -171,6 +175,7 @@ func New( //nolint:funlen
|
||||
api,
|
||||
identifier,
|
||||
proxyCtl,
|
||||
uidValidityGenerator,
|
||||
logIMAPClient, logIMAPServer, logSMTP,
|
||||
)
|
||||
if err != nil {
|
||||
@ -185,22 +190,9 @@ func New( //nolint:funlen
|
||||
return nil, nil, fmt.Errorf("failed to initialize bridge: %w", err)
|
||||
}
|
||||
|
||||
// Start serving IMAP.
|
||||
if err := bridge.serveIMAP(); err != nil {
|
||||
logrus.WithError(err).Error("IMAP error")
|
||||
bridge.PushError(ErrServeIMAP)
|
||||
}
|
||||
|
||||
// Start serving SMTP.
|
||||
if err := bridge.serveSMTP(); err != nil {
|
||||
logrus.WithError(err).Error("SMTP error")
|
||||
bridge.PushError(ErrServeSMTP)
|
||||
}
|
||||
|
||||
return bridge, eventCh, nil
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func newBridge(
|
||||
tasks *async.Group,
|
||||
imapEventCh chan imapEvents.Event,
|
||||
@ -216,6 +208,7 @@ func newBridge(
|
||||
api *proton.Manager,
|
||||
identifier Identifier,
|
||||
proxyCtl ProxyController,
|
||||
uidValidityGenerator imap.UIDValidityGenerator,
|
||||
|
||||
logIMAPClient, logIMAPServer, logSMTP bool,
|
||||
) (*Bridge, error) {
|
||||
@ -254,12 +247,13 @@ func newBridge(
|
||||
logIMAPServer,
|
||||
imapEventCh,
|
||||
tasks,
|
||||
uidValidityGenerator,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create IMAP server: %w", err)
|
||||
}
|
||||
|
||||
focusService, err := focus.NewService(curVersion)
|
||||
focusService, err := focus.NewService(locator, curVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create focus service: %w", err)
|
||||
}
|
||||
@ -300,6 +294,8 @@ func newBridge(
|
||||
lastVersion: lastVersion,
|
||||
|
||||
tasks: tasks,
|
||||
|
||||
uidValidityGenerator: uidValidityGenerator,
|
||||
}
|
||||
|
||||
bridge.smtpServer = newSMTPServer(bridge, tlsConfig, logSMTP)
|
||||
@ -307,7 +303,6 @@ func newBridge(
|
||||
return bridge, nil
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func (bridge *Bridge) init(tlsReporter TLSReporter) error {
|
||||
// Enable or disable the proxy at startup.
|
||||
if bridge.vault.GetProxyAllowed() {
|
||||
@ -376,16 +371,32 @@ func (bridge *Bridge) init(tlsReporter TLSReporter) error {
|
||||
})
|
||||
})
|
||||
|
||||
// Attempt to lazy load users when triggered.
|
||||
// We need to load users before we can start the IMAP and SMTP servers.
|
||||
// We must only start the servers once.
|
||||
var once sync.Once
|
||||
|
||||
// Attempt to load users from the vault when triggered.
|
||||
bridge.goLoad = bridge.tasks.Trigger(func(ctx context.Context) {
|
||||
if err := bridge.loadUsers(ctx); err != nil {
|
||||
logrus.WithError(err).Error("Failed to load users")
|
||||
if netErr := new(proton.NetError); !errors.As(err, &netErr) {
|
||||
sentry.ReportError(bridge.reporter, "Failed to load users", err)
|
||||
}
|
||||
} else {
|
||||
bridge.publish(events.AllUsersLoaded{})
|
||||
return
|
||||
}
|
||||
|
||||
bridge.publish(events.AllUsersLoaded{})
|
||||
|
||||
// Once all users have been loaded, start the bridge's IMAP and SMTP servers.
|
||||
once.Do(func() {
|
||||
if err := bridge.serveIMAP(); err != nil {
|
||||
logrus.WithError(err).Error("Failed to start IMAP server")
|
||||
}
|
||||
|
||||
if err := bridge.serveSMTP(); err != nil {
|
||||
logrus.WithError(err).Error("Failed to start SMTP server")
|
||||
}
|
||||
})
|
||||
})
|
||||
defer bridge.goLoad()
|
||||
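The init refactor above defers starting the IMAP and SMTP servers until users have been loaded, and uses sync.Once so repeated load triggers cannot start them twice. A minimal sketch of that pattern with stand-in serve functions:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once

	serveIMAP := func() error { fmt.Println("IMAP serving"); return nil }
	serveSMTP := func() error { fmt.Println("SMTP serving"); return nil }

	// loadUsers may be triggered more than once, but the servers must only
	// ever be started once.
	loadUsers := func() {
		once.Do(func() {
			if err := serveIMAP(); err != nil {
				fmt.Println("failed to start IMAP:", err)
			}

			if err := serveSMTP(); err != nil {
				fmt.Println("failed to start SMTP:", err)
			}
		})
	}

	loadUsers()
	loadUsers() // second trigger: servers are not started again
}
```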
|
||||
|
||||
@ -21,6 +21,7 @@ import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -29,6 +30,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
"github.com/ProtonMail/go-proton-api/server"
|
||||
"github.com/ProtonMail/go-proton-api/server/backend"
|
||||
@ -121,8 +123,11 @@ func TestBridge_Focus(t *testing.T) {
|
||||
raiseCh, done := bridge.GetEvents(events.Raise{})
|
||||
defer done()
|
||||
|
||||
settingsFolder, err := locator.ProvideSettingsPath()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Simulate a focus event.
|
||||
focus.TryRaise()
|
||||
focus.TryRaise(settingsFolder)
|
||||
|
||||
// Wait for the event.
|
||||
require.IsType(t, events.Raise{}, <-raiseCh)
|
||||
@ -496,6 +501,21 @@ func TestBridge_InitGluonDirectory(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBridge_LoginFailed(t *testing.T) {
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, vaultKey []byte) {
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, vaultKey, func(bridge *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
failCh, done := chToType[events.Event, events.IMAPLoginFailed](bridge.GetEvents(events.IMAPLoginFailed{}))
|
||||
defer done()
|
||||
|
||||
imapClient, err := client.Dial(net.JoinHostPort(constants.Host, fmt.Sprint(bridge.GetIMAPPort())))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Error(t, imapClient.Login("badUser", "badPass"))
|
||||
require.Equal(t, "badUser", (<-failCh).Username)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
func TestBridge_ChangeCacheDirectory(t *testing.T) {
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, vaultKey []byte) {
|
||||
userID, addrID, err := s.CreateUser("imap", password)
|
||||
@ -657,6 +677,9 @@ func withMocks(t *testing.T, tests func(*bridge.Mocks)) {
|
||||
tests(mocks)
|
||||
}
|
||||
|
||||
// Needs to be global to survive bridge shutdown/startup in unit tests as they happen too fast.
|
||||
var testUIDValidityGenerator = imap.DefaultEpochUIDValidityGenerator()
|
||||
|
||||
// withBridge creates a new bridge which points to the given API URL and uses the given keychain, and closes it when done.
|
||||
func withBridgeNoMocks(
|
||||
ctx context.Context,
|
||||
@ -702,6 +725,7 @@ func withBridgeNoMocks(
|
||||
mocks.ProxyCtl,
|
||||
mocks.CrashHandler,
|
||||
mocks.Reporter,
|
||||
testUIDValidityGenerator,
|
||||
|
||||
// The logging stuff.
|
||||
os.Getenv("BRIDGE_LOG_IMAP_CLIENT") == "1",
|
||||
@ -713,6 +737,10 @@ func withBridgeNoMocks(
|
||||
|
||||
// Wait for bridge to finish loading users.
|
||||
waitForEvent(t, eventCh, events.AllUsersLoaded{})
|
||||
// Wait for bridge to start the IMAP server.
|
||||
waitForEvent(t, eventCh, events.IMAPServerReady{})
|
||||
// Wait for bridge to start the SMTP server.
|
||||
waitForEvent(t, eventCh, events.SMTPServerReady{})
|
||||
|
||||
// Set random IMAP and SMTP ports for the tests.
|
||||
require.NoError(t, bridge.SetIMAPPort(0))
|
||||
@ -742,7 +770,7 @@ func withBridge(
|
||||
})
|
||||
}
|
||||
|
||||
func waitForEvent[T any](t *testing.T, eventCh <-chan events.Event, wantEvent T) {
|
||||
func waitForEvent[T any](t *testing.T, eventCh <-chan events.Event, _ T) {
|
||||
t.Helper()
|
||||
|
||||
for event := range eventCh {
|
||||
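waitForEvent now only uses its third argument for its type parameter, so the parameter is renamed to `_`. A hedged, self-contained sketch of how such a generic "wait for an event of type T" helper can be written; the types and names here are illustrative, not the bridge's:

```go
package main

import "fmt"

type serverReady struct{ Port int }
type serverStopped struct{}

// waitFor drains eventCh until a value of dynamic type T shows up, mirroring
// the generic waitForEvent test helper above.
func waitFor[T any](eventCh <-chan any) (T, bool) {
	for event := range eventCh {
		if want, ok := event.(T); ok {
			return want, true
		}
	}

	var zero T

	return zero, false // channel closed before the event arrived
}

func main() {
	ch := make(chan any, 2)
	ch <- serverStopped{}
	ch <- serverReady{Port: 1143}
	close(ch)

	ready, ok := waitFor[serverReady](ch)
	fmt.Println(ok, ready.Port) // true 1143
}
```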
|
||||
@ -37,7 +37,7 @@ const (
|
||||
MaxCompressedFilesCount = 6
|
||||
)
|
||||
|
||||
func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, description, username, email, client string, attachLogs bool) error { //nolint:funlen
|
||||
func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, description, username, email, client string, attachLogs bool) error {
|
||||
var account string
|
||||
|
||||
if info, err := bridge.QueryUserInfo(username); err == nil {
|
||||
|
||||
@ -22,10 +22,7 @@ import "errors"
|
||||
var (
|
||||
ErrVaultInsecure = errors.New("the vault is insecure")
|
||||
ErrVaultCorrupt = errors.New("the vault is corrupt")
|
||||
|
||||
ErrServeIMAP = errors.New("failed to serve IMAP")
|
||||
ErrServeSMTP = errors.New("failed to serve SMTP")
|
||||
ErrWatchUpdates = errors.New("failed to watch for updates")
|
||||
ErrWatchUpdates = errors.New("failed to watch for updates")
|
||||
|
||||
ErrNoSuchUser = errors.New("no such user")
|
||||
ErrUserAlreadyExists = errors.New("user already exists")
|
||||
|
||||
@ -28,10 +28,12 @@ import (
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/gluon"
|
||||
imapEvents "github.com/ProtonMail/gluon/events"
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/gluon/reporter"
|
||||
"github.com/ProtonMail/gluon/store"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/async"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/events"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/logging"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/user"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/vault"
|
||||
@ -44,26 +46,42 @@ const (
|
||||
)
|
||||
|
||||
func (bridge *Bridge) serveIMAP() error {
|
||||
if bridge.imapServer == nil {
|
||||
return fmt.Errorf("no imap server instance running")
|
||||
}
|
||||
port, err := func() (int, error) {
|
||||
if bridge.imapServer == nil {
|
||||
return 0, fmt.Errorf("no IMAP server instance running")
|
||||
}
|
||||
|
||||
logrus.Info("Starting IMAP server")
|
||||
logrus.Info("Starting IMAP server")
|
||||
|
||||
imapListener, err := newListener(bridge.vault.GetIMAPPort(), bridge.vault.GetIMAPSSL(), bridge.tlsConfig)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create IMAP listener: %w", err)
|
||||
}
|
||||
|
||||
bridge.imapListener = imapListener
|
||||
|
||||
if err := bridge.imapServer.Serve(context.Background(), bridge.imapListener); err != nil {
|
||||
return 0, fmt.Errorf("failed to serve IMAP: %w", err)
|
||||
}
|
||||
|
||||
if err := bridge.vault.SetIMAPPort(getPort(imapListener.Addr())); err != nil {
|
||||
return 0, fmt.Errorf("failed to store IMAP port in vault: %w", err)
|
||||
}
|
||||
|
||||
return getPort(imapListener.Addr()), nil
|
||||
}()
|
||||
|
||||
imapListener, err := newListener(bridge.vault.GetIMAPPort(), bridge.vault.GetIMAPSSL(), bridge.tlsConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create IMAP listener: %w", err)
|
||||
bridge.publish(events.IMAPServerError{
|
||||
Error: err,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
bridge.imapListener = imapListener
|
||||
|
||||
if err := bridge.imapServer.Serve(context.Background(), bridge.imapListener); err != nil {
|
||||
return fmt.Errorf("failed to serve IMAP: %w", err)
|
||||
}
|
||||
|
||||
if err := bridge.vault.SetIMAPPort(getPort(imapListener.Addr())); err != nil {
|
||||
return fmt.Errorf("failed to store IMAP port in vault: %w", err)
|
||||
}
|
||||
bridge.publish(events.IMAPServerReady{
|
||||
Port: port,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
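serveIMAP now does all of its setup inside an immediately-invoked closure that returns the bound port, so there is exactly one place to publish IMAPServerError on failure and IMAPServerReady (with the port) on success. A small sketch of that shape with stand-in event types and a plain TCP listener:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	publish := func(event any) { fmt.Printf("published %T: %+v\n", event, event) }

	type serverReady struct{ Port int }
	type serverError struct{ Err error }

	serve := func() error {
		// All setup happens inside one closure so there is a single spot to
		// publish either the error or the ready event afterwards.
		port, err := func() (int, error) {
			listener, err := net.Listen("tcp", "127.0.0.1:0") // 0 = pick a free port
			if err != nil {
				return 0, fmt.Errorf("failed to create listener: %w", err)
			}

			return listener.Addr().(*net.TCPAddr).Port, nil
		}()
		if err != nil {
			publish(serverError{Err: err})

			return err
		}

		publish(serverReady{Port: port})

		return nil
	}

	_ = serve()
}
```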
@ -75,6 +93,8 @@ func (bridge *Bridge) restartIMAP() error {
|
||||
if err := bridge.imapListener.Close(); err != nil {
|
||||
return fmt.Errorf("failed to close IMAP listener: %w", err)
|
||||
}
|
||||
|
||||
bridge.publish(events.IMAPServerStopped{})
|
||||
}
|
||||
|
||||
return bridge.serveIMAP()
|
||||
@ -87,6 +107,7 @@ func (bridge *Bridge) closeIMAP(ctx context.Context) error {
|
||||
if err := bridge.imapServer.Close(ctx); err != nil {
|
||||
return fmt.Errorf("failed to close IMAP server: %w", err)
|
||||
}
|
||||
|
||||
bridge.imapServer = nil
|
||||
}
|
||||
|
||||
@ -96,12 +117,12 @@ func (bridge *Bridge) closeIMAP(ctx context.Context) error {
|
||||
}
|
||||
}
|
||||
|
||||
bridge.publish(events.IMAPServerStopped{})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// addIMAPUser connects the given user to gluon.
|
||||
//
|
||||
//nolint:funlen
|
||||
func (bridge *Bridge) addIMAPUser(ctx context.Context, user *user.User) error {
|
||||
if bridge.imapServer == nil {
|
||||
return fmt.Errorf("no imap server instance running")
|
||||
@ -242,6 +263,13 @@ func (bridge *Bridge) handleIMAPEvent(event imapEvents.Event) {
|
||||
if event.IMAPID.Name != "" && event.IMAPID.Version != "" {
|
||||
bridge.identifier.SetClient(event.IMAPID.Name, event.IMAPID.Version)
|
||||
}
|
||||
|
||||
case imapEvents.LoginFailed:
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"sessionID": event.SessionID,
|
||||
"username": event.Username,
|
||||
}).Info("Received IMAP login failure notification")
|
||||
bridge.publish(events.IMAPLoginFailed{Username: event.Username})
|
||||
}
|
||||
}
|
||||
|
||||
@ -261,7 +289,6 @@ func ApplyGluonConfigPathSuffix(basePath string) string {
|
||||
return filepath.Join(basePath, "backend", "db")
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func newIMAPServer(
|
||||
gluonCacheDir, gluonConfigDir string,
|
||||
version *semver.Version,
|
||||
@ -270,6 +297,7 @@ func newIMAPServer(
|
||||
logClient, logServer bool,
|
||||
eventCh chan<- imapEvents.Event,
|
||||
tasks *async.Group,
|
||||
uidValidityGenerator imap.UIDValidityGenerator,
|
||||
) (*gluon.Server, error) {
|
||||
gluonCacheDir = ApplyGluonCachePathSuffix(gluonCacheDir)
|
||||
gluonConfigDir = ApplyGluonConfigPathSuffix(gluonConfigDir)
|
||||
@ -313,6 +341,7 @@ func newIMAPServer(
|
||||
gluon.WithLogger(imapClientLog, imapServerLog),
|
||||
getGluonVersionInfo(version),
|
||||
gluon.WithReporter(reporter),
|
||||
gluon.WithUIDValidityGenerator(uidValidityGenerator),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -348,7 +377,6 @@ func (*storeBuilder) New(path, userID string, passphrase []byte) (store.Store, e
|
||||
return store.NewOnDiskStore(
|
||||
filepath.Join(path, userID),
|
||||
passphrase,
|
||||
store.WithCompressor(new(store.GZipCompressor)),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@ -57,6 +57,7 @@ func TestBridge_Refresh(t *testing.T) {
|
||||
require.Equal(t, userID, (<-syncCh).UserID)
|
||||
})
|
||||
|
||||
var uidValidities = make(map[string]uint32, len(names))
|
||||
// If we then connect an IMAP client, it should see all the labels with UID validity of 1.
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(b *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
mocks.Reporter.EXPECT().ReportMessageWithContext(gomock.Any(), gomock.Any()).AnyTimes()
|
||||
@ -73,7 +74,7 @@ func TestBridge_Refresh(t *testing.T) {
|
||||
for _, name := range names {
|
||||
status, err := client.Select("Folders/"+name, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint32(1000), status.UidValidity)
|
||||
uidValidities[name] = status.UidValidity
|
||||
}
|
||||
})
|
||||
|
||||
@ -106,7 +107,7 @@ func TestBridge_Refresh(t *testing.T) {
|
||||
for _, name := range names {
|
||||
status, err := client.Select("Folders/"+name, false)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint32(1001), status.UidValidity)
|
||||
require.Greater(t, status.UidValidity, uidValidities[name])
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
@ -131,26 +131,21 @@ func (bridge *Bridge) SetGluonDir(ctx context.Context, newGluonDir string) error
|
||||
return fmt.Errorf("new gluon dir is the same as the old one")
|
||||
}
|
||||
|
||||
if err := bridge.stopEventLoops(); err != nil {
|
||||
return err
|
||||
if err := bridge.closeIMAP(context.Background()); err != nil {
|
||||
return fmt.Errorf("failed to close IMAP: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
err := bridge.startEventLoops(ctx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := bridge.moveGluonCacheDir(currentGluonDir, newGluonDir); err != nil {
|
||||
logrus.WithError(err).Error("failed to move GluonCacheDir")
|
||||
|
||||
if err := bridge.vault.SetGluonDir(currentGluonDir); err != nil {
|
||||
panic(err)
|
||||
return fmt.Errorf("failed to revert GluonCacheDir: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
gluonDataDir, err := bridge.GetGluonDataDir()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to get Gluon Database directory: %w", err))
|
||||
return fmt.Errorf("failed to get Gluon Database directory: %w", err)
|
||||
}
|
||||
|
||||
imapServer, err := newIMAPServer(
|
||||
@ -163,13 +158,24 @@ func (bridge *Bridge) SetGluonDir(ctx context.Context, newGluonDir string) error
|
||||
bridge.logIMAPServer,
|
||||
bridge.imapEventCh,
|
||||
bridge.tasks,
|
||||
bridge.uidValidityGenerator,
|
||||
)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to create new IMAP server: %w", err))
|
||||
return fmt.Errorf("failed to create new IMAP server: %w", err)
|
||||
}
|
||||
|
||||
bridge.imapServer = imapServer
|
||||
|
||||
for _, user := range bridge.users {
|
||||
if err := bridge.addIMAPUser(ctx, user); err != nil {
|
||||
return fmt.Errorf("failed to add users to new IMAP server: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := bridge.serveIMAP(); err != nil {
|
||||
return fmt.Errorf("failed to serve IMAP: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, bridge.usersLock)
|
||||
}
|
||||
@ -191,34 +197,6 @@ func (bridge *Bridge) moveGluonCacheDir(oldGluonDir, newGluonDir string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bridge *Bridge) stopEventLoops() error {
|
||||
if err := bridge.closeIMAP(context.Background()); err != nil {
|
||||
return fmt.Errorf("failed to close IMAP: %w", err)
|
||||
}
|
||||
|
||||
if err := bridge.closeSMTP(); err != nil {
|
||||
return fmt.Errorf("failed to close SMTP: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bridge *Bridge) startEventLoops(ctx context.Context) error {
|
||||
for _, user := range bridge.users {
|
||||
if err := bridge.addIMAPUser(ctx, user); err != nil {
|
||||
return fmt.Errorf("failed to add users to new IMAP server: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := bridge.serveIMAP(); err != nil {
|
||||
panic(fmt.Errorf("failed to serve IMAP: %w", err))
|
||||
}
|
||||
|
||||
if err := bridge.serveSMTP(); err != nil {
|
||||
panic(fmt.Errorf("failed to serve SMTP: %w", err))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bridge *Bridge) GetProxyAllowed() bool {
|
||||
return bridge.vault.GetProxyAllowed()
|
||||
}
|
||||
|
||||
@ -22,6 +22,7 @@ import (
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/events"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/logging"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
@ -31,25 +32,41 @@ import (
|
||||
)
|
||||
|
||||
func (bridge *Bridge) serveSMTP() error {
|
||||
logrus.Info("Starting SMTP server")
|
||||
port, err := func() (int, error) {
|
||||
logrus.Info("Starting SMTP server")
|
||||
|
||||
smtpListener, err := newListener(bridge.vault.GetSMTPPort(), bridge.vault.GetSMTPSSL(), bridge.tlsConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create SMTP listener: %w", err)
|
||||
}
|
||||
|
||||
bridge.smtpListener = smtpListener
|
||||
|
||||
bridge.tasks.Once(func(context.Context) {
|
||||
if err := bridge.smtpServer.Serve(smtpListener); err != nil {
|
||||
logrus.WithError(err).Info("SMTP server stopped")
|
||||
smtpListener, err := newListener(bridge.vault.GetSMTPPort(), bridge.vault.GetSMTPSSL(), bridge.tlsConfig)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to create SMTP listener: %w", err)
|
||||
}
|
||||
})
|
||||
|
||||
if err := bridge.vault.SetSMTPPort(getPort(smtpListener.Addr())); err != nil {
|
||||
return fmt.Errorf("failed to store SMTP port in vault: %w", err)
|
||||
bridge.smtpListener = smtpListener
|
||||
|
||||
bridge.tasks.Once(func(context.Context) {
|
||||
if err := bridge.smtpServer.Serve(smtpListener); err != nil {
|
||||
logrus.WithError(err).Info("SMTP server stopped")
|
||||
}
|
||||
})
|
||||
|
||||
if err := bridge.vault.SetSMTPPort(getPort(smtpListener.Addr())); err != nil {
|
||||
return 0, fmt.Errorf("failed to store SMTP port in vault: %w", err)
|
||||
}
|
||||
|
||||
return getPort(smtpListener.Addr()), nil
|
||||
}()
|
||||
|
||||
if err != nil {
|
||||
bridge.publish(events.SMTPServerError{
|
||||
Error: err,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
bridge.publish(events.SMTPServerReady{
|
||||
Port: port,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -60,6 +77,8 @@ func (bridge *Bridge) restartSMTP() error {
|
||||
return fmt.Errorf("failed to close SMTP: %w", err)
|
||||
}
|
||||
|
||||
bridge.publish(events.SMTPServerStopped{})
|
||||
|
||||
bridge.smtpServer = newSMTPServer(bridge, bridge.tlsConfig, bridge.logSMTP)
|
||||
|
||||
return bridge.serveSMTP()
|
||||
@ -82,6 +101,8 @@ func (bridge *Bridge) closeSMTP() error {
|
||||
logrus.WithError(err).Debug("Failed to close SMTP server (expected -- we close the listener ourselves)")
|
||||
}
|
||||
|
||||
bridge.publish(events.SMTPServerStopped{})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@ -431,7 +431,7 @@ func createMessages(ctx context.Context, t *testing.T, c *proton.Client, addrID,
|
||||
_, ok := addrKRs[addrID]
|
||||
require.True(t, ok)
|
||||
|
||||
res, err := stream.Collect(ctx, c.ImportMessages(
|
||||
str, err := c.ImportMessages(
|
||||
ctx,
|
||||
addrKRs[addrID],
|
||||
runtime.NumCPU(),
|
||||
@ -446,7 +446,10 @@ func createMessages(ctx context.Context, t *testing.T, c *proton.Client, addrID,
|
||||
Message: message,
|
||||
}
|
||||
})...,
|
||||
))
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err := stream.Collect(ctx, str)
|
||||
require.NoError(t, err)
|
||||
|
||||
return xslices.Map(res, func(res proton.ImportRes) string {
|
||||
|
||||
@ -32,19 +32,7 @@ func (bridge *Bridge) CheckForUpdates() {
|
||||
}
|
||||
|
||||
func (bridge *Bridge) InstallUpdate(version updater.VersionInfo) {
|
||||
log := logrus.WithFields(logrus.Fields{
|
||||
"version": version.Version,
|
||||
"current": bridge.curVersion,
|
||||
"channel": bridge.vault.GetUpdateChannel(),
|
||||
})
|
||||
|
||||
select {
|
||||
case bridge.installCh <- installJob{version: version, silent: false}:
|
||||
log.Info("The update will be installed manually")
|
||||
|
||||
default:
|
||||
log.Info("An update is already being installed")
|
||||
}
|
||||
bridge.installCh <- installJob{version: version, silent: false}
|
||||
}
|
||||
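InstallUpdate previously enqueued the job with a non-blocking select, silently dropping the request when an install was already pending; the new code performs a plain blocking send on installCh and leaves deduplication to the version check in installUpdate. A tiny example contrasting the two sends (installJob here is a stand-in type):

```go
package main

import "fmt"

type installJob struct{ version string }

// queueNonBlocking mirrors the old behaviour: if an install is already
// queued, the new request is silently dropped.
func queueNonBlocking(installCh chan installJob, job installJob) bool {
	select {
	case installCh <- job:
		return true
	default:
		return false
	}
}

func main() {
	installCh := make(chan installJob, 1)

	fmt.Println(queueNonBlocking(installCh, installJob{"3.0.99"})) // true, queued
	fmt.Println(queueNonBlocking(installCh, installJob{"3.1.0"}))  // false, dropped

	// The new behaviour is a plain blocking send: installCh <- job waits for
	// the installer goroutine instead of dropping the request.
	fmt.Println((<-installCh).version) // 3.0.99
}
```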
|
||||
func (bridge *Bridge) handleUpdate(version updater.VersionInfo) {
|
||||
@ -89,17 +77,7 @@ func (bridge *Bridge) handleUpdate(version updater.VersionInfo) {
|
||||
|
||||
default:
|
||||
safe.RLock(func() {
|
||||
if version.Version.GreaterThan(bridge.newVersion) {
|
||||
log.Info("An update is available")
|
||||
|
||||
select {
|
||||
case bridge.installCh <- installJob{version: version, silent: true}:
|
||||
log.Info("The update will be installed silently")
|
||||
|
||||
default:
|
||||
log.Info("An update is already being installed")
|
||||
}
|
||||
}
|
||||
bridge.installCh <- installJob{version: version, silent: true}
|
||||
}, bridge.newVersionLock)
|
||||
}
|
||||
}
|
||||
@ -117,6 +95,12 @@ func (bridge *Bridge) installUpdate(ctx context.Context, job installJob) {
|
||||
"channel": bridge.vault.GetUpdateChannel(),
|
||||
})
|
||||
|
||||
if !job.version.Version.GreaterThan(bridge.newVersion) {
|
||||
return
|
||||
}
|
||||
|
||||
log.WithField("silent", job.silent).Info("An update is available")
|
||||
|
||||
bridge.publish(events.UpdateAvailable{
|
||||
Version: job.version,
|
||||
Compatible: true,
|
||||
@ -142,6 +126,7 @@ func (bridge *Bridge) installUpdate(ctx context.Context, job installJob) {
|
||||
Silent: job.silent,
|
||||
Error: err,
|
||||
})
|
||||
|
||||
default:
|
||||
log.Info("The update was installed successfully")
|
||||
|
||||
|
||||
@ -380,6 +380,7 @@ func (bridge *Bridge) loadUser(ctx context.Context, user *vault.User) error {
|
||||
logrus.WithError(err).Warn("Failed to clear user secrets")
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("failed to create API client: %w", err)
|
||||
}
|
||||
|
||||
@ -462,8 +463,8 @@ func (bridge *Bridge) addUserWithVault(
|
||||
bridge.reporter,
|
||||
apiUser,
|
||||
bridge.crashHandler,
|
||||
bridge.vault.SyncWorkers(),
|
||||
bridge.vault.GetShowAllMail(),
|
||||
bridge.vault.GetMaxSyncMemory(),
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create user: %w", err)
|
||||
|
||||
@ -20,6 +20,7 @@ package bridge_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
@ -113,12 +114,13 @@ func TestBridge_User_BadMessage_NoBadEvent(t *testing.T) {
|
||||
|
||||
var messageIDs []string
|
||||
|
||||
// Create 10 more messages for the user, generating events.
|
||||
withClient(ctx, t, s, "user", password, func(ctx context.Context, c *proton.Client) {
|
||||
messageIDs = createNumMessages(ctx, t, c, addrID, proton.InboxLabel, 10)
|
||||
})
|
||||
|
||||
// If bridge attempts to sync the new messages, it should get a BadRequest error.
|
||||
s.AddStatusHook(func(req *http.Request) (int, bool) {
|
||||
if len(messageIDs) < 3 {
|
||||
return 0, false
|
||||
}
|
||||
|
||||
if strings.Contains(req.URL.Path, "/mail/v4/messages/"+messageIDs[2]) {
|
||||
return http.StatusUnprocessableEntity, true
|
||||
}
|
||||
@ -126,11 +128,6 @@ func TestBridge_User_BadMessage_NoBadEvent(t *testing.T) {
|
||||
return 0, false
|
||||
})
|
||||
|
||||
// Create 10 more messages for the user, generating events.
|
||||
withClient(ctx, t, s, "user", password, func(ctx context.Context, c *proton.Client) {
|
||||
messageIDs = createNumMessages(ctx, t, c, addrID, proton.InboxLabel, 10)
|
||||
})
|
||||
|
||||
// Remove messages
|
||||
withClient(ctx, t, s, "user", password, func(ctx context.Context, c *proton.Client) {
|
||||
require.NoError(t, c.DeleteMessage(ctx, messageIDs...))
|
||||
@ -295,6 +292,63 @@ func TestBridge_User_Network_NoBadEvents(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBridge_User_DropConn_NoBadEvent(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
dropListener := proton.NewListener(l, proton.NewDropConn)
|
||||
defer func() { _ = dropListener.Close() }()
|
||||
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, storeKey []byte) {
|
||||
// Create a user.
|
||||
_, addrID, err := s.CreateUser("user", password)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create 10 messages for the user.
|
||||
withClient(ctx, t, s, "user", password, func(ctx context.Context, c *proton.Client) {
|
||||
createNumMessages(ctx, t, c, addrID, proton.InboxLabel, 10)
|
||||
})
|
||||
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(bridge *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
userLoginAndSync(ctx, t, bridge, "user", password)
|
||||
|
||||
mocks.Reporter.EXPECT().ReportMessageWithContext(gomock.Any(), gomock.Any()).AnyTimes()
|
||||
|
||||
// Create 10 more messages for the user, generating events.
|
||||
withClient(ctx, t, s, "user", password, func(ctx context.Context, c *proton.Client) {
|
||||
createNumMessages(ctx, t, c, addrID, proton.InboxLabel, 10)
|
||||
})
|
||||
|
||||
var count int
|
||||
|
||||
// The first 10 times bridge attempts to sync any of the messages, drop the connection.
|
||||
s.AddStatusHook(func(req *http.Request) (int, bool) {
|
||||
if strings.Contains(req.URL.Path, "/mail/v4/messages") {
|
||||
if count++; count < 10 {
|
||||
dropListener.DropAll()
|
||||
}
|
||||
}
|
||||
|
||||
return 0, false
|
||||
})
|
||||
|
||||
info, err := bridge.QueryUserInfo("user")
|
||||
require.NoError(t, err)
|
||||
|
||||
client, err := client.Dial(fmt.Sprintf("%v:%v", constants.Host, bridge.GetIMAPPort()))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Login(info.Addresses[0], string(info.BridgePass)))
|
||||
defer func() { _ = client.Logout() }()
|
||||
|
||||
// The IMAP client will eventually see 20 messages.
|
||||
require.Eventually(t, func() bool {
|
||||
status, err := client.Status("INBOX", []imap.StatusItem{imap.StatusMessages})
|
||||
return err == nil && status.Messages == 20
|
||||
}, 10*time.Second, 100*time.Millisecond)
|
||||
})
|
||||
}, server.WithListener(dropListener))
|
||||
}
|
||||
|
||||
// userLoginAndSync logs in user and waits until user is fully synced.
|
||||
func userLoginAndSync(
|
||||
ctx context.Context,
|
||||
|
||||
@ -20,6 +20,8 @@ package bridge_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -61,6 +63,50 @@ func TestBridge_Login(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestBridge_Login_DropConn(t *testing.T) {
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
require.NoError(t, err)
|
||||
|
||||
dropListener := proton.NewListener(l, proton.NewDropConn)
|
||||
defer func() { _ = dropListener.Close() }()
|
||||
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, storeKey []byte) {
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(bridge *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
// Login the user.
|
||||
userID, err := bridge.LoginFull(ctx, username, password, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The user is now connected.
|
||||
require.Equal(t, []string{userID}, bridge.GetUserIDs())
|
||||
require.Equal(t, []string{userID}, getConnectedUserIDs(t, bridge))
|
||||
})
|
||||
|
||||
// Whether to allow the user to be created.
|
||||
var allowUser bool
|
||||
|
||||
s.AddStatusHook(func(req *http.Request) (int, bool) {
|
||||
// Drop any request to the users endpoint.
|
||||
if !allowUser && req.URL.Path == "/core/v4/users" {
|
||||
dropListener.DropAll()
|
||||
}
|
||||
|
||||
// After the ping request, allow the user to be created.
|
||||
if req.URL.Path == "/tests/ping" {
|
||||
allowUser = true
|
||||
}
|
||||
|
||||
return 0, false
|
||||
})
|
||||
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(bridge *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
// The user is eventually connected.
|
||||
require.Eventually(t, func() bool {
|
||||
return len(bridge.GetUserIDs()) == 1 && len(getConnectedUserIDs(t, bridge)) == 1
|
||||
}, 5*time.Second, 100*time.Millisecond)
|
||||
})
|
||||
}, server.WithListener(dropListener))
|
||||
}
|
||||
|
||||
func TestBridge_LoginTwice(t *testing.T) {
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, storeKey []byte) {
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(bridge *bridge.Bridge, mocks *bridge.Mocks) {
|
||||
|
||||
@ -44,6 +44,9 @@ var (
|
||||
|
||||
// DSNSentry client keys to be able to report crashes to Sentry.
|
||||
DSNSentry = ""
|
||||
|
||||
// BuildEnv tags used at build time.
|
||||
BuildEnv = ""
|
||||
)
|
||||
|
||||
const (
|
||||
|
||||
internal/events/serve.go (new file, 76 lines)
@ -0,0 +1,76 @@
|
||||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package events
|
||||
|
||||
import "fmt"
|
||||
|
||||
type IMAPServerReady struct {
|
||||
eventBase
|
||||
|
||||
Port int
|
||||
}
|
||||
|
||||
func (event IMAPServerReady) String() string {
|
||||
return fmt.Sprintf("IMAPServerReady: Port %d", event.Port)
|
||||
}
|
||||
|
||||
type IMAPServerStopped struct {
|
||||
eventBase
|
||||
}
|
||||
|
||||
func (event IMAPServerStopped) String() string {
|
||||
return "IMAPServerStopped"
|
||||
}
|
||||
|
||||
type IMAPServerError struct {
|
||||
eventBase
|
||||
|
||||
Error error
|
||||
}
|
||||
|
||||
func (event IMAPServerError) String() string {
|
||||
return fmt.Sprintf("IMAPServerError: %v", event.Error)
|
||||
}
|
||||
|
||||
type SMTPServerReady struct {
|
||||
eventBase
|
||||
|
||||
Port int
|
||||
}
|
||||
|
||||
func (event SMTPServerReady) String() string {
|
||||
return fmt.Sprintf("SMTPServerReady: Port %d", event.Port)
|
||||
}
|
||||
|
||||
type SMTPServerStopped struct {
|
||||
eventBase
|
||||
}
|
||||
|
||||
func (event SMTPServerStopped) String() string {
|
||||
return "SMTPServerStopped"
|
||||
}
|
||||
|
||||
type SMTPServerError struct {
|
||||
eventBase
|
||||
|
||||
Error error
|
||||
}
|
||||
|
||||
func (event SMTPServerError) String() string {
|
||||
return fmt.Sprintf("SMTPServerError: %v", event.Error)
|
||||
}
|
||||
@ -156,3 +156,26 @@ type AddressModeChanged struct {
|
||||
func (event AddressModeChanged) String() string {
|
||||
return fmt.Sprintf("AddressModeChanged: UserID: %s, AddressMode: %s", event.UserID, event.AddressMode)
|
||||
}
|
||||
|
||||
// UsedSpaceChanged is emitted when the storage space used by the user has changed.
|
||||
type UsedSpaceChanged struct {
|
||||
eventBase
|
||||
|
||||
UserID string
|
||||
|
||||
UsedSpace int
|
||||
}
|
||||
|
||||
func (event UsedSpaceChanged) String() string {
|
||||
return fmt.Sprintf("UsedSpaceChanged: UserID: %s, UsedSpace: %v", event.UserID, event.UsedSpace)
|
||||
}
|
||||
|
||||
type IMAPLoginFailed struct {
|
||||
eventBase
|
||||
|
||||
Username string
|
||||
}
|
||||
|
||||
func (event IMAPLoginFailed) String() string {
|
||||
return fmt.Sprintf("IMAPLoginFailed: Username: %s", event.Username)
|
||||
}
|
||||
|
||||
@ -21,9 +21,11 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/focus/proto"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/service"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
@ -32,10 +34,10 @@ import (
|
||||
|
||||
// TryRaise tries to raise the application by dialing the focus service.
|
||||
// It returns true if the service is running and the application was told to raise.
|
||||
func TryRaise() bool {
|
||||
func TryRaise(settingsPath string) bool {
|
||||
var raised bool
|
||||
|
||||
if err := withClientConn(context.Background(), func(ctx context.Context, client proto.FocusClient) error {
|
||||
if err := withClientConn(context.Background(), settingsPath, func(ctx context.Context, client proto.FocusClient) error {
|
||||
if _, err := client.Raise(ctx, &emptypb.Empty{}); err != nil {
|
||||
return fmt.Errorf("failed to call client.Raise: %w", err)
|
||||
}
|
||||
@ -53,10 +55,10 @@ func TryRaise() bool {
|
||||
|
||||
// TryVersion tries to determine the version of the running application instance.
|
||||
// It returns the version and true if the version could be determined.
|
||||
func TryVersion() (*semver.Version, bool) {
|
||||
func TryVersion(settingsPath string) (*semver.Version, bool) {
|
||||
var version *semver.Version
|
||||
|
||||
if err := withClientConn(context.Background(), func(ctx context.Context, client proto.FocusClient) error {
|
||||
if err := withClientConn(context.Background(), settingsPath, func(ctx context.Context, client proto.FocusClient) error {
|
||||
raw, err := client.Version(ctx, &emptypb.Empty{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to call client.Version: %w", err)
|
||||
@ -78,10 +80,15 @@ func TryVersion() (*semver.Version, bool) {
|
||||
return version, true
|
||||
}
|
||||
|
||||
func withClientConn(ctx context.Context, fn func(context.Context, proto.FocusClient) error) error {
|
||||
func withClientConn(ctx context.Context, settingsPath string, fn func(context.Context, proto.FocusClient) error) error {
|
||||
var config = service.Config{}
|
||||
err := config.Load(filepath.Join(settingsPath, serverConfigFileName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cc, err := grpc.DialContext(
|
||||
ctx,
|
||||
net.JoinHostPort(Host, fmt.Sprint(Port)),
|
||||
net.JoinHostPort(Host, fmt.Sprint(config.Port)),
|
||||
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||
)
|
||||
if err != nil {
|
||||
|
||||
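The focus client no longer dials a fixed port 1042; it first loads the port from grpcFocusServerConfig.json in the settings folder. A hedged sketch of that lookup (the real config type is service.Config; the JSON field name below is an assumption):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"path/filepath"
)

// focusConfig approximates the config file written by the focus service; the
// real type is service.Config and its field names may differ.
type focusConfig struct {
	Port int `json:"port"`
}

// focusAddress loads grpcFocusServerConfig.json from the settings folder and
// returns the host:port the focus gRPC service is listening on.
func focusAddress(settingsPath string) (string, error) {
	raw, err := os.ReadFile(filepath.Join(settingsPath, "grpcFocusServerConfig.json"))
	if err != nil {
		return "", fmt.Errorf("failed to read focus config: %w", err)
	}

	var config focusConfig
	if err := json.Unmarshal(raw, &config); err != nil {
		return "", fmt.Errorf("failed to parse focus config: %w", err)
	}

	return net.JoinHostPort("127.0.0.1", fmt.Sprint(config.Port)), nil
}

func main() {
	addr, err := focusAddress(os.TempDir())
	fmt.Println(addr, err)
}
```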
@ -18,19 +18,25 @@
|
||||
package focus
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/locations"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestFocus_Raise(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
locations := locations.New(newTestLocationsProvider(tmpDir), "config-name")
|
||||
// Start the focus service.
|
||||
service, err := NewService(semver.MustParse("1.2.3"))
|
||||
service, err := NewService(locations, semver.MustParse("1.2.3"))
|
||||
require.NoError(t, err)
|
||||
|
||||
settingsFolder, err := locations.ProvideSettingsPath()
|
||||
require.NoError(t, err)
|
||||
// Try to dial it, it should succeed.
|
||||
require.True(t, TryRaise())
|
||||
require.True(t, TryRaise(settingsFolder))
|
||||
|
||||
// The service should report a raise call.
|
||||
<-service.GetRaiseCh()
|
||||
@ -39,16 +45,60 @@ func TestFocus_Raise(t *testing.T) {
|
||||
service.Close()
|
||||
|
||||
// Try to dial it, it should fail.
|
||||
require.False(t, TryRaise())
|
||||
require.False(t, TryRaise(settingsFolder))
|
||||
}
|
||||
|
||||
func TestFocus_Version(t *testing.T) {
|
||||
tmpDir := t.TempDir()
|
||||
locations := locations.New(newTestLocationsProvider(tmpDir), "config-name")
|
||||
// Start the focus service.
|
||||
_, err := NewService(semver.MustParse("1.2.3"))
|
||||
_, err := NewService(locations, semver.MustParse("1.2.3"))
|
||||
require.NoError(t, err)
|
||||
|
||||
settingsFolder, err := locations.ProvideSettingsPath()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Try to dial it, it should succeed.
|
||||
version, ok := TryVersion()
|
||||
version, ok := TryVersion(settingsFolder)
|
||||
require.True(t, ok)
|
||||
require.Equal(t, "1.2.3", version.String())
|
||||
}
|
||||
|
||||
type TestLocationsProvider struct {
|
||||
config, data, cache string
|
||||
}
|
||||
|
||||
func newTestLocationsProvider(dir string) *TestLocationsProvider {
|
||||
config, err := os.MkdirTemp(dir, "config")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
data, err := os.MkdirTemp(dir, "data")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
cache, err := os.MkdirTemp(dir, "cache")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &TestLocationsProvider{
|
||||
config: config,
|
||||
data: data,
|
||||
cache: cache,
|
||||
}
|
||||
}
|
||||
|
||||
func (provider *TestLocationsProvider) UserConfig() string {
|
||||
return provider.config
|
||||
}
|
||||
|
||||
func (provider *TestLocationsProvider) UserData() string {
|
||||
return provider.data
|
||||
}
|
||||
|
||||
func (provider *TestLocationsProvider) UserCache() string {
|
||||
return provider.cache
|
||||
}
|
||||
|
||||
@ -25,16 +25,16 @@ import (
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/focus/proto"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/service"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// Host is the local host to listen on.
|
||||
const Host = "127.0.0.1"
|
||||
|
||||
// Port is the port to listen on.
|
||||
var Port = 1042 // nolint:gochecknoglobals
|
||||
const (
|
||||
Host = "127.0.0.1"
|
||||
serverConfigFileName = "grpcFocusServerConfig.json"
|
||||
)
|
||||
|
||||
// Service is a gRPC service that can be used to raise the application.
|
||||
type Service struct {
|
||||
@ -47,26 +47,39 @@ type Service struct {
|
||||
|
||||
// NewService creates a new focus service.
|
||||
// It listens on the local host on an OS-assigned port, recorded in the focus service config file.
|
||||
func NewService(version *semver.Version) (*Service, error) {
|
||||
service := &Service{
|
||||
func NewService(locator service.Locator, version *semver.Version) (*Service, error) {
|
||||
serv := &Service{
|
||||
server: grpc.NewServer(),
|
||||
raiseCh: make(chan struct{}, 1),
|
||||
version: version,
|
||||
}
|
||||
|
||||
proto.RegisterFocusServer(service.server, service)
|
||||
proto.RegisterFocusServer(serv.server, serv)
|
||||
|
||||
if listener, err := net.Listen("tcp", net.JoinHostPort(Host, fmt.Sprint(Port))); err != nil {
|
||||
logrus.WithError(err).Warn("Failed to start focus service")
|
||||
if listener, err := net.Listen("tcp", net.JoinHostPort(Host, fmt.Sprint(0))); err != nil {
|
||||
logrus.WithError(err).Warn("Failed to start focus serv")
|
||||
} else {
|
||||
config := service.Config{}
|
||||
// retrieve the port assigned by the system, so that we can put it in the config file.
|
||||
address, ok := listener.Addr().(*net.TCPAddr)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("could not retrieve gRPC service listener address")
|
||||
}
|
||||
config.Port = address.Port
|
||||
if path, err := service.SaveGRPCServerConfigFile(locator, &config, serverConfigFileName); err != nil {
|
||||
logrus.WithError(err).WithField("path", path).Warn("Could not write focus gRPC service config file")
|
||||
} else {
|
||||
logrus.WithField("path", path).Info("Successfully saved gRPC Focus service config file")
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := service.server.Serve(listener); err != nil {
|
||||
if err := serv.server.Serve(listener); err != nil {
|
||||
fmt.Printf("failed to serve: %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
return service, nil
|
||||
return serv, nil
|
||||
}
|
||||
|
||||
// Raise implements the gRPC FocusService interface; it raises the application.
|
||||
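NewService now listens on port 0 so the OS picks a free port, then records the assigned port in the focus config file for other instances to dial. A minimal stand-alone sketch of that listen-then-persist dance, with a plain JSON write standing in for service.SaveGRPCServerConfigFile:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"path/filepath"
)

func main() {
	// Bind to port 0: the kernel assigns a free ephemeral port.
	listener, err := net.Listen("tcp", net.JoinHostPort("127.0.0.1", "0"))
	if err != nil {
		panic(err)
	}
	defer func() { _ = listener.Close() }()

	// Recover the assigned port from the listener address.
	address, ok := listener.Addr().(*net.TCPAddr)
	if !ok {
		panic("could not retrieve listener address")
	}

	// Persist it so a second instance can find and dial this one
	// (stand-in for service.SaveGRPCServerConfigFile).
	path := filepath.Join(os.TempDir(), "grpcFocusServerConfig.json")

	raw, err := json.Marshal(map[string]int{"port": address.Port})
	if err != nil {
		panic(err)
	}

	if err := os.WriteFile(path, raw, 0o600); err != nil {
		panic(err)
	}

	fmt.Println("focus service listening on port", address.Port)
}
```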
|
||||
internal/frontend/.gitignore (vendored, 2 changed lines)
@ -10,5 +10,5 @@ rcc_cgo_*.go
|
||||
*.qmlc
|
||||
|
||||
# Generated file
|
||||
bridge-gui/bridge-gui/Version.h
|
||||
bridge-gui/bridge-gui/BuildConfig.h
|
||||
bridge-gui/bridge-gui/Resources.rc
|
||||
|
||||
@ -52,6 +52,8 @@ UsersTab::UsersTab(QWidget *parent)
|
||||
connect(ui_.tableUserList, &QTableView::doubleClicked, this, &UsersTab::onEditUserButton);
|
||||
connect(ui_.buttonRemoveUser, &QPushButton::clicked, this, &UsersTab::onRemoveUserButton);
|
||||
connect(ui_.buttonUserBadEvent, &QPushButton::clicked, this, &UsersTab::onSendUserBadEvent);
|
||||
connect(ui_.buttonImapLoginFailed, &QPushButton::clicked, this, &UsersTab::onSendIMAPLoginFailedEvent);
|
||||
connect(ui_.buttonUsedBytesChanged, &QPushButton::clicked, this, &UsersTab::onSendUsedBytesChangedEvent);
|
||||
connect(ui_.checkUsernamePasswordError, &QCheckBox::toggled, this, &UsersTab::updateGUIState);
|
||||
|
||||
users_.append(randomUser());
|
||||
@ -158,16 +160,66 @@ void UsersTab::onSendUserBadEvent() {
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
//
|
||||
//****************************************************************************************************************************************************
|
||||
void UsersTab::onSendUsedBytesChangedEvent() {
|
||||
SPUser const user = selectedUser();
|
||||
int const index = this->selectedIndex();
|
||||
|
||||
if (!user) {
|
||||
app().log().error(QString("%1 failed. Unkown user.").arg(__FUNCTION__));
|
||||
return;
|
||||
}
|
||||
|
||||
if (UserState::Connected != user->state()) {
|
||||
app().log().error(QString("%1 failed. User is not connected").arg(__FUNCTION__));
|
||||
}
|
||||
|
||||
qint64 const usedBytes = qint64(ui_.spinUsedBytes->value());
|
||||
user->setUsedBytes(usedBytes);
|
||||
users_.touch(index);
|
||||
|
||||
GRPCService &grpc = app().grpc();
|
||||
if (grpc.isStreaming()) {
|
||||
QString const userID = user->id();
|
||||
grpc.sendEvent(newUsedBytesChangedEvent(userID, usedBytes));
|
||||
}
|
||||
|
||||
this->updateGUIState();
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
//
|
||||
//****************************************************************************************************************************************************
|
||||
void UsersTab::onSendIMAPLoginFailedEvent() {
|
||||
GRPCService &grpc = app().grpc();
|
||||
if (grpc.isStreaming()) {
|
||||
grpc.sendEvent(newIMAPLoginFailedEvent(ui_.editIMAPLoginFailedUsername->text()));
|
||||
}
|
||||
|
||||
this->updateGUIState();
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
//
|
||||
//****************************************************************************************************************************************************
|
||||
void UsersTab::updateGUIState() {
|
||||
SPUser const user = selectedUser();
|
||||
bool const hasSelectedUser = user.get();
|
||||
UserState const state = user ? user->state() : UserState::SignedOut;
|
||||
|
||||
ui_.buttonEditUser->setEnabled(hasSelectedUser);
|
||||
ui_.buttonRemoveUser->setEnabled(hasSelectedUser);
|
||||
ui_.groupBoxBadEvent->setEnabled(hasSelectedUser && (UserState::SignedOut != user->state()));
|
||||
ui_.groupBoxBadEvent->setEnabled(hasSelectedUser && (UserState::SignedOut != state));
|
||||
ui_.groupBoxUsedSpace->setEnabled(hasSelectedUser && (UserState::Connected == state));
|
||||
ui_.editUsernamePasswordError->setEnabled(ui_.checkUsernamePasswordError->isChecked());
|
||||
ui_.spinUsedBytes->setValue(user ? user->usedBytes() : 0.0);
|
||||
|
||||
if (user)
|
||||
ui_.editIMAPLoginFailedUsername->setText(user->primaryEmailOrUsername());
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -61,6 +61,8 @@ private slots:
|
||||
void onRemoveUserButton(); ///< Remove the currently selected user.
|
||||
void onSelectionChanged(QItemSelection, QItemSelection); ///< Slot for the change of the selection.
|
||||
void onSendUserBadEvent(); ///< Slot for the 'Send Bad Event Error' button.
|
||||
void onSendUsedBytesChangedEvent(); ///< Slot for the 'Send Used Bytes Changed Event' button.
|
||||
void onSendIMAPLoginFailedEvent(); ///< Slot for the 'Send IMAP Login failure Event' button.
|
||||
void updateGUIState(); ///< Update the GUI state.
|
||||
|
||||
private: // member functions.
|
||||
|
||||
@ -80,13 +80,6 @@
|
||||
<layout class="QVBoxLayout" name="verticalLayout_3">
|
||||
<item>
|
||||
<layout class="QHBoxLayout" name="horizontalLayout_3">
|
||||
<item>
|
||||
<widget class="QLabel" name="labelUserBadEvent">
|
||||
<property name="text">
|
||||
<string>Message: </string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QLineEdit" name="editUserBadEvent">
|
||||
<property name="minimumSize">
|
||||
@ -96,18 +89,102 @@
|
||||
</size>
|
||||
</property>
|
||||
<property name="text">
|
||||
<string>Bad event error.</string>
|
||||
<string/>
|
||||
</property>
|
||||
<property name="placeholderText">
|
||||
<string>error message</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QPushButton" name="buttonUserBadEvent">
|
||||
<property name="text">
|
||||
<string>Send</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QGroupBox" name="groupBoxUsedSpace">
|
||||
<property name="minimumSize">
|
||||
<size>
|
||||
<width>0</width>
|
||||
<height>0</height>
|
||||
</size>
|
||||
</property>
|
||||
<property name="title">
|
||||
<string>Used Bytes Changed</string>
|
||||
</property>
|
||||
<layout class="QVBoxLayout" name="verticalLayout_4">
|
||||
<item>
|
||||
<widget class="QPushButton" name="buttonUserBadEvent">
|
||||
<property name="text">
|
||||
<string>Send Bad Event Error</string>
|
||||
</property>
|
||||
</widget>
|
||||
<layout class="QHBoxLayout" name="hBoxUsedBytes" stretch="1,0">
|
||||
<item>
|
||||
<widget class="QDoubleSpinBox" name="spinUsedBytes">
|
||||
<property name="buttonSymbols">
|
||||
<enum>QAbstractSpinBox::NoButtons</enum>
|
||||
</property>
|
||||
<property name="decimals">
|
||||
<number>0</number>
|
||||
</property>
|
||||
<property name="maximum">
|
||||
<double>1000000000000000.000000000000000</double>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QPushButton" name="buttonUsedBytesChanged">
|
||||
<property name="text">
|
||||
<string>Send</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QGroupBox" name="groupBoxIMAPLoginFailed">
|
||||
<property name="minimumSize">
|
||||
<size>
|
||||
<width>0</width>
|
||||
<height>0</height>
|
||||
</size>
|
||||
</property>
|
||||
<property name="title">
|
||||
<string>IMAP Login Failure</string>
|
||||
</property>
|
||||
<layout class="QVBoxLayout" name="verticalLayout_8">
|
||||
<item>
|
||||
<layout class="QHBoxLayout" name="horizontalLayout_7">
|
||||
<item>
|
||||
<widget class="QLineEdit" name="editIMAPLoginFailedUsername">
|
||||
<property name="minimumSize">
|
||||
<size>
|
||||
<width>200</width>
|
||||
<height>0</height>
|
||||
</size>
|
||||
</property>
|
||||
<property name="text">
|
||||
<string/>
|
||||
</property>
|
||||
<property name="placeholderText">
|
||||
<string>username or primary email</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
<item>
|
||||
<widget class="QPushButton" name="buttonImapLoginFailed">
|
||||
<property name="text">
|
||||
<string>Send</string>
|
||||
</property>
|
||||
</widget>
|
||||
</item>
|
||||
</layout>
|
||||
</item>
|
||||
</layout>
|
||||
</widget>
|
||||
|
||||
@ -24,5 +24,7 @@
|
||||
#define PROJECT_VER "@BRIDGE_APP_VERSION@"
|
||||
#define PROJECT_REVISION "@BRIDGE_REVISION@"
|
||||
#define PROJECT_BUILD_TIME "@BRIDGE_BUILD_TIME@"
|
||||
#define PROJECT_DSN_SENTRY "@BRIDGE_DSN_SENTRY@"
|
||||
#define PROJECT_BUILD_ENV "@BRIDGE_BUILD_ENV@"
|
||||
|
||||
#endif // BRIDGE_GUI_VERSION_H
|
||||
@ -85,20 +85,12 @@ message(STATUS "Using Qt ${Qt6_VERSION}")
|
||||
#*****************************************************************************************************************************************************
|
||||
find_package(sentry CONFIG REQUIRED)
|
||||
|
||||
set(DSN_SENTRY "https://ea31dfe8574849108fb8ba044fec3620@api.protonmail.ch/core/v4/reports/sentry/7")
|
||||
set(SENTRY_CONFIG_GENERATED_FILE_DIR ${CMAKE_CURRENT_BINARY_DIR}/sentry-generated)
|
||||
set(SENTRY_CONFIG_FILE ${SENTRY_CONFIG_GENERATED_FILE_DIR}/project_sentry_config.h)
|
||||
file(GENERATE OUTPUT ${SENTRY_CONFIG_FILE} CONTENT
|
||||
"// AUTO GENERATED FILE, DO NOT MODIFY\n#pragma once\nconst char* SentryDNS=\"${DSN_SENTRY}\";\nconst char* SentryProductID=\"bridge-mail@${BRIDGE_APP_VERSION}\";\n"
|
||||
)
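For reference, the project_sentry_config.h produced by this file(GENERATE) call looks roughly like the following; the version value is a placeholder here, the real one is substituted from BRIDGE_APP_VERSION at generation time:

// AUTO GENERATED FILE, DO NOT MODIFY
#pragma once
const char* SentryDNS="https://ea31dfe8574849108fb8ba044fec3620@api.protonmail.ch/core/v4/reports/sentry/7";
const char* SentryProductID="bridge-mail@3.0.99";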
|
||||
|
||||
|
||||
|
||||
#*****************************************************************************************************************************************************
|
||||
# Source files and output
|
||||
#*****************************************************************************************************************************************************
|
||||
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/Version.h.in ${CMAKE_CURRENT_SOURCE_DIR}/Version.h)
|
||||
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/BuildConfig.h.in ${CMAKE_CURRENT_SOURCE_DIR}/BuildConfig.h)
|
||||
|
||||
if (NOT TARGET bridgepp)
|
||||
add_subdirectory(../bridgepp bridgepp)
|
||||
@ -122,7 +114,7 @@ add_executable(bridge-gui
|
||||
EventStreamWorker.cpp EventStreamWorker.h
|
||||
main.cpp
|
||||
Pch.h
|
||||
Version.h
|
||||
BuildConfig.h
|
||||
QMLBackend.cpp QMLBackend.h
|
||||
UserList.cpp UserList.h
|
||||
SentryUtils.cpp SentryUtils.h
|
||||
|
||||
@ -18,7 +18,7 @@
|
||||
|
||||
#include "QMLBackend.h"
|
||||
#include "EventStreamWorker.h"
|
||||
#include "Version.h"
|
||||
#include "BuildConfig.h"
|
||||
#include <bridgepp/GRPC/GRPCClient.h>
|
||||
#include <bridgepp/Exception/Exception.h>
|
||||
#include <bridgepp/Worker/Overseer.h>
|
||||
@ -467,6 +467,7 @@ bool QMLBackend::isDoHEnabled() const {
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return The value for the 'isAutomaticUpdateOn' property.
|
||||
//****************************************************************************************************************************************************
|
||||
@ -875,8 +876,9 @@ void QMLBackend::onLoginAlreadyLoggedIn(QString const &userID) {
|
||||
void QMLBackend::onUserBadEvent(QString const &userID, QString const &errorMessage) {
|
||||
HANDLE_EXCEPTION(
|
||||
SPUser const user = users_->getUserWithID(userID);
|
||||
        if (!user)
        if (!user) {
            app().log().error(QString("Received bad event for unknown user %1").arg(userID));
            return; // do not dereference a null user below.
        }
        user->setState(UserState::SignedOut);
|
||||
emit userBadEvent(
|
||||
tr("Internal error: %1 was automatically logged out. Please log in again or report this problem if the issue persists.").arg(user->primaryEmailOrUsername()),
|
||||
@ -888,6 +890,24 @@ void QMLBackend::onUserBadEvent(QString const &userID, QString const &errorMessa
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] username The username (or primary email address)
|
||||
//****************************************************************************************************************************************************
|
||||
void QMLBackend::onIMAPLoginFailed(QString const &username) {
|
||||
HANDLE_EXCEPTION(
|
||||
SPUser const user = users_->getUserWithUsernameOrEmail(username);
|
||||
if ((!user) || (user->state() != UserState::SignedOut)) { // We only want to pop up if a signed-out user has been detected.
|
||||
return;
|
||||
}
|
||||
if (user->isInIMAPLoginFailureCooldown())
|
||||
return;
|
||||
user->startImapLoginFailureCooldown(60 * 60 * 1000); // 1 hour cooldown during which we will not display this notification to this user again.
|
||||
emit selectUser(user->id());
|
||||
emit imapLoginWhileSignedOut(username);
|
||||
)
|
||||
}
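The cooldown helpers called above (isInIMAPLoginFailureCooldown, startImapLoginFailureCooldown) live in the bridgepp User class and are not part of this hunk; a minimal sketch of how they could be implemented, assuming a hypothetical QDateTime member imapLoginFailureCooldownEnd_, is:

// Sketch only, not the actual bridgepp::User implementation.
bool User::isInIMAPLoginFailureCooldown() const {
    return QDateTime::currentDateTime() < imapLoginFailureCooldownEnd_; // still cooling down while the end time is in the future.
}

void User::startImapLoginFailureCooldown(qint64 durationMSecs) {
    imapLoginFailureCooldownEnd_ = QDateTime::currentDateTime().addMSecs(durationMSecs); // onIMAPLoginFailed() passes one hour in milliseconds.
}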
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
//
|
||||
//****************************************************************************************************************************************************
|
||||
@ -996,5 +1016,7 @@ void QMLBackend::connectGrpcEvents() {
|
||||
// user events
|
||||
connect(client, &GRPCClient::userDisconnected, this, &QMLBackend::userDisconnected);
|
||||
connect(client, &GRPCClient::userBadEvent, this, &QMLBackend::onUserBadEvent);
|
||||
connect(client, &GRPCClient::imapLoginFailed, this, &QMLBackend::onIMAPLoginFailed);
|
||||
|
||||
users_->connectGRPCEvents();
|
||||
}
|
||||
|
||||
@ -21,7 +21,7 @@
|
||||
|
||||
|
||||
#include "MacOS/DockIcon.h"
|
||||
#include "Version.h"
|
||||
#include "BuildConfig.h"
|
||||
#include "UserList.h"
|
||||
#include <bridgepp/GRPC/GRPCClient.h>
|
||||
#include <bridgepp/GRPC/GRPCUtils.h>
|
||||
@ -180,6 +180,7 @@ public slots: // slot for signals received from gRPC that need transformation in
|
||||
void onLoginFinished(QString const &userID, bool wasSignedOut); ///< Slot for LoginFinished gRPC event.
|
||||
void onLoginAlreadyLoggedIn(QString const &userID); ///< Slot for the LoginAlreadyLoggedIn gRPC event.
|
||||
void onUserBadEvent(QString const& userID, QString const& errorMessage); ///< Slot for the userBadEvent gRPC event.
|
||||
void onIMAPLoginFailed(QString const& username); ///< Slot for the imapLoginFailed event.
|
||||
|
||||
signals: // Signals received from the Go backend, to be forwarded to QML
|
||||
void toggleAutostartFinished(); ///< Signal for the 'toggleAutostartFinished' gRPC stream event.
|
||||
@ -233,6 +234,7 @@ signals: // Signals received from the Go backend, to be forwarded to QML
|
||||
void hideMainWindow(); ///< Signal for the 'hideMainWindow' gRPC stream event.
|
||||
void genericError(QString const &title, QString const &description); ///< Signal for the 'genericError' gRPC stream event.
|
||||
void selectUser(QString const); ///< Signal requesting that the given user account be displayed.
|
||||
void imapLoginWhileSignedOut(QString const& username); ///< Signal for the notification of IMAP login attempt on a signed out account.
|
||||
|
||||
// This signal is emitted when an exception is intercepted in calls triggered by QML. The QML engine would intercept the exception otherwise.
|
||||
void fatalError(QString const &function, QString const &message) const; ///< Signal emitted when a fatal error occurs.
|
||||
|
||||
@ -16,7 +16,7 @@
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
#include "SentryUtils.h"
|
||||
#include "Version.h"
|
||||
#include "BuildConfig.h"
|
||||
#include <bridgepp/BridgeUtils.h>
|
||||
|
||||
#include <QByteArray>
|
||||
@ -31,13 +31,39 @@ QByteArray getProtectedHostname() {
|
||||
return hostname.toHex();
|
||||
}
|
||||
|
||||
QString getApiOS() {
|
||||
#if defined(Q_OS_DARWIN)
|
||||
return "macos";
|
||||
#elif defined(Q_OS_WINDOWS)
|
||||
return "windows";
|
||||
#else
|
||||
return "linux";
|
||||
#endif
|
||||
}
|
||||
|
||||
QString appVersion(const QString& version) {
|
||||
return QString("%1-bridge@%2").arg(getApiOS()).arg(version);
|
||||
}
|
||||
|
||||
void setSentryReportScope() {
|
||||
sentry_set_tag("OS", bridgepp::goos().toUtf8());
|
||||
sentry_set_tag("Client", PROJECT_FULL_NAME);
|
||||
sentry_set_tag("Version", PROJECT_VER);
|
||||
sentry_set_tag("UserAgent", QString("/ (%1)").arg(bridgepp::goos()).toUtf8());
|
||||
sentry_set_tag("HostArch", QSysInfo::currentCpuArchitecture().toUtf8());
|
||||
sentry_set_tag("server_name", getProtectedHostname());
|
||||
sentry_set_tag("Version", QByteArray(PROJECT_REVISION).toHex());
|
||||
sentry_set_tag("HostArch", QSysInfo::currentCpuArchitecture().toUtf8());
|
||||
sentry_set_tag("server_name", getProtectedHostname());
|
||||
}
|
||||
|
||||
sentry_options_t* newSentryOptions(const char *sentryDNS, const char *cacheDir) {
|
||||
sentry_options_t *sentryOptions = sentry_options_new();
|
||||
sentry_options_set_dsn(sentryOptions, sentryDNS);
|
||||
sentry_options_set_database_path(sentryOptions, cacheDir);
|
||||
sentry_options_set_release(sentryOptions, appVersion(PROJECT_VER).toUtf8());
|
||||
sentry_options_set_max_breadcrumbs(sentryOptions, 50);
|
||||
sentry_options_set_environment(sentryOptions, PROJECT_BUILD_ENV);
|
||||
// Enable this for debugging sentry.
|
||||
// sentry_options_set_debug(sentryOptions, 1);
|
||||
|
||||
return sentryOptions;
|
||||
}
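Taken together with the main.cpp changes further down, the intended sentry-native lifecycle on the GUI side is roughly the following sketch; the sentry_close() call at shutdown is an assumption and is not part of this diff:

sentry_options_t *options = newSentryOptions(PROJECT_DSN_SENTRY, sentryCacheDir().toStdString().c_str());
if (sentry_init(options) != 0) {   // sentry_init takes ownership of the options.
    std::cerr << "Failed to initialize sentry" << std::endl;
}
setSentryReportScope();            // tag every report with OS, version, hashed hostname, etc.
// ... run the application ...
sentry_close();                    // flush pending events before exiting.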
|
||||
|
||||
sentry_uuid_t reportSentryEvent(sentry_level_t level, const char *message) {
|
||||
@ -51,3 +77,5 @@ sentry_uuid_t reportSentryException(sentry_level_t level, const char *message, c
|
||||
sentry_event_add_exception(event, sentry_value_new_exception(exceptionType, exception));
|
||||
return sentry_capture_event(event);
|
||||
}
|
||||
|
||||
|
||||
|
||||
@ -22,6 +22,7 @@
|
||||
#include <sentry.h>
|
||||
|
||||
void setSentryReportScope();
|
||||
sentry_options_t* newSentryOptions(const char * sentryDNS, const char * cacheDir);
|
||||
sentry_uuid_t reportSentryEvent(sentry_level_t level, const char *message);
|
||||
sentry_uuid_t reportSentryException(sentry_level_t level, const char *message, const char *exceptionType, const char *exception);
|
||||
|
||||
|
||||
@ -38,6 +38,7 @@ void UserList::connectGRPCEvents() const {
|
||||
GRPCClient &client = app().grpc();
|
||||
connect(&client, &GRPCClient::userChanged, this, &UserList::onUserChanged);
|
||||
connect(&client, &GRPCClient::toggleSplitModeFinished, this, &UserList::onToggleSplitModeFinished);
|
||||
connect(&client, &GRPCClient::usedBytesChanged, this, &UserList::onUsedBytesChanged);
|
||||
}
|
||||
|
||||
|
||||
@ -148,6 +149,19 @@ bridgepp::SPUser UserList::getUserWithID(QString const &userID) const {
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] username The username or email.
|
||||
/// \return The user with the given username or email.
|
||||
/// \return A null pointer if the user could not be found.
|
||||
//****************************************************************************************************************************************************
|
||||
bridgepp::SPUser UserList::getUserWithUsernameOrEmail(QString const &username) const {
|
||||
QList<SPUser>::const_iterator it = std::find_if(users_.begin(), users_.end(), [username](SPUser const &user) -> bool {
|
||||
return user && ((username.compare(user->username(), Qt::CaseInsensitive) == 0) || user->addresses().contains(username, Qt::CaseInsensitive));
|
||||
});
|
||||
return (it == users_.end()) ? nullptr : *it;
|
||||
}
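A short usage sketch (userList stands for any UserList instance and the address is made up):

// Match either the account's username or one of its addresses, case-insensitively.
bridgepp::SPUser const user = userList.getUserWithUsernameOrEmail("jane.doe@example.com");
if (!user) {
    return; // unknown account: nothing to select or notify.
}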
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] row The row.
|
||||
//****************************************************************************************************************************************************
|
||||
@ -223,3 +237,17 @@ void UserList::onToggleSplitModeFinished(QString const &userID) {
|
||||
int UserList::count() const {
|
||||
return users_.size();
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] userID The userID.
|
||||
/// \param[in] usedBytes The used space, in bytes.
|
||||
//****************************************************************************************************************************************************
|
||||
void UserList::onUsedBytesChanged(QString const &userID, qint64 usedBytes) {
|
||||
int const index = this->rowOfUserID(userID);
|
||||
if (index < 0) {
|
||||
app().log().error(QString("Received usedBytesChanged event for unknown userID %1").arg(userID));
|
||||
return;
|
||||
}
|
||||
users_[index]->setUsedBytes(usedBytes);
|
||||
}
|
||||
|
||||
@ -44,6 +44,7 @@ public: // member functions.
|
||||
void appendUser(bridgepp::SPUser const &user); ///< Add a new user.
|
||||
void updateUserAtRow(int row, bridgepp::User const &user); ///< Update the user at given row.
|
||||
bridgepp::SPUser getUserWithID(QString const &userID) const; ///< Retrieve the user with the given ID.
|
||||
bridgepp::SPUser getUserWithUsernameOrEmail(QString const& username) const; ///< Retrieve the user with the given primary email address or username
|
||||
|
||||
// the userCount property.
|
||||
Q_PROPERTY(int count READ count NOTIFY countChanged)
|
||||
@ -59,6 +60,7 @@ public:
|
||||
public slots: ///< handler for signals coming from the gRPC service
|
||||
void onUserChanged(QString const &userID);
|
||||
void onToggleSplitModeFinished(QString const &userID);
|
||||
void onUsedBytesChanged(QString const &userID, qint64 usedBytes); ///< Slot for usedBytesChanged events.
|
||||
|
||||
private: // data members
|
||||
QList<bridgepp::SPUser> users_; ///< The user list.
|
||||
|
||||
@ -76,6 +76,15 @@ function check_exit() {
|
||||
Write-host "Running build for version $bridgeVersion - $buildConfig in $buildDir"
|
||||
|
||||
$REVISION_HASH = git rev-parse --short=10 HEAD
|
||||
$bridgeDsnSentry = ($env:BRIDGE_DSN_SENTRY)
|
||||
$bridgeBuildTime = ($env:BRIDGE_BUILD_TIME)
|
||||
|
||||
$bridgeBuildEnv = ($env:BRIDGE_BUILD_ENV)
|
||||
if ($null -eq $bridgeBuildEnv)
|
||||
{
|
||||
$bridgeBuildEnv = "dev"
|
||||
}
|
||||
|
||||
git submodule update --init --recursive $vcpkgRoot
|
||||
. $vcpkgBootstrap -disableMetrics
|
||||
. $vcpkgExe install sentry-native:x64-windows grpc:x64-windows --clean-after-build
|
||||
@ -85,6 +94,9 @@ git submodule update --init --recursive $vcpkgRoot
|
||||
-DBRIDGE_VENDOR="$bridgeVendor" `
|
||||
-DBRIDGE_REVISION=$REVISION_HASH `
|
||||
-DBRIDGE_APP_VERSION="$bridgeVersion" `
|
||||
-DBRIDGE_BUILD_TIME="$bridgeBuildTime" `
|
||||
-DBRIDGE_DSN_SENTRY="$bridgeDsnSentry" `
|
||||
-DBRIDGE_BUILD_ENV="$bridgeBuildEnv" `
|
||||
-S . -B $buildDir
|
||||
|
||||
check_exit "CMake failed"
|
||||
|
||||
@ -56,6 +56,9 @@ BUILD_CONFIG=${BRIDGE_GUI_BUILD_CONFIG:-Debug}
|
||||
BUILD_DIR=$(echo "./cmake-build-${BUILD_CONFIG}" | tr '[:upper:]' '[:lower:]')
|
||||
VCPKG_ROOT="${BRIDGE_REPO_ROOT}/extern/vcpkg"
|
||||
BRIDGE_REVISION=$(git rev-parse --short=10 HEAD)
|
||||
BRIDGE_DSN_SENTRY=${BRIDGE_DSN_SENTRY}
|
||||
BRIDGE_BUILD_TIME=${BRIDGE_BUILD_TIME}
|
||||
BRIDGE_BUILD_ENV=${BRIDGE_BUILD_ENV:-"dev"}
|
||||
git submodule update --init --recursive ${VCPKG_ROOT}
|
||||
check_exit "Failed to initialize vcpkg as a submodule."
|
||||
|
||||
@ -94,6 +97,9 @@ cmake \
|
||||
-DBRIDGE_APP_FULL_NAME="${BRIDGE_APP_FULL_NAME}" \
|
||||
-DBRIDGE_VENDOR="${BRIDGE_VENDOR}" \
|
||||
-DBRIDGE_REVISION="${BRIDGE_REVISION}" \
|
||||
-DBRIDGE_DSN_SENTRY="${BRIDGE_DSN_SENTRY}" \
|
||||
-DBRIDGE_BUILD_TIME="${BRIDGE_BUILD_TIME}" \
|
||||
-DBRIDGE_BUILD_ENV="${BRIDGE_BUILD_ENV}" \
|
||||
-DBRIDGE_APP_VERSION="${BRIDGE_APP_VERSION}" "${BRIDGE_CMAKE_MACOS_OPTS}" \
|
||||
-G Ninja \
|
||||
-S . \
|
||||
|
||||
@ -21,7 +21,7 @@
|
||||
#include "CommandLine.h"
|
||||
#include "QMLBackend.h"
|
||||
#include "SentryUtils.h"
|
||||
#include "Version.h"
|
||||
#include "BuildConfig.h"
|
||||
#include <bridgepp/BridgeUtils.h>
|
||||
#include <bridgepp/Exception/Exception.h>
|
||||
#include <bridgepp/FocusGRPC/FocusGRPCClient.h>
|
||||
@ -29,7 +29,6 @@
|
||||
#include <bridgepp/ProcessMonitor.h>
|
||||
#include <sentry.h>
|
||||
#include <SentryUtils.h>
|
||||
#include <project_sentry_config.h>
|
||||
|
||||
|
||||
#ifdef Q_OS_MACOS
|
||||
@ -229,8 +228,21 @@ bool isBridgeRunning() {
|
||||
void focusOtherInstance() {
|
||||
try {
|
||||
FocusGRPCClient client;
|
||||
GRPCConfig sc;
|
||||
QString const path = FocusGRPCClient::grpcFocusServerConfigPath();
|
||||
QFile file(path);
|
||||
if (file.exists()) {
|
||||
if (!sc.load(path)) {
|
||||
throw Exception("The gRPC focus service configuration file is invalid.");
|
||||
}
|
||||
}
|
||||
else {
|
||||
throw Exception("Server did not provide gRPC Focus service configuration.");
|
||||
}
|
||||
|
||||
|
||||
QString error;
|
||||
if (!client.connectToServer(5000, &error)) {
|
||||
if (!client.connectToServer(5000, sc.port, &error)) {
|
||||
throw Exception(QString("Could not connect to bridge focus service for a raise call: %1").arg(error));
|
||||
}
|
||||
if (!client.raise().ok()) {
|
||||
@ -292,15 +304,8 @@ void closeBridgeApp() {
|
||||
//****************************************************************************************************************************************************
|
||||
int main(int argc, char *argv[]) {
|
||||
// Init sentry.
|
||||
sentry_options_t *sentryOptions = sentry_options_new();
|
||||
sentry_options_set_dsn(sentryOptions, SentryDNS);
|
||||
{
|
||||
const QString sentryCachePath = sentryCacheDir();
|
||||
sentry_options_set_database_path(sentryOptions, sentryCachePath.toStdString().c_str());
|
||||
}
|
||||
sentry_options_set_release(sentryOptions, QByteArray(PROJECT_REVISION).toHex());
|
||||
// Enable this for debugging sentry.
|
||||
// sentry_options_set_debug(sentryOptions, 1);
|
||||
sentry_options_t *sentryOptions = newSentryOptions(PROJECT_DSN_SENTRY, sentryCacheDir().toStdString().c_str());
|
||||
|
||||
if (sentry_init(sentryOptions) != 0) {
|
||||
std::cerr << "Failed to initialize sentry" << std::endl;
|
||||
}
|
||||
@ -344,6 +349,7 @@ int main(int argc, char *argv[]) {
|
||||
}
|
||||
|
||||
// Before launching bridge, we remove any stale service config file, because we need to make sure we get a newly generated one.
|
||||
FocusGRPCClient::removeServiceConfigFile();
|
||||
GRPCClient::removeServiceConfigFile();
|
||||
launchBridge(cliOptions.bridgeArgs);
|
||||
}
|
||||
|
||||
@ -81,6 +81,7 @@ QtObject {
|
||||
root.apiCertIssue,
|
||||
root.noActiveKeyForRecipient,
|
||||
root.userBadEvent,
|
||||
root.imapLoginWhileSignedOut,
|
||||
root.genericError
|
||||
]
|
||||
|
||||
@ -1147,6 +1148,34 @@ QtObject {
|
||||
|
||||
}
|
||||
|
||||
property Notification imapLoginWhileSignedOut: Notification {
|
||||
title: qsTr("IMAP Login failed")
|
||||
brief: title
|
||||
description: "#PlaceHolderText"
|
||||
icon: "./icons/ic-exclamation-circle-filled.svg"
|
||||
type: Notification.NotificationType.Danger
|
||||
group: Notifications.Group.Connection
|
||||
|
||||
Connections {
|
||||
target: Backend
|
||||
function onImapLoginWhileSignedOut(username) {
|
||||
root.imapLoginWhileSignedOut.description = qsTr("An email client tried to connect to the account %1, but this account is signed " +
|
||||
"out. Please sign-in to continue.").arg(username)
|
||||
root.imapLoginWhileSignedOut.active = true
|
||||
}
|
||||
}
|
||||
|
||||
action: [
|
||||
Action {
|
||||
text: qsTr("OK")
|
||||
|
||||
onTriggered: {
|
||||
root.imapLoginWhileSignedOut.active = false
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
property Notification genericError: Notification {
|
||||
title: "#PlaceholderText#"
|
||||
description: "#PlaceholderText#"
|
||||
|
||||
@ -17,6 +17,7 @@
|
||||
|
||||
|
||||
#include "FocusGRPCClient.h"
|
||||
#include "../BridgeUtils.h"
|
||||
#include "../Exception/Exception.h"
|
||||
|
||||
|
||||
@ -29,7 +30,6 @@ namespace {
|
||||
|
||||
|
||||
Empty empty; ///< Empty protobuf message, re-used across calls.
|
||||
qint64 const port = 1042; ///< The port for the focus service.
|
||||
QString const hostname = "127.0.0.1"; ///< The hostname of the focus service.
|
||||
|
||||
|
||||
@ -39,12 +39,43 @@ QString const hostname = "127.0.0.1"; ///< The hostname of the focus service.
|
||||
namespace bridgepp {
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return the gRPC Focus server config file name
|
||||
//****************************************************************************************************************************************************
|
||||
QString grpcFocusServerConfigFilename() {
|
||||
return "grpcFocusServerConfig.json";
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return The absolute path of the focus service config path.
|
||||
//****************************************************************************************************************************************************
|
||||
QString FocusGRPCClient::grpcFocusServerConfigPath() {
|
||||
return QDir(userConfigDir()).absoluteFilePath(grpcFocusServerConfigFilename());
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
//
|
||||
//****************************************************************************************************************************************************
|
||||
void FocusGRPCClient::removeServiceConfigFile() {
|
||||
QString const path = grpcFocusServerConfigPath();
|
||||
if (!QFile(path).exists()) {
|
||||
return;
|
||||
}
|
||||
if (!QFile::remove(path)) {
|
||||
throw Exception("Could not remove gRPC focus service config file.");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] timeoutMs The timeout for the connection, in milliseconds.
/// \param[in] port The gRPC server port.
/// \param[out] outError The error description, filled if not null and the function returns false.
/// \return true iff the connection was successfully established.
|
||||
//****************************************************************************************************************************************************
|
||||
bool FocusGRPCClient::connectToServer(qint64 timeoutMs, QString *outError) {
|
||||
bool FocusGRPCClient::connectToServer(qint64 timeoutMs, quint16 port, QString *outError) {
|
||||
try {
|
||||
QString const address = QString("%1:%2").arg(hostname).arg(port);
|
||||
channel_ = grpc::CreateChannel(address.toStdString(), grpc::InsecureChannelCredentials());
|
||||
|
||||
@ -31,6 +31,9 @@ namespace bridgepp {
|
||||
/// \brief Focus GRPC client class
|
||||
//**********************************************************************************************************************
|
||||
class FocusGRPCClient {
|
||||
public: // static member functions
|
||||
static void removeServiceConfigFile(); ///< Delete the service config file.
|
||||
static QString grpcFocusServerConfigPath(); ///< Return the path of the gRPC Focus server config file.
|
||||
public: // member functions.
|
||||
FocusGRPCClient() = default; ///< Default constructor.
|
||||
FocusGRPCClient(FocusGRPCClient const &) = delete; ///< Disabled copy-constructor.
|
||||
@ -38,7 +41,7 @@ public: // member functions.
|
||||
~FocusGRPCClient() = default; ///< Destructor.
|
||||
FocusGRPCClient &operator=(FocusGRPCClient const &) = delete; ///< Disabled assignment operator.
|
||||
FocusGRPCClient &operator=(FocusGRPCClient &&) = delete; ///< Disabled move assignment operator.
|
||||
bool connectToServer(qint64 timeoutMs, QString *outError = nullptr); ///< Connect to the focus server
|
||||
bool connectToServer(qint64 timeoutMs, quint16 port, QString *outError = nullptr); ///< Connect to the focus server
|
||||
|
||||
grpc::Status raise(); ///< Performs the 'raise' call.
|
||||
grpc::Status version(QString &outVersion); ///< Performs the 'version' call.
|
||||
|
||||
@ -574,6 +574,33 @@ SPStreamEvent newUserBadEvent(QString const &userID, QString const &errorMessage
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] userID The userID.
|
||||
/// \param[in] usedBytes The number of used bytes.
|
||||
//****************************************************************************************************************************************************
|
||||
SPStreamEvent newUsedBytesChangedEvent(QString const &userID, qint64 usedBytes) {
|
||||
auto event = new grpc::UsedBytesChangedEvent;
|
||||
event->set_userid(userID.toStdString());
|
||||
event->set_usedbytes(usedBytes);
|
||||
auto userEvent = new grpc::UserEvent;
|
||||
userEvent->set_allocated_usedbyteschangedevent(event);
|
||||
return wrapUserEvent(userEvent);
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] username The username that was provided for the failed IMAP login attempt.
|
||||
/// \return The event.
|
||||
//****************************************************************************************************************************************************
|
||||
SPStreamEvent newIMAPLoginFailedEvent(QString const &username) {
|
||||
auto event = new grpc::ImapLoginFailedEvent;
|
||||
event->set_username(username.toStdString());
|
||||
auto userEvent = new grpc::UserEvent;
|
||||
userEvent->set_allocated_imaploginfailedevent(event);
|
||||
return wrapUserEvent(userEvent);
|
||||
}
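Both factories fill one of the new members of the UserEvent oneof; a minimal sketch of what that amounts to with the generated accessors (the user ID and size are placeholders, and <cassert> is assumed to be included):

grpc::UserEvent userEvent;
auto *usedBytes = new grpc::UsedBytesChangedEvent;
usedBytes->set_userid("user-id-placeholder");
usedBytes->set_usedbytes(qint64(42) * 1024 * 1024);
userEvent.set_allocated_usedbyteschangedevent(usedBytes); // the event takes ownership of the pointer.
assert(userEvent.has_usedbyteschangedevent());            // the oneof now reports the used-bytes case.
assert(userEvent.usedbyteschangedevent().usedbytes() == qint64(42) * 1024 * 1024);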
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] errorCode The error code.
|
||||
/// \return The event.
|
||||
|
||||
@ -78,6 +78,8 @@ SPStreamEvent newToggleSplitModeFinishedEvent(QString const &userID); ///< Creat
|
||||
SPStreamEvent newUserDisconnectedEvent(QString const &username); ///< Create a new UserDisconnectedEvent event.
|
||||
SPStreamEvent newUserChangedEvent(QString const &userID); ///< Create a new UserChangedEvent event.
|
||||
SPStreamEvent newUserBadEvent(QString const &userID, QString const& errorMessage); ///< Create a new UserBadEvent event.
|
||||
SPStreamEvent newUsedBytesChangedEvent(QString const &userID, qint64 usedBytes); ///< Create a new UsedBytesChangedEvent event.
|
||||
SPStreamEvent newIMAPLoginFailedEvent(QString const &username); ///< Create a new ImapLoginFailedEvent event.
|
||||
|
||||
// Generic error event
|
||||
SPStreamEvent newGenericErrorEvent(grpc::ErrorCode errorCode); ///< Create a new GenericErrorEvent event.
|
||||
|
||||
@ -1374,6 +1374,21 @@ void GRPCClient::processUserEvent(UserEvent const &event) {
|
||||
emit userBadEvent(userID, errorMessage);
|
||||
break;
|
||||
}
|
||||
case UserEvent::kUsedBytesChangedEvent: {
|
||||
UsedBytesChangedEvent const& e = event.usedbyteschangedevent();
|
||||
QString const userID = QString::fromStdString(e.userid());
|
||||
qint64 const usedBytes = e.usedbytes();
|
||||
this->logTrace(QString("User event received: UsedBytesChangedEvent (userID = %1, usedBytes = %2).").arg(userID).arg(usedBytes));
|
||||
emit usedBytesChanged(userID, usedBytes);
|
||||
break;
|
||||
}
|
||||
case UserEvent::kImapLoginFailedEvent: {
|
||||
ImapLoginFailedEvent const& e = event.imaploginfailedevent();
|
||||
QString const username = QString::fromStdString(e.username());
|
||||
this->logTrace(QString("User event received: IMAPLoginFailed (username = %1).:").arg(username));
|
||||
emit imapLoginFailed(username);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
this->logError("Unknown User event received.");
|
||||
}
|
||||
|
||||
@ -179,6 +179,8 @@ signals:
|
||||
void userDisconnected(QString const &username);
|
||||
void userChanged(QString const &userID);
|
||||
void userBadEvent(QString const &userID, QString const& errorMessage);
|
||||
void usedBytesChanged(QString const &userID, qint64 usedBytes);
|
||||
void imapLoginFailed(QString const& username);
|
||||
|
||||
public: // keychain related calls
|
||||
grpc::Status availableKeychains(QStringList &outKeychains);
|
||||
|
||||
File diff suppressed because it is too large
@ -100,6 +100,9 @@ extern GuiReadyResponseDefaultTypeInternal _GuiReadyResponse_default_instance_;
|
||||
class HasNoKeychainEvent;
|
||||
struct HasNoKeychainEventDefaultTypeInternal;
|
||||
extern HasNoKeychainEventDefaultTypeInternal _HasNoKeychainEvent_default_instance_;
|
||||
class ImapLoginFailedEvent;
|
||||
struct ImapLoginFailedEventDefaultTypeInternal;
|
||||
extern ImapLoginFailedEventDefaultTypeInternal _ImapLoginFailedEvent_default_instance_;
|
||||
class ImapSmtpSettings;
|
||||
struct ImapSmtpSettingsDefaultTypeInternal;
|
||||
extern ImapSmtpSettingsDefaultTypeInternal _ImapSmtpSettings_default_instance_;
|
||||
@ -202,6 +205,9 @@ extern UpdateSilentRestartNeededDefaultTypeInternal _UpdateSilentRestartNeeded_d
|
||||
class UpdateVersionChanged;
|
||||
struct UpdateVersionChangedDefaultTypeInternal;
|
||||
extern UpdateVersionChangedDefaultTypeInternal _UpdateVersionChanged_default_instance_;
|
||||
class UsedBytesChangedEvent;
|
||||
struct UsedBytesChangedEventDefaultTypeInternal;
|
||||
extern UsedBytesChangedEventDefaultTypeInternal _UsedBytesChangedEvent_default_instance_;
|
||||
class User;
|
||||
struct UserDefaultTypeInternal;
|
||||
extern UserDefaultTypeInternal _User_default_instance_;
|
||||
@ -242,6 +248,7 @@ template<> ::grpc::EventStreamRequest* Arena::CreateMaybeMessage<::grpc::EventSt
|
||||
template<> ::grpc::GenericErrorEvent* Arena::CreateMaybeMessage<::grpc::GenericErrorEvent>(Arena*);
|
||||
template<> ::grpc::GuiReadyResponse* Arena::CreateMaybeMessage<::grpc::GuiReadyResponse>(Arena*);
|
||||
template<> ::grpc::HasNoKeychainEvent* Arena::CreateMaybeMessage<::grpc::HasNoKeychainEvent>(Arena*);
|
||||
template<> ::grpc::ImapLoginFailedEvent* Arena::CreateMaybeMessage<::grpc::ImapLoginFailedEvent>(Arena*);
|
||||
template<> ::grpc::ImapSmtpSettings* Arena::CreateMaybeMessage<::grpc::ImapSmtpSettings>(Arena*);
|
||||
template<> ::grpc::InternetStatusEvent* Arena::CreateMaybeMessage<::grpc::InternetStatusEvent>(Arena*);
|
||||
template<> ::grpc::KeychainEvent* Arena::CreateMaybeMessage<::grpc::KeychainEvent>(Arena*);
|
||||
@ -276,6 +283,7 @@ template<> ::grpc::UpdateManualReadyEvent* Arena::CreateMaybeMessage<::grpc::Upd
|
||||
template<> ::grpc::UpdateManualRestartNeededEvent* Arena::CreateMaybeMessage<::grpc::UpdateManualRestartNeededEvent>(Arena*);
|
||||
template<> ::grpc::UpdateSilentRestartNeeded* Arena::CreateMaybeMessage<::grpc::UpdateSilentRestartNeeded>(Arena*);
|
||||
template<> ::grpc::UpdateVersionChanged* Arena::CreateMaybeMessage<::grpc::UpdateVersionChanged>(Arena*);
|
||||
template<> ::grpc::UsedBytesChangedEvent* Arena::CreateMaybeMessage<::grpc::UsedBytesChangedEvent>(Arena*);
|
||||
template<> ::grpc::User* Arena::CreateMaybeMessage<::grpc::User>(Arena*);
|
||||
template<> ::grpc::UserBadEvent* Arena::CreateMaybeMessage<::grpc::UserBadEvent>(Arena*);
|
||||
template<> ::grpc::UserChangedEvent* Arena::CreateMaybeMessage<::grpc::UserChangedEvent>(Arena*);
|
||||
@ -9077,6 +9085,8 @@ class UserEvent final :
|
||||
kUserDisconnected = 2,
|
||||
kUserChanged = 3,
|
||||
kUserBadEvent = 4,
|
||||
kUsedBytesChangedEvent = 5,
|
||||
kImapLoginFailedEvent = 6,
|
||||
EVENT_NOT_SET = 0,
|
||||
};
|
||||
|
||||
@ -9162,6 +9172,8 @@ class UserEvent final :
|
||||
kUserDisconnectedFieldNumber = 2,
|
||||
kUserChangedFieldNumber = 3,
|
||||
kUserBadEventFieldNumber = 4,
|
||||
kUsedBytesChangedEventFieldNumber = 5,
|
||||
kImapLoginFailedEventFieldNumber = 6,
|
||||
};
|
||||
// .grpc.ToggleSplitModeFinishedEvent toggleSplitModeFinished = 1;
|
||||
bool has_togglesplitmodefinished() const;
|
||||
@ -9235,6 +9247,42 @@ class UserEvent final :
|
||||
::grpc::UserBadEvent* userbadevent);
|
||||
::grpc::UserBadEvent* unsafe_arena_release_userbadevent();
|
||||
|
||||
// .grpc.UsedBytesChangedEvent usedBytesChangedEvent = 5;
|
||||
bool has_usedbyteschangedevent() const;
|
||||
private:
|
||||
bool _internal_has_usedbyteschangedevent() const;
|
||||
public:
|
||||
void clear_usedbyteschangedevent();
|
||||
const ::grpc::UsedBytesChangedEvent& usedbyteschangedevent() const;
|
||||
PROTOBUF_NODISCARD ::grpc::UsedBytesChangedEvent* release_usedbyteschangedevent();
|
||||
::grpc::UsedBytesChangedEvent* mutable_usedbyteschangedevent();
|
||||
void set_allocated_usedbyteschangedevent(::grpc::UsedBytesChangedEvent* usedbyteschangedevent);
|
||||
private:
|
||||
const ::grpc::UsedBytesChangedEvent& _internal_usedbyteschangedevent() const;
|
||||
::grpc::UsedBytesChangedEvent* _internal_mutable_usedbyteschangedevent();
|
||||
public:
|
||||
void unsafe_arena_set_allocated_usedbyteschangedevent(
|
||||
::grpc::UsedBytesChangedEvent* usedbyteschangedevent);
|
||||
::grpc::UsedBytesChangedEvent* unsafe_arena_release_usedbyteschangedevent();
|
||||
|
||||
// .grpc.ImapLoginFailedEvent imapLoginFailedEvent = 6;
|
||||
bool has_imaploginfailedevent() const;
|
||||
private:
|
||||
bool _internal_has_imaploginfailedevent() const;
|
||||
public:
|
||||
void clear_imaploginfailedevent();
|
||||
const ::grpc::ImapLoginFailedEvent& imaploginfailedevent() const;
|
||||
PROTOBUF_NODISCARD ::grpc::ImapLoginFailedEvent* release_imaploginfailedevent();
|
||||
::grpc::ImapLoginFailedEvent* mutable_imaploginfailedevent();
|
||||
void set_allocated_imaploginfailedevent(::grpc::ImapLoginFailedEvent* imaploginfailedevent);
|
||||
private:
|
||||
const ::grpc::ImapLoginFailedEvent& _internal_imaploginfailedevent() const;
|
||||
::grpc::ImapLoginFailedEvent* _internal_mutable_imaploginfailedevent();
|
||||
public:
|
||||
void unsafe_arena_set_allocated_imaploginfailedevent(
|
||||
::grpc::ImapLoginFailedEvent* imaploginfailedevent);
|
||||
::grpc::ImapLoginFailedEvent* unsafe_arena_release_imaploginfailedevent();
|
||||
|
||||
void clear_event();
|
||||
EventCase event_case() const;
|
||||
// @@protoc_insertion_point(class_scope:grpc.UserEvent)
|
||||
@ -9244,6 +9292,8 @@ class UserEvent final :
|
||||
void set_has_userdisconnected();
|
||||
void set_has_userchanged();
|
||||
void set_has_userbadevent();
|
||||
void set_has_usedbyteschangedevent();
|
||||
void set_has_imaploginfailedevent();
|
||||
|
||||
inline bool has_event() const;
|
||||
inline void clear_has_event();
|
||||
@ -9259,6 +9309,8 @@ class UserEvent final :
|
||||
::grpc::UserDisconnectedEvent* userdisconnected_;
|
||||
::grpc::UserChangedEvent* userchanged_;
|
||||
::grpc::UserBadEvent* userbadevent_;
|
||||
::grpc::UsedBytesChangedEvent* usedbyteschangedevent_;
|
||||
::grpc::ImapLoginFailedEvent* imaploginfailedevent_;
|
||||
} event_;
|
||||
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
|
||||
uint32_t _oneof_case_[1];
|
||||
@ -9897,6 +9949,323 @@ class UserBadEvent final :
|
||||
};
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
class UsedBytesChangedEvent final :
|
||||
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:grpc.UsedBytesChangedEvent) */ {
|
||||
public:
|
||||
inline UsedBytesChangedEvent() : UsedBytesChangedEvent(nullptr) {}
|
||||
~UsedBytesChangedEvent() override;
|
||||
explicit PROTOBUF_CONSTEXPR UsedBytesChangedEvent(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
|
||||
|
||||
UsedBytesChangedEvent(const UsedBytesChangedEvent& from);
|
||||
UsedBytesChangedEvent(UsedBytesChangedEvent&& from) noexcept
|
||||
: UsedBytesChangedEvent() {
|
||||
*this = ::std::move(from);
|
||||
}
|
||||
|
||||
inline UsedBytesChangedEvent& operator=(const UsedBytesChangedEvent& from) {
|
||||
CopyFrom(from);
|
||||
return *this;
|
||||
}
|
||||
inline UsedBytesChangedEvent& operator=(UsedBytesChangedEvent&& from) noexcept {
|
||||
if (this == &from) return *this;
|
||||
if (GetOwningArena() == from.GetOwningArena()
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
&& GetOwningArena() != nullptr
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
) {
|
||||
InternalSwap(&from);
|
||||
} else {
|
||||
CopyFrom(from);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
|
||||
return GetDescriptor();
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
|
||||
return default_instance().GetMetadata().descriptor;
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
|
||||
return default_instance().GetMetadata().reflection;
|
||||
}
|
||||
static const UsedBytesChangedEvent& default_instance() {
|
||||
return *internal_default_instance();
|
||||
}
|
||||
static inline const UsedBytesChangedEvent* internal_default_instance() {
|
||||
return reinterpret_cast<const UsedBytesChangedEvent*>(
|
||||
&_UsedBytesChangedEvent_default_instance_);
|
||||
}
|
||||
static constexpr int kIndexInFileMessages =
|
||||
57;
|
||||
|
||||
friend void swap(UsedBytesChangedEvent& a, UsedBytesChangedEvent& b) {
|
||||
a.Swap(&b);
|
||||
}
|
||||
inline void Swap(UsedBytesChangedEvent* other) {
|
||||
if (other == this) return;
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() != nullptr &&
|
||||
GetOwningArena() == other->GetOwningArena()) {
|
||||
#else // PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() == other->GetOwningArena()) {
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
InternalSwap(other);
|
||||
} else {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
|
||||
}
|
||||
}
|
||||
void UnsafeArenaSwap(UsedBytesChangedEvent* other) {
|
||||
if (other == this) return;
|
||||
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
|
||||
InternalSwap(other);
|
||||
}
|
||||
|
||||
// implements Message ----------------------------------------------
|
||||
|
||||
UsedBytesChangedEvent* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
|
||||
return CreateMaybeMessage<UsedBytesChangedEvent>(arena);
|
||||
}
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
|
||||
void CopyFrom(const UsedBytesChangedEvent& from);
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
|
||||
void MergeFrom( const UsedBytesChangedEvent& from) {
|
||||
UsedBytesChangedEvent::MergeImpl(*this, from);
|
||||
}
|
||||
private:
|
||||
static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg);
|
||||
public:
|
||||
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
|
||||
bool IsInitialized() const final;
|
||||
|
||||
size_t ByteSizeLong() const final;
|
||||
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
|
||||
uint8_t* _InternalSerialize(
|
||||
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
|
||||
int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
|
||||
|
||||
private:
|
||||
void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
|
||||
void SharedDtor();
|
||||
void SetCachedSize(int size) const final;
|
||||
void InternalSwap(UsedBytesChangedEvent* other);
|
||||
|
||||
private:
|
||||
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
|
||||
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
|
||||
return "grpc.UsedBytesChangedEvent";
|
||||
}
|
||||
protected:
|
||||
explicit UsedBytesChangedEvent(::PROTOBUF_NAMESPACE_ID::Arena* arena,
|
||||
bool is_message_owned = false);
|
||||
public:
|
||||
|
||||
static const ClassData _class_data_;
|
||||
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;
|
||||
|
||||
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
|
||||
|
||||
// nested types ----------------------------------------------------
|
||||
|
||||
// accessors -------------------------------------------------------
|
||||
|
||||
enum : int {
|
||||
kUserIDFieldNumber = 1,
|
||||
kUsedBytesFieldNumber = 2,
|
||||
};
|
||||
// string userID = 1;
|
||||
void clear_userid();
|
||||
const std::string& userid() const;
|
||||
template <typename ArgT0 = const std::string&, typename... ArgT>
|
||||
void set_userid(ArgT0&& arg0, ArgT... args);
|
||||
std::string* mutable_userid();
|
||||
PROTOBUF_NODISCARD std::string* release_userid();
|
||||
void set_allocated_userid(std::string* userid);
|
||||
private:
|
||||
const std::string& _internal_userid() const;
|
||||
inline PROTOBUF_ALWAYS_INLINE void _internal_set_userid(const std::string& value);
|
||||
std::string* _internal_mutable_userid();
|
||||
public:
|
||||
|
||||
// int64 usedBytes = 2;
|
||||
void clear_usedbytes();
|
||||
int64_t usedbytes() const;
|
||||
void set_usedbytes(int64_t value);
|
||||
private:
|
||||
int64_t _internal_usedbytes() const;
|
||||
void _internal_set_usedbytes(int64_t value);
|
||||
public:
|
||||
|
||||
// @@protoc_insertion_point(class_scope:grpc.UsedBytesChangedEvent)
|
||||
private:
|
||||
class _Internal;
|
||||
|
||||
template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
|
||||
typedef void InternalArenaConstructable_;
|
||||
typedef void DestructorSkippable_;
|
||||
struct Impl_ {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr userid_;
|
||||
int64_t usedbytes_;
|
||||
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
|
||||
};
|
||||
union { Impl_ _impl_; };
|
||||
friend struct ::TableStruct_bridge_2eproto;
|
||||
};
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
class ImapLoginFailedEvent final :
|
||||
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:grpc.ImapLoginFailedEvent) */ {
|
||||
public:
|
||||
inline ImapLoginFailedEvent() : ImapLoginFailedEvent(nullptr) {}
|
||||
~ImapLoginFailedEvent() override;
|
||||
explicit PROTOBUF_CONSTEXPR ImapLoginFailedEvent(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
|
||||
|
||||
ImapLoginFailedEvent(const ImapLoginFailedEvent& from);
|
||||
ImapLoginFailedEvent(ImapLoginFailedEvent&& from) noexcept
|
||||
: ImapLoginFailedEvent() {
|
||||
*this = ::std::move(from);
|
||||
}
|
||||
|
||||
inline ImapLoginFailedEvent& operator=(const ImapLoginFailedEvent& from) {
|
||||
CopyFrom(from);
|
||||
return *this;
|
||||
}
|
||||
inline ImapLoginFailedEvent& operator=(ImapLoginFailedEvent&& from) noexcept {
|
||||
if (this == &from) return *this;
|
||||
if (GetOwningArena() == from.GetOwningArena()
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
&& GetOwningArena() != nullptr
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
) {
|
||||
InternalSwap(&from);
|
||||
} else {
|
||||
CopyFrom(from);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
|
||||
return GetDescriptor();
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
|
||||
return default_instance().GetMetadata().descriptor;
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
|
||||
return default_instance().GetMetadata().reflection;
|
||||
}
|
||||
static const ImapLoginFailedEvent& default_instance() {
|
||||
return *internal_default_instance();
|
||||
}
|
||||
static inline const ImapLoginFailedEvent* internal_default_instance() {
|
||||
return reinterpret_cast<const ImapLoginFailedEvent*>(
|
||||
&_ImapLoginFailedEvent_default_instance_);
|
||||
}
|
||||
static constexpr int kIndexInFileMessages =
|
||||
58;
|
||||
|
||||
friend void swap(ImapLoginFailedEvent& a, ImapLoginFailedEvent& b) {
|
||||
a.Swap(&b);
|
||||
}
|
||||
inline void Swap(ImapLoginFailedEvent* other) {
|
||||
if (other == this) return;
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() != nullptr &&
|
||||
GetOwningArena() == other->GetOwningArena()) {
|
||||
#else // PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() == other->GetOwningArena()) {
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
InternalSwap(other);
|
||||
} else {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
|
||||
}
|
||||
}
|
||||
void UnsafeArenaSwap(ImapLoginFailedEvent* other) {
|
||||
if (other == this) return;
|
||||
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
|
||||
InternalSwap(other);
|
||||
}
|
||||
|
||||
// implements Message ----------------------------------------------
|
||||
|
||||
ImapLoginFailedEvent* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
|
||||
return CreateMaybeMessage<ImapLoginFailedEvent>(arena);
|
||||
}
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
|
||||
void CopyFrom(const ImapLoginFailedEvent& from);
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
|
||||
void MergeFrom( const ImapLoginFailedEvent& from) {
|
||||
ImapLoginFailedEvent::MergeImpl(*this, from);
|
||||
}
|
||||
private:
|
||||
static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg);
|
||||
public:
|
||||
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
|
||||
bool IsInitialized() const final;
|
||||
|
||||
size_t ByteSizeLong() const final;
|
||||
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
|
||||
uint8_t* _InternalSerialize(
|
||||
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
|
||||
int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
|
||||
|
||||
private:
|
||||
void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
|
||||
void SharedDtor();
|
||||
void SetCachedSize(int size) const final;
|
||||
void InternalSwap(ImapLoginFailedEvent* other);
|
||||
|
||||
private:
|
||||
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
|
||||
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
|
||||
return "grpc.ImapLoginFailedEvent";
|
||||
}
|
||||
protected:
|
||||
explicit ImapLoginFailedEvent(::PROTOBUF_NAMESPACE_ID::Arena* arena,
|
||||
bool is_message_owned = false);
|
||||
public:
|
||||
|
||||
static const ClassData _class_data_;
|
||||
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;
|
||||
|
||||
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
|
||||
|
||||
// nested types ----------------------------------------------------
|
||||
|
||||
// accessors -------------------------------------------------------
|
||||
|
||||
enum : int {
|
||||
kUsernameFieldNumber = 1,
|
||||
};
|
||||
// string username = 1;
|
||||
void clear_username();
|
||||
const std::string& username() const;
|
||||
template <typename ArgT0 = const std::string&, typename... ArgT>
|
||||
void set_username(ArgT0&& arg0, ArgT... args);
|
||||
std::string* mutable_username();
|
||||
PROTOBUF_NODISCARD std::string* release_username();
|
||||
void set_allocated_username(std::string* username);
|
||||
private:
|
||||
const std::string& _internal_username() const;
|
||||
inline PROTOBUF_ALWAYS_INLINE void _internal_set_username(const std::string& value);
|
||||
std::string* _internal_mutable_username();
|
||||
public:
|
||||
|
||||
// @@protoc_insertion_point(class_scope:grpc.ImapLoginFailedEvent)
|
||||
private:
|
||||
class _Internal;
|
||||
|
||||
template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
|
||||
typedef void InternalArenaConstructable_;
|
||||
typedef void DestructorSkippable_;
|
||||
struct Impl_ {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr username_;
|
||||
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
|
||||
};
|
||||
union { Impl_ _impl_; };
|
||||
friend struct ::TableStruct_bridge_2eproto;
|
||||
};
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
class GenericErrorEvent final :
|
||||
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:grpc.GenericErrorEvent) */ {
|
||||
public:
|
||||
@ -9945,7 +10314,7 @@ class GenericErrorEvent final :
|
||||
&_GenericErrorEvent_default_instance_);
|
||||
}
|
||||
static constexpr int kIndexInFileMessages =
|
||||
57;
|
||||
59;
|
||||
|
||||
friend void swap(GenericErrorEvent& a, GenericErrorEvent& b) {
|
||||
a.Swap(&b);
|
||||
@ -15728,6 +16097,154 @@ inline ::grpc::UserBadEvent* UserEvent::mutable_userbadevent() {
|
||||
return _msg;
|
||||
}
|
||||
|
||||
// .grpc.UsedBytesChangedEvent usedBytesChangedEvent = 5;
|
||||
inline bool UserEvent::_internal_has_usedbyteschangedevent() const {
|
||||
return event_case() == kUsedBytesChangedEvent;
|
||||
}
|
||||
inline bool UserEvent::has_usedbyteschangedevent() const {
|
||||
return _internal_has_usedbyteschangedevent();
|
||||
}
|
||||
inline void UserEvent::set_has_usedbyteschangedevent() {
|
||||
_impl_._oneof_case_[0] = kUsedBytesChangedEvent;
|
||||
}
|
||||
inline void UserEvent::clear_usedbyteschangedevent() {
|
||||
if (_internal_has_usedbyteschangedevent()) {
|
||||
if (GetArenaForAllocation() == nullptr) {
|
||||
delete _impl_.event_.usedbyteschangedevent_;
|
||||
}
|
||||
clear_has_event();
|
||||
}
|
||||
}
|
||||
inline ::grpc::UsedBytesChangedEvent* UserEvent::release_usedbyteschangedevent() {
|
||||
// @@protoc_insertion_point(field_release:grpc.UserEvent.usedBytesChangedEvent)
|
||||
if (_internal_has_usedbyteschangedevent()) {
|
||||
clear_has_event();
|
||||
::grpc::UsedBytesChangedEvent* temp = _impl_.event_.usedbyteschangedevent_;
|
||||
if (GetArenaForAllocation() != nullptr) {
|
||||
temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
|
||||
}
|
||||
_impl_.event_.usedbyteschangedevent_ = nullptr;
|
||||
return temp;
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
inline const ::grpc::UsedBytesChangedEvent& UserEvent::_internal_usedbyteschangedevent() const {
|
||||
return _internal_has_usedbyteschangedevent()
|
||||
? *_impl_.event_.usedbyteschangedevent_
|
||||
: reinterpret_cast< ::grpc::UsedBytesChangedEvent&>(::grpc::_UsedBytesChangedEvent_default_instance_);
|
||||
}
|
||||
inline const ::grpc::UsedBytesChangedEvent& UserEvent::usedbyteschangedevent() const {
|
||||
// @@protoc_insertion_point(field_get:grpc.UserEvent.usedBytesChangedEvent)
|
||||
return _internal_usedbyteschangedevent();
|
||||
}
|
||||
inline ::grpc::UsedBytesChangedEvent* UserEvent::unsafe_arena_release_usedbyteschangedevent() {
|
||||
// @@protoc_insertion_point(field_unsafe_arena_release:grpc.UserEvent.usedBytesChangedEvent)
|
||||
if (_internal_has_usedbyteschangedevent()) {
|
||||
clear_has_event();
|
||||
::grpc::UsedBytesChangedEvent* temp = _impl_.event_.usedbyteschangedevent_;
|
||||
_impl_.event_.usedbyteschangedevent_ = nullptr;
|
||||
return temp;
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
inline void UserEvent::unsafe_arena_set_allocated_usedbyteschangedevent(::grpc::UsedBytesChangedEvent* usedbyteschangedevent) {
|
||||
clear_event();
|
||||
if (usedbyteschangedevent) {
|
||||
set_has_usedbyteschangedevent();
|
||||
_impl_.event_.usedbyteschangedevent_ = usedbyteschangedevent;
|
||||
}
|
||||
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:grpc.UserEvent.usedBytesChangedEvent)
|
||||
}
|
||||
inline ::grpc::UsedBytesChangedEvent* UserEvent::_internal_mutable_usedbyteschangedevent() {
|
||||
if (!_internal_has_usedbyteschangedevent()) {
|
||||
clear_event();
|
||||
set_has_usedbyteschangedevent();
|
||||
_impl_.event_.usedbyteschangedevent_ = CreateMaybeMessage< ::grpc::UsedBytesChangedEvent >(GetArenaForAllocation());
|
||||
}
|
||||
return _impl_.event_.usedbyteschangedevent_;
|
||||
}
|
||||
inline ::grpc::UsedBytesChangedEvent* UserEvent::mutable_usedbyteschangedevent() {
|
||||
::grpc::UsedBytesChangedEvent* _msg = _internal_mutable_usedbyteschangedevent();
|
||||
// @@protoc_insertion_point(field_mutable:grpc.UserEvent.usedBytesChangedEvent)
|
||||
return _msg;
|
||||
}
|
||||
|
||||
// .grpc.ImapLoginFailedEvent imapLoginFailedEvent = 6;
|
||||
inline bool UserEvent::_internal_has_imaploginfailedevent() const {
|
||||
return event_case() == kImapLoginFailedEvent;
|
||||
}
|
||||
inline bool UserEvent::has_imaploginfailedevent() const {
|
||||
return _internal_has_imaploginfailedevent();
|
||||
}
|
||||
inline void UserEvent::set_has_imaploginfailedevent() {
|
||||
_impl_._oneof_case_[0] = kImapLoginFailedEvent;
|
||||
}
|
||||
inline void UserEvent::clear_imaploginfailedevent() {
|
||||
if (_internal_has_imaploginfailedevent()) {
|
||||
if (GetArenaForAllocation() == nullptr) {
|
||||
delete _impl_.event_.imaploginfailedevent_;
|
||||
}
|
||||
clear_has_event();
|
||||
}
|
||||
}
|
||||
inline ::grpc::ImapLoginFailedEvent* UserEvent::release_imaploginfailedevent() {
|
||||
// @@protoc_insertion_point(field_release:grpc.UserEvent.imapLoginFailedEvent)
|
||||
if (_internal_has_imaploginfailedevent()) {
|
||||
clear_has_event();
|
||||
::grpc::ImapLoginFailedEvent* temp = _impl_.event_.imaploginfailedevent_;
|
||||
if (GetArenaForAllocation() != nullptr) {
|
||||
temp = ::PROTOBUF_NAMESPACE_ID::internal::DuplicateIfNonNull(temp);
|
||||
}
|
||||
_impl_.event_.imaploginfailedevent_ = nullptr;
|
||||
return temp;
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
inline const ::grpc::ImapLoginFailedEvent& UserEvent::_internal_imaploginfailedevent() const {
|
||||
return _internal_has_imaploginfailedevent()
|
||||
? *_impl_.event_.imaploginfailedevent_
|
||||
: reinterpret_cast< ::grpc::ImapLoginFailedEvent&>(::grpc::_ImapLoginFailedEvent_default_instance_);
|
||||
}
|
||||
inline const ::grpc::ImapLoginFailedEvent& UserEvent::imaploginfailedevent() const {
|
||||
// @@protoc_insertion_point(field_get:grpc.UserEvent.imapLoginFailedEvent)
|
||||
return _internal_imaploginfailedevent();
|
||||
}
|
||||
inline ::grpc::ImapLoginFailedEvent* UserEvent::unsafe_arena_release_imaploginfailedevent() {
|
||||
// @@protoc_insertion_point(field_unsafe_arena_release:grpc.UserEvent.imapLoginFailedEvent)
|
||||
if (_internal_has_imaploginfailedevent()) {
|
||||
clear_has_event();
|
||||
::grpc::ImapLoginFailedEvent* temp = _impl_.event_.imaploginfailedevent_;
|
||||
_impl_.event_.imaploginfailedevent_ = nullptr;
|
||||
return temp;
|
||||
} else {
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
inline void UserEvent::unsafe_arena_set_allocated_imaploginfailedevent(::grpc::ImapLoginFailedEvent* imaploginfailedevent) {
|
||||
clear_event();
|
||||
if (imaploginfailedevent) {
|
||||
set_has_imaploginfailedevent();
|
||||
_impl_.event_.imaploginfailedevent_ = imaploginfailedevent;
|
||||
}
|
||||
// @@protoc_insertion_point(field_unsafe_arena_set_allocated:grpc.UserEvent.imapLoginFailedEvent)
|
||||
}
|
||||
inline ::grpc::ImapLoginFailedEvent* UserEvent::_internal_mutable_imaploginfailedevent() {
|
||||
if (!_internal_has_imaploginfailedevent()) {
|
||||
clear_event();
|
||||
set_has_imaploginfailedevent();
|
||||
_impl_.event_.imaploginfailedevent_ = CreateMaybeMessage< ::grpc::ImapLoginFailedEvent >(GetArenaForAllocation());
|
||||
}
|
||||
return _impl_.event_.imaploginfailedevent_;
|
||||
}
|
||||
inline ::grpc::ImapLoginFailedEvent* UserEvent::mutable_imaploginfailedevent() {
|
||||
::grpc::ImapLoginFailedEvent* _msg = _internal_mutable_imaploginfailedevent();
|
||||
// @@protoc_insertion_point(field_mutable:grpc.UserEvent.imapLoginFailedEvent)
|
||||
return _msg;
|
||||
}
|
||||
|
||||
inline bool UserEvent::has_event() const {
|
||||
return event_case() != EVENT_NOT_SET;
|
||||
}
|
||||
@ -16005,6 +16522,134 @@ inline void UserBadEvent::set_allocated_errormessage(std::string* errormessage)
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// UsedBytesChangedEvent
|
||||
|
||||
// string userID = 1;
|
||||
inline void UsedBytesChangedEvent::clear_userid() {
|
||||
_impl_.userid_.ClearToEmpty();
|
||||
}
|
||||
inline const std::string& UsedBytesChangedEvent::userid() const {
|
||||
// @@protoc_insertion_point(field_get:grpc.UsedBytesChangedEvent.userID)
|
||||
return _internal_userid();
|
||||
}
|
||||
template <typename ArgT0, typename... ArgT>
|
||||
inline PROTOBUF_ALWAYS_INLINE
|
||||
void UsedBytesChangedEvent::set_userid(ArgT0&& arg0, ArgT... args) {
|
||||
|
||||
_impl_.userid_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
|
||||
// @@protoc_insertion_point(field_set:grpc.UsedBytesChangedEvent.userID)
|
||||
}
|
||||
inline std::string* UsedBytesChangedEvent::mutable_userid() {
|
||||
std::string* _s = _internal_mutable_userid();
|
||||
// @@protoc_insertion_point(field_mutable:grpc.UsedBytesChangedEvent.userID)
|
||||
return _s;
|
||||
}
|
||||
inline const std::string& UsedBytesChangedEvent::_internal_userid() const {
|
||||
return _impl_.userid_.Get();
|
||||
}
|
||||
inline void UsedBytesChangedEvent::_internal_set_userid(const std::string& value) {
|
||||
|
||||
_impl_.userid_.Set(value, GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* UsedBytesChangedEvent::_internal_mutable_userid() {
|
||||
|
||||
return _impl_.userid_.Mutable(GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* UsedBytesChangedEvent::release_userid() {
|
||||
// @@protoc_insertion_point(field_release:grpc.UsedBytesChangedEvent.userID)
|
||||
return _impl_.userid_.Release();
|
||||
}
|
||||
inline void UsedBytesChangedEvent::set_allocated_userid(std::string* userid) {
|
||||
if (userid != nullptr) {
|
||||
|
||||
} else {
|
||||
|
||||
}
|
||||
_impl_.userid_.SetAllocated(userid, GetArenaForAllocation());
|
||||
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
if (_impl_.userid_.IsDefault()) {
|
||||
_impl_.userid_.Set("", GetArenaForAllocation());
|
||||
}
|
||||
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
// @@protoc_insertion_point(field_set_allocated:grpc.UsedBytesChangedEvent.userID)
|
||||
}
|
||||
|
||||
// int64 usedBytes = 2;
|
||||
inline void UsedBytesChangedEvent::clear_usedbytes() {
|
||||
_impl_.usedbytes_ = int64_t{0};
|
||||
}
|
||||
inline int64_t UsedBytesChangedEvent::_internal_usedbytes() const {
|
||||
return _impl_.usedbytes_;
|
||||
}
|
||||
inline int64_t UsedBytesChangedEvent::usedbytes() const {
|
||||
// @@protoc_insertion_point(field_get:grpc.UsedBytesChangedEvent.usedBytes)
|
||||
return _internal_usedbytes();
|
||||
}
|
||||
inline void UsedBytesChangedEvent::_internal_set_usedbytes(int64_t value) {
|
||||
|
||||
_impl_.usedbytes_ = value;
|
||||
}
|
||||
inline void UsedBytesChangedEvent::set_usedbytes(int64_t value) {
|
||||
_internal_set_usedbytes(value);
|
||||
// @@protoc_insertion_point(field_set:grpc.UsedBytesChangedEvent.usedBytes)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// ImapLoginFailedEvent
|
||||
|
||||
// string username = 1;
|
||||
inline void ImapLoginFailedEvent::clear_username() {
|
||||
_impl_.username_.ClearToEmpty();
|
||||
}
|
||||
inline const std::string& ImapLoginFailedEvent::username() const {
|
||||
// @@protoc_insertion_point(field_get:grpc.ImapLoginFailedEvent.username)
|
||||
return _internal_username();
|
||||
}
|
||||
template <typename ArgT0, typename... ArgT>
|
||||
inline PROTOBUF_ALWAYS_INLINE
|
||||
void ImapLoginFailedEvent::set_username(ArgT0&& arg0, ArgT... args) {
|
||||
|
||||
_impl_.username_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
|
||||
// @@protoc_insertion_point(field_set:grpc.ImapLoginFailedEvent.username)
|
||||
}
|
||||
inline std::string* ImapLoginFailedEvent::mutable_username() {
|
||||
std::string* _s = _internal_mutable_username();
|
||||
// @@protoc_insertion_point(field_mutable:grpc.ImapLoginFailedEvent.username)
|
||||
return _s;
|
||||
}
|
||||
inline const std::string& ImapLoginFailedEvent::_internal_username() const {
|
||||
return _impl_.username_.Get();
|
||||
}
|
||||
inline void ImapLoginFailedEvent::_internal_set_username(const std::string& value) {
|
||||
|
||||
_impl_.username_.Set(value, GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* ImapLoginFailedEvent::_internal_mutable_username() {
|
||||
|
||||
return _impl_.username_.Mutable(GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* ImapLoginFailedEvent::release_username() {
|
||||
// @@protoc_insertion_point(field_release:grpc.ImapLoginFailedEvent.username)
|
||||
return _impl_.username_.Release();
|
||||
}
|
||||
inline void ImapLoginFailedEvent::set_allocated_username(std::string* username) {
|
||||
if (username != nullptr) {
|
||||
|
||||
} else {
|
||||
|
||||
}
|
||||
_impl_.username_.SetAllocated(username, GetArenaForAllocation());
|
||||
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
if (_impl_.username_.IsDefault()) {
|
||||
_impl_.username_.Set("", GetArenaForAllocation());
|
||||
}
|
||||
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
// @@protoc_insertion_point(field_set_allocated:grpc.ImapLoginFailedEvent.username)
|
||||
}
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// GenericErrorEvent
|
||||
|
||||
// .grpc.ErrorCode code = 1;
|
||||
@ -16144,6 +16789,10 @@ inline void GenericErrorEvent::set_code(::grpc::ErrorCode value) {
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
// -------------------------------------------------------------------
|
||||
|
||||
|
||||
// @@protoc_insertion_point(namespace_scope)
|
||||
|
||||
|
||||
@ -34,7 +34,8 @@ SPUser User::newUser(QObject *parent) {
/// \param[in] parent The parent object.
//****************************************************************************************************************************************************
User::User(QObject *parent)
: QObject(parent) {
: QObject(parent)
, imapFailureCooldownEndTime_(QDateTime::currentDateTime()) {

}

@ -311,4 +312,24 @@ QString User::stateToString(UserState state) {
}


//****************************************************************************************************************************************************
/// We display a notification and pop the application window if an IMAP client tries to connect to a signed out account, but we do not want to
/// do it repeatedly, as it's an intrusive action. This function lets you define a period of time during which the notification should not be
/// displayed.
///
/// \param durationMSecs The duration of the period in milliseconds.
//****************************************************************************************************************************************************
void User::startImapLoginFailureCooldown(qint64 durationMSecs) {
imapFailureCooldownEndTime_ = QDateTime::currentDateTime().addMSecs(durationMSecs);
}


//****************************************************************************************************************************************************
/// \return true if we currently are in a cooldown period for the notification
//****************************************************************************************************************************************************
bool User::isInIMAPLoginFailureCooldown() const {
return QDateTime::currentDateTime() < imapFailureCooldownEndTime_;
}


} // namespace bridgepp

@ -74,6 +74,8 @@ public: // member functions.
User &operator=(User &&) = delete; ///< Disabled move assignment operator.
void update(User const &user); ///< Update the user.
Q_INVOKABLE QString primaryEmailOrUsername() const; ///< Return the user's primary email or, if unknown, its username.
void startImapLoginFailureCooldown(qint64 durationMSecs); ///< Start the user cooldown period for the IMAP login attempt while signed-out notification.
bool isInIMAPLoginFailureCooldown() const; ///< Check if the user is in an IMAP login failure notification cooldown.

public slots:
// slots for QML generated calls
@ -137,6 +139,7 @@ private: // member functions.
User(QObject *parent); ///< Default constructor.

private: // data members.
QDateTime imapFailureCooldownEndTime_; ///< The end date/time for the IMAP login failure notification cooldown period.
QString id_; ///< The userID.
QString username_; ///< The username.
QString password_; ///< The IMAP password of the user.

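The cooldown is just a timestamp comparison against "now". For reference, a minimal Go sketch of the same suppression pattern (illustrative only; the type and names below are not part of the bridge sources):

package cooldown

import "time"

// cooldown suppresses an action until a deadline has passed.
// Illustrative sketch only, mirroring the C++ logic above.
type cooldown struct {
	endTime time.Time
}

// start begins a cooldown period of the given duration.
func (c *cooldown) start(d time.Duration) {
	c.endTime = time.Now().Add(d)
}

// active reports whether the cooldown period is still running.
func (c *cooldown) active() bool {
	return time.Now().Before(c.endTime)
}
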
@ -115,7 +115,7 @@ func (f *frontendCLI) showAccountAddressInfo(user bridge.UserInfo, address strin
|
||||
f.Println("")
|
||||
}
|
||||
|
||||
func (f *frontendCLI) loginAccount(c *ishell.Context) { //nolint:funlen
|
||||
func (f *frontendCLI) loginAccount(c *ishell.Context) {
|
||||
f.ShowPrompt(false)
|
||||
defer f.ShowPrompt(true)
|
||||
|
||||
|
||||
@ -40,7 +40,7 @@ type frontendCLI struct {
|
||||
}
|
||||
|
||||
// New returns a new CLI frontend configured with the given options.
|
||||
func New(bridge *bridge.Bridge, restarter *restarter.Restarter, eventCh <-chan events.Event) *frontendCLI { //nolint:funlen,revive
|
||||
func New(bridge *bridge.Bridge, restarter *restarter.Restarter, eventCh <-chan events.Event) *frontendCLI { //nolint:revive
|
||||
fe := &frontendCLI{
|
||||
Shell: ishell.New(),
|
||||
bridge: bridge,
|
||||
@ -261,7 +261,7 @@ func New(bridge *bridge.Bridge, restarter *restarter.Restarter, eventCh <-chan e
|
||||
return fe
|
||||
}
|
||||
|
||||
func (f *frontendCLI) watchEvents(eventCh <-chan events.Event) { // nolint:funlen
|
||||
func (f *frontendCLI) watchEvents(eventCh <-chan events.Event) { // nolint:gocyclo
|
||||
// GODT-1949: Better error events.
|
||||
for _, err := range f.bridge.GetErrors() {
|
||||
switch {
|
||||
@ -270,12 +270,6 @@ func (f *frontendCLI) watchEvents(eventCh <-chan events.Event) { // nolint:funle
|
||||
|
||||
case errors.Is(err, bridge.ErrVaultInsecure):
|
||||
f.notifyCredentialsError()
|
||||
|
||||
case errors.Is(err, bridge.ErrServeIMAP):
|
||||
f.Println("IMAP server error:", err)
|
||||
|
||||
case errors.Is(err, bridge.ErrServeSMTP):
|
||||
f.Println("SMTP server error:", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -287,6 +281,12 @@ func (f *frontendCLI) watchEvents(eventCh <-chan events.Event) { // nolint:funle
|
||||
case events.ConnStatusDown:
|
||||
f.notifyInternetOff()
|
||||
|
||||
case events.IMAPServerError:
|
||||
f.Println("IMAP server error:", event.Error)
|
||||
|
||||
case events.SMTPServerError:
|
||||
f.Println("SMTP server error:", event.Error)
|
||||
|
||||
case events.UserDeauth:
|
||||
user, err := f.bridge.GetUserInfo(event.UserID)
|
||||
if err != nil {
|
||||
@ -303,6 +303,9 @@ func (f *frontendCLI) watchEvents(eventCh <-chan events.Event) { // nolint:funle
|
||||
|
||||
f.Printf("User %s received a bad event and was logged out.\n", user.Username)
|
||||
|
||||
case events.IMAPLoginFailed:
|
||||
f.Printf("An IMAP login attempt failed for user %v\n", event.Username)
|
||||
|
||||
case events.UserAddressUpdated:
|
||||
user, err := f.bridge.GetUserInfo(event.UserID)
|
||||
if err != nil {
|
||||
|
||||
File diff suppressed because it is too large
@ -448,6 +448,8 @@ message UserEvent {
UserDisconnectedEvent userDisconnected = 2;
UserChangedEvent userChanged = 3;
UserBadEvent userBadEvent = 4;
UsedBytesChangedEvent usedBytesChangedEvent = 5;
ImapLoginFailedEvent imapLoginFailedEvent = 6;
}
}

@ -468,6 +470,15 @@ message UserBadEvent {
string errorMessage = 2;
}

message UsedBytesChangedEvent {
string userID = 1;
int64 usedBytes = 2;
}

message ImapLoginFailedEvent {
string username = 1;
}

//**********************************************************
// Generic errors
//**********************************************************

@ -177,6 +177,14 @@ func NewUserBadEvent(userID string, errorMessage string) *StreamEvent {
return userEvent(&UserEvent{Event: &UserEvent_UserBadEvent{UserBadEvent: &UserBadEvent{UserID: userID, ErrorMessage: errorMessage}}})
}

func NewUsedBytesChangedEvent(userID string, usedBytes int) *StreamEvent {
return userEvent(&UserEvent{Event: &UserEvent_UsedBytesChangedEvent{UsedBytesChangedEvent: &UsedBytesChangedEvent{UserID: userID, UsedBytes: int64(usedBytes)}}})
}

func newIMAPLoginFailedEvent(username string) *StreamEvent {
return userEvent(&UserEvent{Event: &UserEvent_ImapLoginFailedEvent{ImapLoginFailedEvent: &ImapLoginFailedEvent{Username: username}}})
}

func NewGenericErrorEvent(errorCode ErrorCode) *StreamEvent {
return genericErrorEvent(&GenericErrorEvent{Code: errorCode})
}

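These constructors are only invoked from the service's event watcher, which maps the bridge's internal events onto gRPC stream events; the watchEvents hunk further below in this diff does exactly this. A condensed, illustrative sketch of that mapping (the helper name forwardUserEvent is not in the diff; Service, SendEvent and the events package are):

// forwardUserEvent shows, in condensed form, how internal events become
// gRPC stream events; the real mapping lives in watchEvents below.
func (s *Service) forwardUserEvent(event events.Event) {
	switch event := event.(type) {
	case events.UsedSpaceChanged:
		// Forward the new storage usage for the user.
		_ = s.SendEvent(NewUsedBytesChangedEvent(event.UserID, event.UsedSpace))

	case events.IMAPLoginFailed:
		// Tell the GUI that an IMAP client failed to log in with this username.
		_ = s.SendEvent(newIMAPLoginFailedEvent(event.Username))
	}
}
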
@ -25,6 +25,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -37,6 +38,7 @@ import (
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/certs"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/events"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/safe"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/service"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/updater"
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/elastic/go-sysinfo"
|
||||
@ -93,12 +95,10 @@ type Service struct { // nolint:structcheck
|
||||
}
|
||||
|
||||
// NewService returns a new instance of the service.
|
||||
//
|
||||
// nolint:funlen
|
||||
func NewService(
|
||||
panicHandler CrashHandler,
|
||||
restarter Restarter,
|
||||
locations Locator,
|
||||
locations service.Locator,
|
||||
bridge *bridge.Bridge,
|
||||
eventCh <-chan events.Event,
|
||||
quitCh <-chan struct{},
|
||||
@ -110,7 +110,7 @@ func NewService(
|
||||
logrus.WithError(err).Panic("Could not generate gRPC TLS config")
|
||||
}
|
||||
|
||||
config := Config{
|
||||
config := service.Config{
|
||||
Cert: string(certPEM),
|
||||
Token: uuid.NewString(),
|
||||
}
|
||||
@ -141,7 +141,7 @@ func NewService(
|
||||
config.Port = address.Port
|
||||
}
|
||||
|
||||
if path, err := saveGRPCServerConfigFile(locations, &config); err != nil {
|
||||
if path, err := service.SaveGRPCServerConfigFile(locations, &config, serverConfigFileName); err != nil {
|
||||
logrus.WithError(err).WithField("path", path).Panic("Could not write gRPC service config file")
|
||||
} else {
|
||||
logrus.WithField("path", path).Info("Successfully saved gRPC service config file")
|
||||
@ -245,7 +245,7 @@ func (s *Service) WaitUntilFrontendIsReady() {
|
||||
s.initializing.Wait()
|
||||
}
|
||||
|
||||
// nolint:funlen,gocyclo
|
||||
// nolint:gocyclo
|
||||
func (s *Service) watchEvents() {
|
||||
// GODT-1949 Better error events.
|
||||
for _, err := range s.bridge.GetErrors() {
|
||||
@ -255,12 +255,6 @@ func (s *Service) watchEvents() {
|
||||
|
||||
case errors.Is(err, bridge.ErrVaultInsecure):
|
||||
_ = s.SendEvent(NewKeychainHasNoKeychainEvent())
|
||||
|
||||
case errors.Is(err, bridge.ErrServeIMAP):
|
||||
_ = s.SendEvent(NewMailServerSettingsErrorEvent(MailServerSettingsErrorType_IMAP_PORT_STARTUP_ERROR))
|
||||
|
||||
case errors.Is(err, bridge.ErrServeSMTP):
|
||||
_ = s.SendEvent(NewMailServerSettingsErrorEvent(MailServerSettingsErrorType_SMTP_PORT_STARTUP_ERROR))
|
||||
}
|
||||
}
|
||||
|
||||
@ -272,6 +266,12 @@ func (s *Service) watchEvents() {
|
||||
case events.ConnStatusDown:
|
||||
_ = s.SendEvent(NewInternetStatusEvent(false))
|
||||
|
||||
case events.IMAPServerError:
|
||||
_ = s.SendEvent(NewMailServerSettingsErrorEvent(MailServerSettingsErrorType_IMAP_PORT_STARTUP_ERROR))
|
||||
|
||||
case events.SMTPServerError:
|
||||
_ = s.SendEvent(NewMailServerSettingsErrorEvent(MailServerSettingsErrorType_SMTP_PORT_STARTUP_ERROR))
|
||||
|
||||
case events.Raise:
|
||||
_ = s.SendEvent(NewShowMainWindowEvent())
|
||||
|
||||
@ -305,6 +305,12 @@ func (s *Service) watchEvents() {
|
||||
case events.AddressModeChanged:
|
||||
_ = s.SendEvent(NewUserChangedEvent(event.UserID))
|
||||
|
||||
case events.UsedSpaceChanged:
|
||||
_ = s.SendEvent(NewUsedBytesChangedEvent(event.UserID, event.UsedSpace))
|
||||
|
||||
case events.IMAPLoginFailed:
|
||||
_ = s.SendEvent(newIMAPLoginFailedEvent(event.Username))
|
||||
|
||||
case events.UserDeauth:
|
||||
// This is the event the GUI cares about.
|
||||
_ = s.SendEvent(NewUserChangedEvent(event.UserID))
|
||||
@ -481,17 +487,6 @@ func newTLSConfig() (*tls.Config, []byte, error) {
|
||||
}, certPEM, nil
|
||||
}
|
||||
|
||||
func saveGRPCServerConfigFile(locations Locator, config *Config) (string, error) {
|
||||
settingsPath, err := locations.ProvideSettingsPath()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
configPath := filepath.Join(settingsPath, serverConfigFileName)
|
||||
|
||||
return configPath, config.save(configPath)
|
||||
}
|
||||
|
||||
// validateServerToken verify that the server token provided by the client is valid.
|
||||
func validateServerToken(ctx context.Context, wantToken string) error {
|
||||
values, ok := metadata.FromIncomingContext(ctx)
|
||||
@ -577,10 +572,17 @@ func (s *Service) monitorParentPID() {
|
||||
func computeFileSocketPath() (string, error) {
|
||||
tempPath := os.TempDir()
|
||||
for i := 0; i < 1000; i++ {
|
||||
path := filepath.Join(tempPath, fmt.Sprintf("bridge_%v.sock", uuid.NewString()))
|
||||
path := filepath.Join(tempPath, fmt.Sprintf("bridge%04d", rand.Intn(10000))) // nolint:gosec
|
||||
if _, err := os.Stat(path); errors.Is(err, fs.ErrNotExist) {
|
||||
return path, nil
|
||||
}
|
||||
|
||||
if err := os.Remove(path); err != nil {
|
||||
logrus.WithField("path", path).WithError(err).Warning("Could not remove existing socket file")
|
||||
continue
|
||||
}
|
||||
|
||||
return path, nil
|
||||
}
|
||||
|
||||
return "", errors.New("unable to find a suitable file socket in user config folder")
|
||||
|
||||
@ -32,6 +32,7 @@ import (
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/events"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/frontend/theme"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/safe"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/service"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/updater"
|
||||
"github.com/ProtonMail/proton-bridge/v3/pkg/keychain"
|
||||
"github.com/ProtonMail/proton-bridge/v3/pkg/ports"
|
||||
@ -51,8 +52,8 @@ func (s *Service) CheckTokens(ctx context.Context, clientConfigPath *wrapperspb.
|
||||
path := clientConfigPath.Value
|
||||
logEntry := s.log.WithField("path", path)
|
||||
|
||||
var clientConfig Config
|
||||
if err := clientConfig.load(path); err != nil {
|
||||
var clientConfig service.Config
|
||||
if err := clientConfig.Load(path); err != nil {
|
||||
logEntry.WithError(err).Error("Could not read gRPC client config file")
|
||||
|
||||
return nil, err
|
||||
|
||||
@ -110,7 +110,7 @@ func (s *Service) SendEvent(event *StreamEvent) error {
|
||||
}
|
||||
|
||||
// StartEventTest sends all the known event via gRPC.
|
||||
func (s *Service) StartEventTest() error { //nolint:funlen
|
||||
func (s *Service) StartEventTest() error {
|
||||
const dummyAddress = "dummy@proton.me"
|
||||
events := []*StreamEvent{
|
||||
// app
|
||||
@ -174,6 +174,7 @@ func (s *Service) StartEventTest() error { //nolint:funlen
|
||||
NewUserToggleSplitModeFinishedEvent("userID"),
|
||||
NewUserDisconnectedEvent("username"),
|
||||
NewUserChangedEvent("userID"),
|
||||
NewUsedBytesChangedEvent("userID", 1000),
|
||||
}
|
||||
|
||||
for _, event := range events {
|
||||
|
||||
@ -26,7 +26,3 @@ type Restarter interface {
|
||||
AddFlags(flags ...string)
|
||||
Override(exe string)
|
||||
}
|
||||
|
||||
type Locator interface {
|
||||
ProvideSettingsPath() (string, error)
|
||||
}
|
||||
|
||||
@ -26,6 +26,7 @@ import (
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/Masterminds/semver/v3"
|
||||
"github.com/ProtonMail/gluon/reporter"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/ProtonMail/proton-bridge/v3/pkg/restarter"
|
||||
@ -38,13 +39,23 @@ var skippedFunctions = []string{} //nolint:gochecknoglobals
|
||||
func init() { //nolint:gochecknoinits
|
||||
sentrySyncTransport := sentry.NewHTTPSyncTransport()
|
||||
sentrySyncTransport.Timeout = time.Second * 3
|
||||
appVersion := constants.Version
|
||||
version, _ := semver.NewVersion(appVersion)
|
||||
if version != nil {
|
||||
appVersion = version.Original()
|
||||
}
|
||||
|
||||
if err := sentry.Init(sentry.ClientOptions{
|
||||
Dsn: constants.DSNSentry,
|
||||
Release: constants.Revision,
|
||||
BeforeSend: EnhanceSentryEvent,
|
||||
Transport: sentrySyncTransport,
|
||||
}); err != nil {
|
||||
options := sentry.ClientOptions{
|
||||
Dsn: constants.DSNSentry,
|
||||
Release: constants.AppVersion(appVersion),
|
||||
BeforeSend: EnhanceSentryEvent,
|
||||
Transport: sentrySyncTransport,
|
||||
ServerName: getProtectedHostname(),
|
||||
Environment: constants.BuildEnv,
|
||||
MaxBreadcrumbs: 50,
|
||||
}
|
||||
|
||||
if err := sentry.Init(options); err != nil {
|
||||
logrus.WithError(err).Error("Failed to initialize sentry options")
|
||||
}
|
||||
|
||||
@ -80,10 +91,10 @@ func getProtectedHostname() string {
|
||||
}
|
||||
|
||||
// NewReporter creates new sentry reporter with appName and appVersion to report.
|
||||
func NewReporter(appName, appVersion string, identifier Identifier) *Reporter {
|
||||
func NewReporter(appName string, identifier Identifier) *Reporter {
|
||||
return &Reporter{
|
||||
appName: appName,
|
||||
appVersion: appVersion,
|
||||
appVersion: constants.Revision,
|
||||
identifier: identifier,
|
||||
hostArch: getHostArch(),
|
||||
serverName: getProtectedHostname(),
|
||||
@ -138,12 +149,11 @@ func (r *Reporter) scopedReport(context map[string]interface{}, doReport func())
|
||||
}
|
||||
|
||||
tags := map[string]string{
|
||||
"OS": runtime.GOOS,
|
||||
"Client": r.appName,
|
||||
"Version": r.appVersion,
|
||||
"UserAgent": r.identifier.GetUserAgent(),
|
||||
"HostArch": r.hostArch,
|
||||
"server_name": r.serverName,
|
||||
"OS": runtime.GOOS,
|
||||
"Client": r.appName,
|
||||
"Version": r.appVersion,
|
||||
"UserAgent": r.identifier.GetUserAgent(),
|
||||
"HostArch": r.hostArch,
|
||||
}
|
||||
|
||||
sentry.WithScope(func(scope *sentry.Scope) {
|
||||
|
||||
@ -15,11 +15,12 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package grpc
|
||||
package service
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// Config is a structure containing the service configuration data that are exchanged by the gRPC server and client.
|
||||
@ -53,8 +54,8 @@ func (s *Config) _save(path string) error {
|
||||
return json.NewEncoder(f).Encode(s)
|
||||
}
|
||||
|
||||
// load loads a gRPC service configuration from file.
|
||||
func (s *Config) load(path string) error {
|
||||
// Load loads a gRPC service configuration from file.
|
||||
func (s *Config) Load(path string) error {
|
||||
f, err := os.Open(path) //nolint:errcheck,gosec
|
||||
if err != nil {
|
||||
return err
|
||||
@ -64,3 +65,15 @@ func (s *Config) load(path string) error {
|
||||
|
||||
return json.NewDecoder(f).Decode(s)
|
||||
}
|
||||
|
||||
// SaveGRPCServerConfigFile saves the gRPC service configuration file.
func SaveGRPCServerConfigFile(locations Locator, config *Config, filename string) (string, error) {
settingsPath, err := locations.ProvideSettingsPath()
if err != nil {
return "", err
}

configPath := filepath.Join(settingsPath, filename)

return configPath, config.save(configPath)
}
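For context, the round trip looks roughly like the sketch below: the server side writes the config next to the settings, and the client side later loads it from the path it was handed. This is a hedged sketch assembled from the calls visible elsewhere in this diff (SaveGRPCServerConfigFile, Config.Load, serverConfigFileName); the surrounding variables are examples only.

// Server side (e.g. NewService): persist the gRPC config for clients to find.
path, err := service.SaveGRPCServerConfigFile(locations, &config, serverConfigFileName)
if err != nil {
	logrus.WithError(err).WithField("path", path).Panic("Could not write gRPC service config file")
}

// Client side (e.g. CheckTokens): read the config back from a known path.
var clientConfig service.Config
if err := clientConfig.Load(path); err != nil {
	logrus.WithError(err).Error("Could not read gRPC client config file")
}
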
@ -15,7 +15,7 @@
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package grpc
|
||||
package service
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
@ -46,11 +46,11 @@ func TestConfig(t *testing.T) {
|
||||
require.NoError(t, conf1.save(tempFilePath))
|
||||
|
||||
conf2 := Config{}
|
||||
require.NoError(t, conf2.load(tempFilePath))
|
||||
require.NoError(t, conf2.Load(tempFilePath))
|
||||
require.Equal(t, conf1, conf2)
|
||||
|
||||
// failure to load
|
||||
require.Error(t, conf2.load(tempFilePath+"_"))
|
||||
require.Error(t, conf2.Load(tempFilePath+"_"))
|
||||
|
||||
// failure to save
|
||||
require.Error(t, conf2.save(filepath.Join(tempDir, "non/existing/folder", tempFileName)))
|
||||
22  internal/service/types.go  Normal file
@ -0,0 +1,22 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.

package service

type Locator interface {
ProvideSettingsPath() (string, error)
}
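Locator is the only abstraction the service package needs from the location provider. A minimal sketch of a test double satisfying it (hypothetical; not part of the diff):

// fakeLocator is a hypothetical test double for service.Locator,
// returning a fixed settings directory.
type fakeLocator struct {
	dir string
}

func (f fakeLocator) ProvideSettingsPath() (string, error) {
	return f.dir, nil
}

Such a stub could, for example, be handed to SaveGRPCServerConfigFile in a unit test so the config is written into a temporary directory.
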
@ -142,7 +142,7 @@ func checksum(path string) (hash string) {
|
||||
|
||||
// srcDir including app folder.
|
||||
// dstDir including app folder.
|
||||
func copyRecursively(srcDir, dstDir string) error { //nolint:funlen
|
||||
func copyRecursively(srcDir, dstDir string) error {
|
||||
return filepath.Walk(srcDir, func(srcPath string, srcInfo os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@ -18,6 +18,7 @@
|
||||
package user
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
@ -65,6 +66,10 @@ func (user *User) handleAPIEvent(ctx context.Context, event proton.Event) error
|
||||
}
|
||||
}
|
||||
|
||||
if event.UsedSpace != nil {
|
||||
user.handleUsedSpaceChange(*event.UsedSpace)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -409,15 +414,13 @@ func (user *User) handleDeleteLabelEvent(ctx context.Context, event proton.Label
|
||||
}
|
||||
|
||||
// handleMessageEvents handles the given message events.
|
||||
func (user *User) handleMessageEvents(ctx context.Context, messageEvents []proton.MessageEvent) error { //nolint:funlen
|
||||
func (user *User) handleMessageEvents(ctx context.Context, messageEvents []proton.MessageEvent) error {
|
||||
for _, event := range messageEvents {
|
||||
ctx = logging.WithLogrusField(ctx, "messageID", event.ID)
|
||||
|
||||
switch event.Action {
|
||||
case proton.EventCreate:
|
||||
updates, err := user.handleCreateMessageEvent(
|
||||
logging.WithLogrusField(ctx, "action", "create message"),
|
||||
event)
|
||||
updates, err := user.handleCreateMessageEvent(logging.WithLogrusField(ctx, "action", "create message"), event)
|
||||
if err != nil {
|
||||
if rerr := user.reporter.ReportMessageWithContext("Failed to apply create message event", reporter.Context{
|
||||
"error": err,
|
||||
@ -501,7 +504,7 @@ func (user *User) handleMessageEvents(ctx context.Context, messageEvents []proto
|
||||
}
|
||||
|
||||
func (user *User) handleCreateMessageEvent(ctx context.Context, event proton.MessageEvent) ([]imap.Update, error) {
|
||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID)
|
||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||
if err != nil {
|
||||
// If the message is not found, it means that it has been deleted before we could fetch it.
|
||||
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
||||
@ -520,7 +523,7 @@ func (user *User) handleCreateMessageEvent(ctx context.Context, event proton.Mes
|
||||
|
||||
var update imap.Update
|
||||
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
||||
res := buildRFC822(user.apiLabels, full, addrKR)
|
||||
res := buildRFC822(user.apiLabels, full, addrKR, new(bytes.Buffer))
|
||||
|
||||
if res.err != nil {
|
||||
user.log.WithError(err).Error("Failed to build RFC822 message")
|
||||
@ -598,7 +601,7 @@ func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.Messa
|
||||
"subject": logging.Sensitive(event.Message.Subject),
|
||||
}).Info("Handling draft updated event")
|
||||
|
||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID)
|
||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||
if err != nil {
|
||||
// If the message is not found, it means that it has been deleted before we could fetch it.
|
||||
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
||||
@ -612,7 +615,7 @@ func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.Messa
|
||||
var update imap.Update
|
||||
|
||||
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
||||
res := buildRFC822(user.apiLabels, full, addrKR)
|
||||
res := buildRFC822(user.apiLabels, full, addrKR, new(bytes.Buffer))
|
||||
|
||||
if res.err != nil {
|
||||
logrus.WithError(err).Error("Failed to build RFC822 message")
|
||||
@ -653,6 +656,20 @@ func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.Messa
|
||||
}, user.apiUserLock, user.apiAddrsLock, user.apiLabelsLock, user.updateChLock)
|
||||
}
|
||||
|
||||
func (user *User) handleUsedSpaceChange(usedSpace int) {
safe.Lock(func() {
if user.apiUser.UsedSpace == usedSpace {
return
}

user.apiUser.UsedSpace = usedSpace
user.eventCh.Enqueue(events.UsedSpaceChanged{
UserID: user.apiUser.ID,
UsedSpace: usedSpace,
})
}, user.apiUserLock)
}
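The enqueued event carries only the user ID and the new byte count; judging from the fields accessed here and in the gRPC service hunk (event.UserID, event.UsedSpace), its definition is presumably shaped roughly like the sketch below. This is an assumption, since the events package is not part of this diff.

// Assumed shape of the internal event, inferred from how it is used in
// this diff; the real definition in internal/events may differ.
type UsedSpaceChanged struct {
	UserID    string
	UsedSpace int
}
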
|
||||
func getMailboxName(label proton.Label) []string {
|
||||
var name []string
|
||||
|
||||
|
||||
@ -264,8 +264,6 @@ func (conn *imapConnector) DeleteMailbox(ctx context.Context, labelID imap.Mailb
|
||||
}
|
||||
|
||||
// CreateMessage creates a new message on the remote.
|
||||
//
|
||||
// nolint:funlen
|
||||
func (conn *imapConnector) CreateMessage(
|
||||
ctx context.Context,
|
||||
mailboxID imap.MailboxID,
|
||||
@ -275,6 +273,10 @@ func (conn *imapConnector) CreateMessage(
|
||||
) (imap.Message, []byte, error) {
|
||||
defer conn.goPollAPIEvents(false)
|
||||
|
||||
if mailboxID == proton.AllMailLabel {
|
||||
return imap.Message{}, nil, fmt.Errorf("not allowed")
|
||||
}
|
||||
|
||||
// Compute the hash of the message (to match it against SMTP messages).
|
||||
hash, err := getMessageHash(literal)
|
||||
if err != nil {
|
||||
@ -288,7 +290,7 @@ func (conn *imapConnector) CreateMessage(
|
||||
conn.log.WithField("messageID", messageID).Warn("Message already sent")
|
||||
|
||||
// Query the server-side message.
|
||||
full, err := conn.client.GetFullMessage(ctx, messageID)
|
||||
full, err := conn.client.GetFullMessage(ctx, messageID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||
if err != nil {
|
||||
return imap.Message{}, nil, fmt.Errorf("failed to fetch message: %w", err)
|
||||
}
|
||||
@ -352,7 +354,7 @@ func (conn *imapConnector) CreateMessage(
|
||||
}
|
||||
|
||||
func (conn *imapConnector) GetMessageLiteral(ctx context.Context, id imap.MessageID) ([]byte, error) {
|
||||
msg, err := conn.client.GetFullMessage(ctx, string(id))
|
||||
msg, err := conn.client.GetFullMessage(ctx, string(id), newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -378,6 +380,10 @@ func (conn *imapConnector) GetMessageLiteral(ctx context.Context, id imap.Messag
|
||||
func (conn *imapConnector) AddMessagesToMailbox(ctx context.Context, messageIDs []imap.MessageID, mailboxID imap.MailboxID) error {
|
||||
defer conn.goPollAPIEvents(false)
|
||||
|
||||
if isAllMailOrScheduled(mailboxID) {
|
||||
return fmt.Errorf("not allowed")
|
||||
}
|
||||
|
||||
return conn.client.LabelMessages(ctx, mapTo[imap.MessageID, string](messageIDs), string(mailboxID))
|
||||
}
|
||||
|
||||
@ -385,6 +391,10 @@ func (conn *imapConnector) AddMessagesToMailbox(ctx context.Context, messageIDs
|
||||
func (conn *imapConnector) RemoveMessagesFromMailbox(ctx context.Context, messageIDs []imap.MessageID, mailboxID imap.MailboxID) error {
|
||||
defer conn.goPollAPIEvents(false)
|
||||
|
||||
if isAllMailOrScheduled(mailboxID) {
|
||||
return fmt.Errorf("not allowed")
|
||||
}
|
||||
|
||||
if err := conn.client.UnlabelMessages(ctx, mapTo[imap.MessageID, string](messageIDs), string(mailboxID)); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -429,7 +439,9 @@ func (conn *imapConnector) MoveMessages(ctx context.Context, messageIDs []imap.M
|
||||
defer conn.goPollAPIEvents(false)
|
||||
|
||||
if (labelFromID == proton.InboxLabel && labelToID == proton.SentLabel) ||
|
||||
(labelFromID == proton.SentLabel && labelToID == proton.InboxLabel) {
|
||||
(labelFromID == proton.SentLabel && labelToID == proton.InboxLabel) ||
|
||||
isAllMailOrScheduled(labelFromID) ||
|
||||
isAllMailOrScheduled(labelToID) {
|
||||
return false, fmt.Errorf("not allowed")
|
||||
}
|
||||
|
||||
@ -493,19 +505,20 @@ func (conn *imapConnector) GetUpdates() <-chan imap.Update {
|
||||
}, conn.updateChLock)
|
||||
}
|
||||
|
||||
// GetUIDValidity returns the default UID validity for this user.
|
||||
func (conn *imapConnector) GetUIDValidity() imap.UID {
|
||||
return conn.vault.GetUIDValidity(conn.addrID)
|
||||
}
|
||||
// GetMailboxVisibility returns the visibility of a mailbox over IMAP.
|
||||
func (conn *imapConnector) GetMailboxVisibility(_ context.Context, mailboxID imap.MailboxID) imap.MailboxVisibility {
|
||||
switch mailboxID {
|
||||
case proton.AllMailLabel:
|
||||
if atomic.LoadUint32(&conn.showAllMail) != 0 {
|
||||
return imap.Visible
|
||||
}
|
||||
return imap.Hidden
|
||||
|
||||
// SetUIDValidity sets the default UID validity for this user.
|
||||
func (conn *imapConnector) SetUIDValidity(validity imap.UID) error {
|
||||
return conn.vault.SetUIDValidity(conn.addrID, validity)
|
||||
}
|
||||
|
||||
// IsMailboxVisible returns whether this mailbox should be visible over IMAP.
|
||||
func (conn *imapConnector) IsMailboxVisible(_ context.Context, mailboxID imap.MailboxID) bool {
|
||||
return atomic.LoadUint32(&conn.showAllMail) != 0 || mailboxID != proton.AllMailLabel
|
||||
case proton.AllScheduledLabel:
|
||||
return imap.HiddenIfEmpty
|
||||
default:
|
||||
return imap.Visible
|
||||
}
|
||||
}
|
||||
|
||||
// Close the connector will no longer be used and all resources should be closed/released.
|
||||
@ -536,7 +549,7 @@ func (conn *imapConnector) importMessage(
|
||||
|
||||
messageID = msg.ID
|
||||
} else {
|
||||
res, err := stream.Collect(ctx, conn.client.ImportMessages(ctx, addrKR, 1, 1, []proton.ImportReq{{
|
||||
str, err := conn.client.ImportMessages(ctx, addrKR, 1, 1, []proton.ImportReq{{
|
||||
Metadata: proton.ImportMetadata{
|
||||
AddressID: conn.addrID,
|
||||
LabelIDs: labelIDs,
|
||||
@ -544,7 +557,12 @@ func (conn *imapConnector) importMessage(
|
||||
Flags: flags,
|
||||
},
|
||||
Message: literal,
|
||||
}}...))
|
||||
}}...)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to prepare message for import: %w", err)
|
||||
}
|
||||
|
||||
res, err := stream.Collect(ctx, str)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to import message: %w", err)
|
||||
}
|
||||
@ -554,7 +572,7 @@ func (conn *imapConnector) importMessage(
|
||||
|
||||
var err error
|
||||
|
||||
if full, err = conn.client.GetFullMessage(ctx, messageID); err != nil {
|
||||
if full, err = conn.client.GetFullMessage(ctx, messageID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator()); err != nil {
|
||||
return fmt.Errorf("failed to fetch message: %w", err)
|
||||
}
|
||||
|
||||
@ -601,7 +619,7 @@ func toIMAPMessage(message proton.MessageMetadata) imap.Message {
|
||||
}
|
||||
}
|
||||
|
||||
func (conn *imapConnector) createDraft(ctx context.Context, literal []byte, addrKR *crypto.KeyRing, sender proton.Address) (proton.Message, error) { //nolint:funlen
|
||||
func (conn *imapConnector) createDraft(ctx context.Context, literal []byte, addrKR *crypto.KeyRing, sender proton.Address) (proton.Message, error) {
|
||||
// Create a new message parser from the reader.
|
||||
parser, err := parser.New(bytes.NewReader(literal))
|
||||
if err != nil {
|
||||
@ -673,3 +691,7 @@ func toIMAPMailbox(label proton.Label, flags, permFlags, attrs imap.FlagSet) ima
|
||||
Attributes: attrs,
|
||||
}
|
||||
}
|
||||
|
||||
func isAllMailOrScheduled(mailboxID imap.MailboxID) bool {
|
||||
return (mailboxID == proton.AllMailLabel) || (mailboxID == proton.AllScheduledLabel)
|
||||
}
|
||||
|
||||
@ -218,8 +218,6 @@ func (h *sendRecorder) getWaitCh(hash string) (<-chan struct{}, bool) {
|
||||
// - the Content-Type header of each (leaf) part,
|
||||
// - the Content-Disposition header of each (leaf) part,
|
||||
// - the (decoded) body of each part.
|
||||
//
|
||||
// nolint:funlen
|
||||
func getMessageHash(b []byte) (string, error) {
|
||||
section := rfc822.Parse(b)
|
||||
|
||||
|
||||
@ -47,8 +47,6 @@ import (
|
||||
)
|
||||
|
||||
// sendMail sends an email from the given address to the given recipients.
|
||||
//
|
||||
// nolint:funlen
|
||||
func (user *User) sendMail(authID string, from string, to []string, r io.Reader) error {
|
||||
return safe.RLockRet(func() error {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@ -165,7 +163,7 @@ func (user *User) sendMail(authID string, from string, to []string, r io.Reader)
|
||||
}
|
||||
|
||||
// sendWithKey sends the message with the given address key.
|
||||
func sendWithKey( //nolint:funlen
|
||||
func sendWithKey(
|
||||
ctx context.Context,
|
||||
client *proton.Client,
|
||||
sentry reporter.Reporter,
|
||||
@ -247,7 +245,7 @@ func sendWithKey( //nolint:funlen
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func getParentID( //nolint:funlen
|
||||
func getParentID(
|
||||
ctx context.Context,
|
||||
client *proton.Client,
|
||||
authAddrID string,
|
||||
@ -375,7 +373,6 @@ func createDraft(
|
||||
})
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func createAttachments(
|
||||
ctx context.Context,
|
||||
client *proton.Client,
|
||||
@ -468,12 +465,12 @@ func getRecipients(
|
||||
prefs, err := parallel.MapContext(ctx, runtime.NumCPU(), addresses, func(ctx context.Context, recipient string) (proton.SendPreferences, error) {
|
||||
pubKeys, recType, err := client.GetPublicKeys(ctx, recipient)
|
||||
if err != nil {
|
||||
return proton.SendPreferences{}, fmt.Errorf("failed to get public keys: %w", err)
|
||||
return proton.SendPreferences{}, fmt.Errorf("failed to get public key for %v: %w", recipient, err)
|
||||
}
|
||||
|
||||
contactSettings, err := getContactSettings(ctx, client, userKR, recipient)
|
||||
if err != nil {
|
||||
return proton.SendPreferences{}, fmt.Errorf("failed to get contact settings: %w", err)
|
||||
return proton.SendPreferences{}, fmt.Errorf("failed to get contact settings for %v: %w", recipient, err)
|
||||
}
|
||||
|
||||
return buildSendPrefs(contactSettings, settings, pubKeys, draft.MIMEType, recType == proton.RecipientTypeInternal)
|
||||
|
||||
@ -18,6 +18,7 @@
|
||||
package user
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"runtime"
|
||||
@ -25,6 +26,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/gluon/logging"
|
||||
"github.com/ProtonMail/gluon/queue"
|
||||
"github.com/ProtonMail/gluon/reporter"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
@ -35,17 +37,37 @@ import (
|
||||
"github.com/bradenaw/juniper/parallel"
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/google/uuid"
|
||||
"github.com/pbnjay/memory"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
maxUpdateSize = 1 << 27 // 128 MiB
|
||||
maxBatchSize = 1 << 8 // 256
|
||||
)
|
||||
// syncSystemLabels ensures that system labels are all known to gluon.
|
||||
func (user *User) syncSystemLabels(ctx context.Context) error {
|
||||
return safe.RLockRet(func() error {
|
||||
var updates []imap.Update
|
||||
|
||||
// doSync begins syncing the users data.
|
||||
for _, label := range xslices.Filter(maps.Values(user.apiLabels), func(label proton.Label) bool { return label.Type == proton.LabelTypeSystem }) {
|
||||
if !wantLabel(label) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, updateCh := range xslices.Unique(maps.Values(user.updateCh)) {
|
||||
update := newSystemMailboxCreatedUpdate(imap.MailboxID(label.ID), label.Name)
|
||||
updateCh.Enqueue(update)
|
||||
updates = append(updates, update)
|
||||
}
|
||||
}
|
||||
if err := waitOnIMAPUpdates(ctx, updates); err != nil {
|
||||
return fmt.Errorf("could not sync system labels: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}, user.apiUserLock, user.apiAddrsLock, user.apiLabelsLock, user.updateChLock)
|
||||
}
|
||||
|
||||
// doSync begins syncing the user's data.
|
||||
// It first ensures the latest event ID is known; if not, it fetches it.
|
||||
// It sends a SyncStarted event and then either SyncFinished or SyncFailed
|
||||
// depending on whether the sync was successful.
|
||||
@ -89,7 +111,6 @@ func (user *User) doSync(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
func (user *User) sync(ctx context.Context) error {
|
||||
return safe.RLockRet(func() error {
|
||||
return withAddrKRs(user.apiUser, user.apiAddrs, user.vault.KeyPass(), func(_ *crypto.KeyRing, addrKRs map[string]*crypto.KeyRing) error {
|
||||
@ -143,7 +164,7 @@ func (user *User) sync(ctx context.Context) error {
|
||||
addrKRs,
|
||||
user.updateCh,
|
||||
user.eventCh,
|
||||
user.syncWorkers,
|
||||
user.maxSyncMemory,
|
||||
); err != nil {
|
||||
return fmt.Errorf("failed to sync messages: %w", err)
|
||||
}
|
||||
@ -212,7 +233,15 @@ func syncLabels(ctx context.Context, apiLabels map[string]proton.Label, updateCh
|
||||
return nil
|
||||
}
|
||||
|
||||
// nolint:funlen
|
||||
const Kilobyte = uint64(1024)
|
||||
const Megabyte = 1024 * Kilobyte
|
||||
const Gigabyte = 1024 * Megabyte
|
||||
|
||||
func toMB(v uint64) float64 {
|
||||
return float64(v) / float64(Megabyte)
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func syncMessages(
|
||||
ctx context.Context,
|
||||
userID string,
|
||||
@ -224,7 +253,7 @@ func syncMessages(
|
||||
addrKRs map[string]*crypto.KeyRing,
|
||||
updateCh map[string]*queue.QueuedChannel[imap.Update],
|
||||
eventCh *queue.QueuedChannel[events.Event],
|
||||
syncWorkers int,
|
||||
maxSyncMemory uint64,
|
||||
) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
@ -235,78 +264,330 @@ func syncMessages(
|
||||
|
||||
logrus.WithFields(logrus.Fields{
|
||||
"messages": len(messageIDs),
|
||||
"workers": syncWorkers,
|
||||
"numCPU": runtime.NumCPU(),
|
||||
}).Info("Starting message sync")
|
||||
|
||||
// Create the flushers, one per update channel.
|
||||
flushers := make(map[string]*flusher, len(updateCh))
|
||||
|
||||
for addrID, updateCh := range updateCh {
|
||||
flushers[addrID] = newFlusher(updateCh, maxUpdateSize)
|
||||
}
|
||||
|
||||
// Create a reporter to report sync progress updates.
|
||||
syncReporter := newSyncReporter(userID, eventCh, len(messageIDs), time.Second)
|
||||
defer syncReporter.done()
|
||||
|
||||
type flushUpdate struct {
|
||||
messageID string
|
||||
pushedUpdates []imap.Update
|
||||
batchLen int
|
||||
// Expected mem usage for this whole process should be the sum of MaxMessageBuildingMem and MaxDownloadRequestMem
// times x due to pipeline and all additional memory used by network requests and compression+io.

// There's no point in using more than 128MB of download data per stage, after that we reach a point of diminishing
// returns as we can't keep the pipeline fed fast enough.
const MaxDownloadRequestMem = 128 * Megabyte

// Any lower than this and we may fail to download messages.
const MinDownloadRequestMem = 40 * Megabyte

// This value can be increased to your heart's content. The more system memory the user has, the more messages
// we can build in parallel.
const MaxMessageBuildingMem = 128 * Megabyte
const MinMessageBuildingMem = 64 * Megabyte

// Maximum recommended value for parallel downloads by the API team.
const maxParallelDownloads = 20

totalMemory := memory.TotalMemory()

if maxSyncMemory >= totalMemory/2 {
logrus.Warnf("Requested max sync memory of %v MB is greater than half of system memory (%v MB), forcing to half of system memory",
maxSyncMemory, toMB(totalMemory/2))
maxSyncMemory = totalMemory / 2
}

if maxSyncMemory < 800*Megabyte {
logrus.Warnf("Requested max sync memory of %v MB, but minimum recommended is 800 MB, forcing max syncMemory to 800MB", toMB(maxSyncMemory))
maxSyncMemory = 800 * Megabyte
}

logrus.Debugf("Total System Memory: %v", toMB(totalMemory))

syncMaxDownloadRequestMem := MaxDownloadRequestMem
syncMaxMessageBuildingMem := MaxMessageBuildingMem

// If less than 2GB available try and limit max memory to 512 MB
switch {
case maxSyncMemory < 2*Gigabyte:
if maxSyncMemory < 800*Megabyte {
logrus.Warnf("System has less than 800MB of memory, you may experience issues syncing large mailboxes")
}
syncMaxDownloadRequestMem = MinDownloadRequestMem
syncMaxMessageBuildingMem = MinMessageBuildingMem
case maxSyncMemory == 2*Gigabyte:
// Increasing the max download capacity has very little effect on sync speed. We could increase the download
// memory but the user would see less sync notifications. A smaller value here leads to more frequent
// updates. Additionally, most of the sync time is spent in the message building.
syncMaxDownloadRequestMem = MaxDownloadRequestMem
// Currently limited so that if a user has multiple accounts active it also doesn't cause excessive memory usage.
syncMaxMessageBuildingMem = MaxMessageBuildingMem
default:
// Divide by 8 as download stage and build stage will use approx. 4x the specified memory.
remainingMemory := (maxSyncMemory - 2*Gigabyte) / 8
syncMaxDownloadRequestMem = MaxDownloadRequestMem + remainingMemory
syncMaxMessageBuildingMem = MaxMessageBuildingMem + remainingMemory
}

logrus.Debugf("Max memory usage for sync Download=%vMB Building=%vMB Predicted Max Total=%vMB",
toMB(syncMaxDownloadRequestMem),
toMB(syncMaxMessageBuildingMem),
toMB((syncMaxMessageBuildingMem*4)+(syncMaxDownloadRequestMem*4)),
)
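As a worked example of the budget arithmetic above (assuming a user-configured limit of 4 GiB on a machine with enough RAM that neither clamp fires, so the default branch is taken):

// Worked example of the default branch, for maxSyncMemory = 4 GiB.
remainingMemory := (4*Gigabyte - 2*Gigabyte) / 8                      // 256 MiB
syncMaxDownloadRequestMem := MaxDownloadRequestMem + remainingMemory  // 128 + 256 = 384 MiB
syncMaxMessageBuildingMem := MaxMessageBuildingMem + remainingMemory  // 128 + 256 = 384 MiB
// Predicted peak, per the Debugf above: 4*384 MiB + 4*384 MiB ≈ 3 GiB.
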
|
||||
type flushUpdate struct {
|
||||
messageID string
|
||||
err error
|
||||
batchLen int
|
||||
}
|
||||
|
||||
type downloadRequest struct {
|
||||
ids []string
|
||||
expectedSize uint64
|
||||
err error
|
||||
}
|
||||
|
||||
type downloadedMessageBatch struct {
|
||||
batch []proton.FullMessage
|
||||
}
|
||||
|
||||
type builtMessageBatch struct {
|
||||
batch []*buildRes
|
||||
}
|
||||
|
||||
downloadCh := make(chan downloadRequest)
|
||||
|
||||
buildCh := make(chan downloadedMessageBatch)
|
||||
|
||||
// The higher this value, the longer we can continue our download iteration before being blocked on channel writes
|
||||
// to the update flushing goroutine.
|
||||
flushCh := make(chan []*buildRes, 2)
|
||||
flushCh := make(chan builtMessageBatch)
|
||||
|
||||
// Allow up to 4 batched wait requests.
|
||||
flushUpdateCh := make(chan flushUpdate, 4)
|
||||
flushUpdateCh := make(chan flushUpdate)
|
||||
|
||||
errorCh := make(chan error, syncWorkers)
|
||||
errorCh := make(chan error, maxParallelDownloads*4)
|
||||
|
||||
// Go routine in charge of downloading message metadata
|
||||
logging.GoAnnotated(ctx, func(ctx context.Context) {
|
||||
defer close(downloadCh)
|
||||
const MetadataDataPageSize = 150
|
||||
|
||||
var downloadReq downloadRequest
|
||||
downloadReq.ids = make([]string, 0, MetadataDataPageSize)
|
||||
|
||||
metadataChunks := xslices.Chunk(messageIDs, MetadataDataPageSize)
|
||||
for i, metadataChunk := range metadataChunks {
|
||||
logrus.Debugf("Metadata Request (%v of %v), previous: %v", i, len(metadataChunks), len(downloadReq.ids))
|
||||
metadata, err := client.GetMessageMetadataPage(ctx, 0, len(metadataChunk), proton.MessageFilter{ID: metadataChunk})
|
||||
if err != nil {
|
||||
downloadReq.err = err
|
||||
select {
|
||||
case downloadCh <- downloadReq:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Build look up table so that messages are processed in the same order.
|
||||
metadataMap := make(map[string]int, len(metadata))
|
||||
for i, v := range metadata {
|
||||
metadataMap[v.ID] = i
|
||||
}
|
||||
|
||||
for i, id := range metadataChunk {
|
||||
m := &metadata[metadataMap[id]]
|
||||
nextSize := downloadReq.expectedSize + uint64(m.Size)
|
||||
if nextSize >= syncMaxDownloadRequestMem || len(downloadReq.ids) >= 256 {
|
||||
logrus.Debugf("Download Request Sent at %v of %v", i, len(metadata))
|
||||
select {
|
||||
case downloadCh <- downloadReq:
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
downloadReq.expectedSize = 0
|
||||
downloadReq.ids = make([]string, 0, MetadataDataPageSize)
|
||||
nextSize = uint64(m.Size)
|
||||
}
|
||||
downloadReq.ids = append(downloadReq.ids, id)
|
||||
downloadReq.expectedSize = nextSize
|
||||
}
|
||||
}
|
||||
|
||||
if len(downloadReq.ids) != 0 {
|
||||
logrus.Debugf("Sending remaining download request")
|
||||
select {
|
||||
case downloadCh <- downloadReq:
|
||||
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}, logging.Labels{"sync-stage": "meta-data"})
|
||||

    // Goroutine in charge of downloading and building messages in maxBatchSize batches.
    go func() {
        defer close(flushCh)
    logging.GoAnnotated(ctx, func(ctx context.Context) {
        defer close(buildCh)
        defer close(errorCh)
        defer func() {
            logrus.Debugf("sync downloader exit")
        }()

        attachmentDownloader := newAttachmentDownloader(ctx, client, maxParallelDownloads)
        defer attachmentDownloader.close()

        for request := range downloadCh {
            logrus.Debugf("Download request: %v MB:%v", len(request.ids), toMB(request.expectedSize))
            if request.err != nil {
                errorCh <- request.err
                return
            }

            for _, batch := range xslices.Chunk(messageIDs, maxBatchSize) {
                if ctx.Err() != nil {
                    errorCh <- ctx.Err()
                    return
                }

                result, err := parallel.MapContext(ctx, syncWorkers, batch, func(ctx context.Context, id string) (*buildRes, error) {
                    msg, err := client.GetFullMessage(ctx, id)
            result, err := parallel.MapContext(ctx, maxParallelDownloads, request.ids, func(ctx context.Context, id string) (proton.FullMessage, error) {
                var result proton.FullMessage

                msg, err := client.GetMessage(ctx, id)
                if err != nil {
                    return nil, err
                    return proton.FullMessage{}, err
                }

                if ctx.Err() != nil {
                    return nil, ctx.Err()
                attachments, err := attachmentDownloader.getAttachments(ctx, msg.Attachments)
                if err != nil {
                    return proton.FullMessage{}, err
                }

                return buildRFC822(apiLabels, msg, addrKRs[msg.AddressID]), nil
                result.Message = msg
                result.AttData = attachments

                return result, nil
            })
            if err != nil {
                errorCh <- err
                return
            }

            select {
            case buildCh <- downloadedMessageBatch{
                batch: result,
            }:

            case <-ctx.Done():
                return
            }
        }
    }, logging.Labels{"sync-stage": "download"})
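Each download request is then fetched with a bounded number of requests in flight, and results keep the order of the input IDs. The bridge does this with its parallel.MapContext helper; the sketch below shows the same shape using golang.org/x/sync/errgroup purely as an illustration:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

// fetchAll downloads items with at most `limit` requests in flight, keeping
// results in input order.
func fetchAll(ctx context.Context, ids []string, limit int, fetch func(context.Context, string) (string, error)) ([]string, error) {
    g, ctx := errgroup.WithContext(ctx)
    g.SetLimit(limit)

    out := make([]string, len(ids))
    for i, id := range ids {
        i, id := i, id // capture loop variables
        g.Go(func() error {
            res, err := fetch(ctx, id)
            if err != nil {
                return err
            }
            out[i] = res
            return nil
        })
    }

    if err := g.Wait(); err != nil {
        return nil, err
    }
    return out, nil
}

func main() {
    res, err := fetchAll(context.Background(), []string{"m1", "m2", "m3"}, 2,
        func(_ context.Context, id string) (string, error) { return "body of " + id, nil })
    fmt.Println(res, err)
}
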

    // Goroutine which builds messages after they have been downloaded.
    logging.GoAnnotated(ctx, func(ctx context.Context) {
        defer close(flushCh)
        defer func() {
            logrus.Debugf("sync builder exit")
        }()

        maxMessagesInParallel := runtime.NumCPU()

        for buildBatch := range buildCh {
            if ctx.Err() != nil {
                errorCh <- ctx.Err()
                return
            }

            flushCh <- result
            var expectedMemUsage uint64
            var chunks [][]proton.FullMessage

            {
                var lastIndex int
                var index int
                for _, v := range buildBatch.batch {
                    var dataSize uint64
                    for _, a := range v.Attachments {
                        dataSize += uint64(a.Size)
                    }

                    // 2x increase for attachments due to the extra memory needed for decrypting and writing
                    // into an in-memory buffer.
                    dataSize *= 2
                    dataSize += uint64(len(v.Body))

                    nextMemSize := expectedMemUsage + dataSize
                    if nextMemSize >= syncMaxMessageBuildingMem {
                        chunks = append(chunks, buildBatch.batch[lastIndex:index])
                        lastIndex = index
                        expectedMemUsage = dataSize
                    } else {
                        expectedMemUsage = nextMemSize
                    }

                    index++
                }

                if index < len(buildBatch.batch) {
                    chunks = append(chunks, buildBatch.batch[index:])
                } else if index == len(buildBatch.batch) && len(chunks) == 0 {
                    chunks = [][]proton.FullMessage{buildBatch.batch}
                }
            }

            for index, chunk := range chunks {
                logrus.Debugf("Build request: %v of %v count=%v", index, len(chunks), len(chunk))

                result, err := parallel.MapContext(ctx, maxMessagesInParallel, chunk, func(ctx context.Context, msg proton.FullMessage) (*buildRes, error) {
                    return buildRFC822(apiLabels, msg, addrKRs[msg.AddressID], new(bytes.Buffer)), nil
                })
                if err != nil {
                    return
                }

                select {
                case flushCh <- builtMessageBatch{result}:

                case <-ctx.Done():
                    return
                }
            }
        }
    }()
    }, logging.Labels{"sync-stage": "builder"})
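The builder estimates how much memory each message needs while it is decrypted and assembled: attachments are counted twice (the encrypted input plus the decrypted copy written to an in-memory buffer) and the body once, and chunks are cut so the running total stays below syncMaxMessageBuildingMem. A small worked sketch of that accounting (figures are illustrative):

package main

import "fmt"

// estimatedBuildCost mirrors the accounting above: attachments count double,
// the body counts once.
func estimatedBuildCost(bodyLen int, attachmentSizes []int64) uint64 {
    var att uint64
    for _, s := range attachmentSizes {
        att += uint64(s)
    }
    return att*2 + uint64(bodyLen)
}

func main() {
    // A 5 MiB body with 10 MiB of attachments counts as 25 MiB towards the build budget.
    fmt.Println(estimatedBuildCost(5<<20, []int64{10 << 20}))
}
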

    // Goroutine which converts the messages into updates and builds a waitable structure for progress tracking.
    go func() {
    logging.GoAnnotated(ctx, func(ctx context.Context) {
        defer close(flushUpdateCh)
        for batch := range flushCh {
            for _, res := range batch {
        defer func() {
            logrus.Debugf("sync flush exit")
        }()

        type updateTargetInfo struct {
            queueIndex int
            ch         *queue.QueuedChannel[imap.Update]
        }

        pendingUpdates := make([][]*imap.MessageCreated, len(updateCh))
        addressToIndex := make(map[string]updateTargetInfo)

        {
            i := 0
            for addrID, updateCh := range updateCh {
                addressToIndex[addrID] = updateTargetInfo{
                    ch:         updateCh,
                    queueIndex: i,
                }
                i++
            }
        }

        for downloadBatch := range flushCh {
            logrus.Debugf("Flush batch: %v", len(downloadBatch.batch))
            for _, res := range downloadBatch.batch {
                if res.err != nil {
                    if err := vault.AddFailedMessageID(res.messageID); err != nil {
                        logrus.WithError(err).Error("Failed to add failed message ID")
@ -327,31 +608,38 @@ func syncMessages(
                    }
                }

                flushers[res.addressID].push(res.update)
                targetInfo := addressToIndex[res.addressID]
                pendingUpdates[targetInfo.queueIndex] = append(pendingUpdates[targetInfo.queueIndex], res.update)
            }

            var pushedUpdates []imap.Update
            for _, flusher := range flushers {
                flusher.flush()
                pushedUpdates = append(pushedUpdates, flusher.collectPushedUpdates()...)
            for _, info := range addressToIndex {
                up := imap.NewMessagesCreated(true, pendingUpdates[info.queueIndex]...)
                info.ch.Enqueue(up)

                err, ok := up.WaitContext(ctx)
                if ok && err != nil {
                    flushUpdateCh <- flushUpdate{
                        err: fmt.Errorf("failed to apply sync update to gluon %v: %w", up.String(), err),
                    }
                    return
                }

                pendingUpdates[info.queueIndex] = pendingUpdates[info.queueIndex][:0]
            }

            flushUpdateCh <- flushUpdate{
                messageID:     batch[0].messageID,
                pushedUpdates: pushedUpdates,
                batchLen:      len(batch),
            select {
            case flushUpdateCh <- flushUpdate{
                messageID: downloadBatch.batch[0].messageID,
                err:       nil,
                batchLen:  len(downloadBatch.batch),
            }:
            case <-ctx.Done():
                return
            }
        }
    }()
    }, logging.Labels{"sync-stage": "flush"})

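The flush stage groups the built messages by their address so that each address's QueuedChannel receives one combined MessagesCreated update per batch. The sketch below shows just that grouping step with simplified types (it is not the bridge's API):

package main

import "fmt"

// groupByKey collects items into per-key batches, the way the flush stage
// gathers MessageCreated updates per address before enqueueing one combined
// update per address channel. keys and items must have equal length.
func groupByKey[T any](keys []string, items []T) map[string][]T {
    out := make(map[string][]T)
    for i, k := range keys {
        out[k] = append(out[k], items[i])
    }
    return out
}

func main() {
    fmt.Println(groupByKey([]string{"addr1", "addr2", "addr1"}, []int{1, 2, 3}))
}
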

    for flushUpdate := range flushUpdateCh {
        for _, up := range flushUpdate.pushedUpdates {
            err, ok := up.WaitContext(ctx)
            if ok && err != nil {
                return fmt.Errorf("failed to apply sync update to gluon %v: %w", up.String(), err)
            }
        }

        if err := vault.SetLastMessageID(flushUpdate.messageID); err != nil {
            return fmt.Errorf("failed to set last synced message ID: %w", err)
        }
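After every flushed batch the loop records the first message ID of that batch in the vault, so an interrupted sync can resume from roughly where it stopped instead of starting over. A toy version of that checkpointing idea (the in-memory store stands in for the vault):

package main

import "fmt"

// checkpoint is a stand-in for the vault's SetLastMessageID: after a batch
// has been fully applied, remember an ID from it so a restart can resume.
type checkpoint struct{ lastID string }

func (c *checkpoint) set(id string) { c.lastID = id }

func main() {
    var cp checkpoint

    for _, batch := range [][]string{{"m1", "m2"}, {"m3", "m4"}} {
        // ... apply the batch to the mail store ...
        cp.set(batch[0]) // the bridge records the first ID of the batch
    }

    fmt.Println("resume marker:", cp.lastID)
}
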
@ -394,6 +682,9 @@ func newSystemMailboxCreatedUpdate(labelID imap.MailboxID, labelName string) *im

    case proton.StarredLabel:
        attrs = attrs.Add(imap.AttrFlagged)

    case proton.AllScheduledLabel:
        labelName = "Scheduled" // API actual name is "All Scheduled"
    }

    return imap.NewMailboxCreated(imap.Mailbox{
@ -456,6 +747,9 @@ func wantLabel(label proton.Label) bool {
    case proton.StarredLabel:
        return true

    case proton.AllScheduledLabel:
        return true

    default:
        return false
    }
@ -471,3 +765,90 @@ func wantLabels(apiLabels map[string]proton.Label, labelIDs []string) []string {
        return wantLabel(apiLabel)
    })
}

type attachmentResult struct {
    attachment []byte
    err        error
}

type attachmentJob struct {
    id     string
    size   int64
    result chan attachmentResult
}

type attachmentDownloader struct {
    workerCh chan attachmentJob
    cancel   context.CancelFunc
}

func attachmentWorker(ctx context.Context, client *proton.Client, work <-chan attachmentJob) {
    for {
        select {
        case <-ctx.Done():
            return
        case job, ok := <-work:
            if !ok {
                return
            }
            var b bytes.Buffer
            b.Grow(int(job.size))
            err := client.GetAttachmentInto(ctx, job.id, &b)
            select {
            case <-ctx.Done():
                close(job.result)
                return
            case job.result <- attachmentResult{attachment: b.Bytes(), err: err}:
                close(job.result)
            }
        }
    }
}

func newAttachmentDownloader(ctx context.Context, client *proton.Client, workerCount int) *attachmentDownloader {
    workerCh := make(chan attachmentJob, (workerCount+2)*workerCount)
    ctx, cancel := context.WithCancel(ctx)
    for i := 0; i < workerCount; i++ {
        workerCh = make(chan attachmentJob)
        logging.GoAnnotated(ctx, func(ctx context.Context) { attachmentWorker(ctx, client, workerCh) }, logging.Labels{
            "sync": fmt.Sprintf("att-downloader %v", i),
        })
    }

    return &attachmentDownloader{
        workerCh: workerCh,
        cancel:   cancel,
    }
}

func (a *attachmentDownloader) getAttachments(ctx context.Context, attachments []proton.Attachment) ([][]byte, error) {
    resultChs := make([]chan attachmentResult, len(attachments))
    for i, id := range attachments {
        resultChs[i] = make(chan attachmentResult, 1)
        select {
        case a.workerCh <- attachmentJob{id: id.ID, result: resultChs[i], size: id.Size}:
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }

    result := make([][]byte, len(attachments))
    var err error
    for i := 0; i < len(attachments); i++ {
        select {
        case <-ctx.Done():
            return nil, ctx.Err()
        case r := <-resultChs[i]:
            if r.err != nil {
                err = fmt.Errorf("failed to get attachment %v: %w", attachments[i], r.err)
            }
            result[i] = r.attachment
        }
    }

    return result, err
}

func (a *attachmentDownloader) close() {
    a.cancel()
}

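getAttachments hands each attachment to the worker pool as a job carrying its own one-slot result channel, then collects the results in submission order, so per-attachment failures are reported without losing the others. A reduced, self-contained sketch of that pattern (the "download" here is just a formatted string, not Proton's client):

package main

import (
    "context"
    "fmt"
)

// Each job carries its own buffered result channel; workers send exactly one
// result per job, and the caller collects results in submission order.
type job struct {
    id     string
    result chan string
}

func worker(ctx context.Context, jobs <-chan job) {
    for {
        select {
        case <-ctx.Done():
            return
        case j, ok := <-jobs:
            if !ok {
                return
            }
            j.result <- "data for " + j.id
            close(j.result)
        }
    }
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    jobs := make(chan job)
    for i := 0; i < 3; i++ {
        go worker(ctx, jobs)
    }

    ids := []string{"att1", "att2", "att3"}
    results := make([]chan string, len(ids))
    for i, id := range ids {
        results[i] = make(chan string, 1)
        jobs <- job{id: id, result: results[i]}
    }

    for i := range results {
        fmt.Println(ids[i], "->", <-results[i])
    }
}
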
@ -48,16 +48,18 @@ func defaultJobOpts() message.JobOptions {
    }
}

func buildRFC822(apiLabels map[string]proton.Label, full proton.FullMessage, addrKR *crypto.KeyRing) *buildRes {
func buildRFC822(apiLabels map[string]proton.Label, full proton.FullMessage, addrKR *crypto.KeyRing, buffer *bytes.Buffer) *buildRes {
    var (
        update *imap.MessageCreated
        err    error
    )

    if literal, buildErr := message.BuildRFC822(addrKR, full.Message, full.AttData, defaultJobOpts()); buildErr != nil {
    buffer.Grow(full.Size)

    if buildErr := message.BuildRFC822Into(addrKR, full.Message, full.AttData, defaultJobOpts(), buffer); buildErr != nil {
        update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, buildErr)
        err = buildErr
    } else if created, parseErr := newMessageCreatedUpdate(apiLabels, full.MessageMetadata, literal); parseErr != nil {
    } else if created, parseErr := newMessageCreatedUpdate(apiLabels, full.MessageMetadata, buffer.Bytes()); parseErr != nil {
        update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, parseErr)
        err = parseErr
    } else {

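The change above swaps BuildRFC822 for BuildRFC822Into so the builder can pass in a bytes.Buffer pre-grown to the expected message size, avoiding a large allocation and copy per message. A generic sketch of the write-into-caller-buffer pattern (the encoder is a stand-in, not Proton's message builder):

package main

import (
    "bytes"
    "fmt"
)

// encodeInto writes its output into a caller-supplied buffer instead of
// returning a fresh []byte, so the caller controls allocation and can size
// (or reuse) the buffer up front.
func encodeInto(subject, body string, buf *bytes.Buffer) error {
    fmt.Fprintf(buf, "Subject: %s\r\n\r\n%s\r\n", subject, body)
    return nil
}

func main() {
    buf := new(bytes.Buffer)
    buf.Grow(len("hello") + len("world") + 32) // pre-size, like buffer.Grow(full.Size) above

    if err := encodeInto("hello", "world", buf); err != nil {
        panic(err)
    }

    fmt.Printf("%q\n", buf.Bytes())
}
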
@ -1,63 +0,0 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.

package user

import (
    "github.com/ProtonMail/gluon/imap"
    "github.com/ProtonMail/gluon/queue"
)

type flusher struct {
    updateCh      *queue.QueuedChannel[imap.Update]
    updates       []*imap.MessageCreated
    pushedUpdates []imap.Update

    maxUpdateSize int
    curChunkSize  int
}

func newFlusher(updateCh *queue.QueuedChannel[imap.Update], maxUpdateSize int) *flusher {
    return &flusher{
        updateCh:      updateCh,
        maxUpdateSize: maxUpdateSize,
    }
}

func (f *flusher) push(update *imap.MessageCreated) {
    f.updates = append(f.updates, update)

    if f.curChunkSize += len(update.Literal); f.curChunkSize >= f.maxUpdateSize {
        f.flush()
    }
}

func (f *flusher) flush() {
    if len(f.updates) > 0 {
        update := imap.NewMessagesCreated(true, f.updates...)
        f.updateCh.Enqueue(update)
        f.updates = nil
        f.curChunkSize = 0
        f.pushedUpdates = append(f.pushedUpdates, update)
    }
}

func (f *flusher) collectPushedUpdates() []imap.Update {
    updates := f.pushedUpdates
    f.pushedUpdates = nil
    return updates
}
@ -20,6 +20,7 @@ package user
import (
    "fmt"
    "reflect"
    "runtime"
    "strings"

    "github.com/ProtonMail/go-proton-api"
@ -91,3 +92,7 @@ func sortSlice[Item any](items []Item, less func(Item, Item) bool) []Item {

    return sorted
}

func newProtonAPIScheduler() proton.Scheduler {
    return proton.NewParallelScheduler(runtime.NumCPU() / 2)
}

@ -23,7 +23,6 @@ import (
    "errors"
    "fmt"
    "io"
    "net/url"
    "strings"
    "sync/atomic"
    "time"
@ -86,13 +85,12 @@ type User struct {
    pollAPIEventsCh chan chan struct{}
    goPollAPIEvents func(wait bool)

    syncWorkers int
    showAllMail uint32

    maxSyncMemory uint64
}

// New returns a new user.
//
// nolint:funlen
func New(
    ctx context.Context,
    encVault *vault.User,
@ -100,9 +98,9 @@ func New(
    reporter reporter.Reporter,
    apiUser proton.User,
    crashHandler async.PanicHandler,
    syncWorkers int,
    showAllMail bool,
) (*User, error) { //nolint:funlen
    maxSyncMemory uint64,
) (*User, error) {
    logrus.WithField("userID", apiUser.ID).Info("Creating new user")

    // Get the user's API addresses.
@ -144,8 +142,9 @@ func New(
        tasks:           async.NewGroup(context.Background(), crashHandler),
        pollAPIEventsCh: make(chan chan struct{}),

        syncWorkers: syncWorkers,
        showAllMail: b32(showAllMail),

        maxSyncMemory: maxSyncMemory,
    }

    // Initialize the user's update channels for its current address mode.
@ -191,7 +190,12 @@ func New(
    // Sync the user.
    user.syncAbort.Do(ctx, func(ctx context.Context) {
        if user.vault.SyncStatus().IsComplete() {
            user.log.Info("Sync already complete, skipping")
            user.log.Info("Sync already complete, only system label will be updated")
            if err := user.syncSystemLabels(ctx); err != nil {
                user.log.WithError(err).Error("Failed to update system labels")
                return
            }
            user.log.Info("System label update complete, starting API event stream")
            return
        }

@ -282,7 +286,6 @@ func (user *User) SetAddressMode(_ context.Context, mode vault.AddressMode) erro

    user.syncAbort.Abort()
    user.pollAbort.Abort()
    defer user.goSync()

    return safe.LockRet(func() error {
        if err := user.vault.SetAddressMode(mode); err != nil {
@ -414,8 +417,6 @@ func (user *User) NewIMAPConnectors() (map[string]connector.Connector, error) {
}

// SendMail sends an email from the given address to the given recipients.
//
// nolint:funlen
func (user *User) SendMail(authID string, from string, to []string, r io.Reader) error {
    if user.vault.SyncStatus().IsComplete() {
        defer user.goPollAPIEvents(true)
@ -618,8 +619,6 @@ func (user *User) startEvents(ctx context.Context) {
}

// doEventPoll is called whenever API events should be polled.
//
//nolint:funlen
func (user *User) doEventPoll(ctx context.Context) error {
    user.eventLock.Lock()
    defer user.eventLock.Unlock()
@ -647,11 +646,6 @@ func (user *User) doEventPoll(ctx context.Context) error {
        return fmt.Errorf("failed to handle event due to network issue: %w", err)
    }

    // If the error is a url.Error, return error to retry later.
    if urlErr := new(url.Error); errors.As(err, &urlErr) {
        return fmt.Errorf("failed to handle event due to URL issue: %w", err)
    }

    // If the error is a server-side issue, return error to retry later.
    if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status >= 500 {
        return fmt.Errorf("failed to handle event due to server error: %w", err)

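The event-poll error handling above classifies failures so that only transient ones (network problems, server-side errors) are returned for a later retry. A self-contained sketch of that classification style; the error type is invented for the example and only approximates proton.APIError:

package main

import (
    "errors"
    "fmt"
)

// apiError is a stand-in for a typed API error carrying an HTTP status.
type apiError struct{ status int }

func (e *apiError) Error() string { return fmt.Sprintf("api error: status %d", e.status) }

// shouldRetry reports whether an event-handling error looks transient:
// server-side failures (5xx) are retried later, everything else is not.
func shouldRetry(err error) bool {
    var apiErr *apiError
    if errors.As(err, &apiErr) && apiErr.status >= 500 {
        return true
    }
    return false
}

func main() {
    fmt.Println(shouldRetry(fmt.Errorf("wrapped: %w", &apiError{status: 503}))) // true
    fmt.Println(shouldRetry(errors.New("malformed event")))                     // false
}
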
@ -119,14 +119,14 @@ func withUser(tb testing.TB, ctx context.Context, _ *server.Server, m *proton.Ma
    saltedKeyPass, err := salts.SaltForKey([]byte(password), apiUser.Keys.Primary().ID)
    require.NoError(tb, err)

    vault, corrupt, err := vault.New(tb.TempDir(), tb.TempDir(), []byte("my secret key"))
    v, corrupt, err := vault.New(tb.TempDir(), tb.TempDir(), []byte("my secret key"))
    require.NoError(tb, err)
    require.False(tb, corrupt)

    vaultUser, err := vault.AddUser(apiUser.ID, username, username+"@pm.me", apiAuth.UID, apiAuth.RefreshToken, saltedKeyPass)
    vaultUser, err := v.AddUser(apiUser.ID, username, username+"@pm.me", apiAuth.UID, apiAuth.RefreshToken, saltedKeyPass)
    require.NoError(tb, err)

    user, err := New(ctx, vaultUser, client, nil, apiUser, nil, vault.SyncWorkers(), true)
    user, err := New(ctx, vaultUser, client, nil, apiUser, nil, true, vault.DefaultMaxSyncMemory)
    require.NoError(tb, err)
    defer user.Close()

@ -196,7 +196,7 @@ func (vault *Vault) SetLastVersion(version *semver.Version) error {
    })
}

// GetFirstStart sets whether this is the first time the bridge has been started.
// GetFirstStart returns whether this is the first time the bridge has been started.
func (vault *Vault) GetFirstStart() bool {
    return vault.get().Settings.FirstStart
}
@ -208,26 +208,20 @@ func (vault *Vault) SetFirstStart(firstStart bool) error {
    })
}

// SyncWorkers returns the number of workers to use for syncing.
func (vault *Vault) SyncWorkers() int {
    return vault.get().Settings.SyncWorkers
// GetMaxSyncMemory returns the maximum amount of memory the sync process should use.
func (vault *Vault) GetMaxSyncMemory() uint64 {
    v := vault.get().Settings.MaxSyncMemory
    // Can be zero if it was never written to the vault before.
    if v == 0 {
        return DefaultMaxSyncMemory
    }

    return v
}

// SetSyncWorkers sets the number of workers to use for syncing.
func (vault *Vault) SetSyncWorkers(workers int) error {
// SetMaxSyncMemory sets the maximum amount of memory the sync process should use.
func (vault *Vault) SetMaxSyncMemory(maxMemory uint64) error {
    return vault.mod(func(data *Data) {
        data.Settings.SyncWorkers = workers
    })
}

// SyncAttPool returns the size of the attachment pool.
func (vault *Vault) SyncAttPool() int {
    return vault.get().Settings.SyncAttPool
}

// SetSyncAttPool sets the size of the attachment pool.
func (vault *Vault) SetSyncAttPool(pool int) error {
    return vault.mod(func(data *Data) {
        data.Settings.SyncAttPool = pool
        data.Settings.MaxSyncMemory = maxMemory
    })
}

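MaxSyncMemory replaces the old worker-count and attachment-pool knobs with a single byte budget; the sync stages shown earlier cap their download and build batches by memory (syncMaxDownloadRequestMem, syncMaxMessageBuildingMem). How the budget is divided is not shown in this diff, so the split below is purely an assumption for illustration:

package main

import "fmt"

const defaultMaxSyncMemory uint64 = 2 * 1024 * 1024 * 1024 // 2 GiB, matching DefaultMaxSyncMemory

// splitBudget divides a total sync-memory budget between the download and
// build stages. The 50/50 ratio is an illustrative assumption, not the
// bridge's actual constants.
func splitBudget(total uint64) (downloadMem, buildMem uint64) {
    if total == 0 {
        total = defaultMaxSyncMemory
    }
    return total / 2, total / 2
}

func main() {
    dl, build := splitBudget(0)
    fmt.Printf("download: %d MiB, build: %d MiB\n", dl>>20, build>>20)
}
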
@ -208,11 +208,10 @@ func TestVault_Settings_FirstStart(t *testing.T) {
    require.Equal(t, false, s.GetFirstStart())
}

func TestVault_Settings_SyncWorkers(t *testing.T) {
func TestVault_Settings_MaxSyncMemory(t *testing.T) {
    // Create a new test vault.
    s := newVault(t)

    syncWorkers := vault.GetDefaultSyncWorkerCount()
    require.Equal(t, syncWorkers, s.SyncWorkers())
    require.Equal(t, syncWorkers, s.SyncAttPool())
    // Check the default max sync memory value.
    require.Equal(t, vault.DefaultMaxSyncMemory, s.GetMaxSyncMemory())
}

@ -19,7 +19,6 @@ package vault

import (
    "math/rand"
    "runtime"

    "github.com/ProtonMail/proton-bridge/v3/internal/updater"
)
@ -44,25 +43,12 @@ type Settings struct {
    LastVersion string
    FirstStart  bool

    SyncWorkers int
    SyncAttPool int
    MaxSyncMemory uint64
}

func GetDefaultSyncWorkerCount() int {
    const minSyncWorkers = 16

    syncWorkers := runtime.NumCPU() * 4

    if syncWorkers < minSyncWorkers {
        syncWorkers = minSyncWorkers
    }

    return syncWorkers
}
const DefaultMaxSyncMemory = 2 * 1024 * uint64(1024*1024)

func newDefaultSettings(gluonDir string) Settings {
    syncWorkers := GetDefaultSyncWorkerCount()

    return Settings{
        GluonDir: gluonDir,

@ -83,7 +69,6 @@ func newDefaultSettings(gluonDir string) Settings {
        LastVersion: "0.0.0",
        FirstStart:  true,

        SyncWorkers: syncWorkers,
        SyncAttPool: syncWorkers,
        MaxSyncMemory: DefaultMaxSyncMemory,
    }
}

@ -17,8 +17,6 @@

package vault

import "github.com/ProtonMail/gluon/imap"

// UserData holds information about a single bridge user.
// The user may or may not be logged in.
type UserData struct {
@ -28,7 +26,6 @@ type UserData struct {

    GluonKey    []byte
    GluonIDs    map[string]string
    UIDValidity map[string]imap.UID
    BridgePass  []byte // raw token represented as byte slice (needs to be encoded)
    AddressMode AddressMode

@ -79,7 +76,6 @@ func newDefaultUser(userID, username, primaryEmail, authUID, authRef string, key

    GluonKey:    newRandomToken(32),
    GluonIDs:    make(map[string]string),
    UIDValidity: make(map[string]imap.UID),
    BridgePass:  newRandomToken(16),
    AddressMode: CombinedMode,

@ -20,7 +20,6 @@ package vault
import (
    "fmt"

    "github.com/ProtonMail/gluon/imap"
    "github.com/bradenaw/juniper/xslices"
    "golang.org/x/exp/slices"
)
@ -81,24 +80,6 @@ func (user *User) RemoveGluonID(addrID, gluonID string) error {
    return err
}

func (user *User) GetUIDValidity(addrID string) imap.UID {
    if validity, ok := user.vault.getUser(user.userID).UIDValidity[addrID]; ok {
        return validity
    }

    if err := user.SetUIDValidity(addrID, 1000); err != nil {
        panic(err)
    }

    return user.GetUIDValidity(addrID)
}

func (user *User) SetUIDValidity(addrID string, validity imap.UID) error {
    return user.vault.modUser(user.userID, func(data *UserData) {
        data.UIDValidity[addrID] = validity
    })
}

// AddressMode returns the user's address mode.
func (user *User) AddressMode() AddressMode {
    return user.vault.getUser(user.userID).AddressMode
@ -208,10 +189,6 @@ func (user *User) ClearSyncStatus() error {
        data.SyncStatus = SyncStatus{}

        data.EventID = ""

        for addrID := range data.UIDValidity {
            data.UIDValidity[addrID]++
        }
    })
}
