mirror of
https://github.com/ProtonMail/proton-bridge.git
synced 2025-12-10 04:36:43 +00:00
GODT-2224: Refactor bridge sync to use less memory
Updates go-proton-api and Gluon to include memory reduction changes and modifies the sync process to take into account how much memory is used during the sync stage. The sync process now has an extra stage which first downloads the message metadata to ensure that we only download up to `syncMaxDownloadRequesMem` messages or 250 messages total. This allows for scaling the download request automatically to accommodate many small or few very large messages. The IDs are then sent to a download go-routine which downloads the message and its attachments. The result is then forwarded to another go-routine which builds the actual message. This stage tries to ensure that we don't use more than `syncMaxMessageBuildingMem` to build these messages. Finally the result is sent to a last go-routine which applies the changes to Gluon and waits for them to be completed. The new process is currently limited to 2GB. Dynamic scaling will be implemented in a follow-up. For systems with less than 2GB of memory we limit the values to a set of values that is known to work.
This commit is contained in:
@ -53,6 +53,7 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||||||
* [html2text](https://github.com/jaytaylor/html2text) available under [license](https://github.com/jaytaylor/html2text/blob/master/LICENSE)
|
* [html2text](https://github.com/jaytaylor/html2text) available under [license](https://github.com/jaytaylor/html2text/blob/master/LICENSE)
|
||||||
* [go-keychain](https://github.com/keybase/go-keychain) available under [license](https://github.com/keybase/go-keychain/blob/master/LICENSE)
|
* [go-keychain](https://github.com/keybase/go-keychain) available under [license](https://github.com/keybase/go-keychain/blob/master/LICENSE)
|
||||||
* [dns](https://github.com/miekg/dns) available under [license](https://github.com/miekg/dns/blob/master/LICENSE)
|
* [dns](https://github.com/miekg/dns) available under [license](https://github.com/miekg/dns/blob/master/LICENSE)
|
||||||
|
* [memory](https://github.com/pbnjay/memory) available under [license](https://github.com/pbnjay/memory/blob/master/LICENSE)
|
||||||
* [errors](https://github.com/pkg/errors) available under [license](https://github.com/pkg/errors/blob/master/LICENSE)
|
* [errors](https://github.com/pkg/errors) available under [license](https://github.com/pkg/errors/blob/master/LICENSE)
|
||||||
* [profile](https://github.com/pkg/profile) available under [license](https://github.com/pkg/profile/blob/master/LICENSE)
|
* [profile](https://github.com/pkg/profile) available under [license](https://github.com/pkg/profile/blob/master/LICENSE)
|
||||||
* [logrus](https://github.com/sirupsen/logrus) available under [license](https://github.com/sirupsen/logrus/blob/master/LICENSE)
|
* [logrus](https://github.com/sirupsen/logrus) available under [license](https://github.com/sirupsen/logrus/blob/master/LICENSE)
|
||||||
@ -114,6 +115,7 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||||||
* [reflect2](https://github.com/modern-go/reflect2) available under [license](https://github.com/modern-go/reflect2/blob/master/LICENSE)
|
* [reflect2](https://github.com/modern-go/reflect2) available under [license](https://github.com/modern-go/reflect2/blob/master/LICENSE)
|
||||||
* [tablewriter](https://github.com/olekukonko/tablewriter) available under [license](https://github.com/olekukonko/tablewriter/blob/master/LICENSE)
|
* [tablewriter](https://github.com/olekukonko/tablewriter) available under [license](https://github.com/olekukonko/tablewriter/blob/master/LICENSE)
|
||||||
* [go-toml](https://github.com/pelletier/go-toml/v2) available under [license](https://github.com/pelletier/go-toml/v2/blob/master/LICENSE)
|
* [go-toml](https://github.com/pelletier/go-toml/v2) available under [license](https://github.com/pelletier/go-toml/v2/blob/master/LICENSE)
|
||||||
|
* [lz4](https://github.com/pierrec/lz4/v4) available under [license](https://github.com/pierrec/lz4/v4/blob/master/LICENSE)
|
||||||
* [go-difflib](https://github.com/pmezard/go-difflib) available under [license](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
|
* [go-difflib](https://github.com/pmezard/go-difflib) available under [license](https://github.com/pmezard/go-difflib/blob/master/LICENSE)
|
||||||
* [procfs](https://github.com/prometheus/procfs) available under [license](https://github.com/prometheus/procfs/blob/master/LICENSE)
|
* [procfs](https://github.com/prometheus/procfs) available under [license](https://github.com/prometheus/procfs/blob/master/LICENSE)
|
||||||
* [uniseg](https://github.com/rivo/uniseg) available under [license](https://github.com/rivo/uniseg/blob/master/LICENSE)
|
* [uniseg](https://github.com/rivo/uniseg) available under [license](https://github.com/rivo/uniseg/blob/master/LICENSE)
|
||||||
|
|||||||
6
go.mod
6
go.mod
@ -5,9 +5,9 @@ go 1.18
|
|||||||
require (
|
require (
|
||||||
github.com/0xAX/notificator v0.0.0-20220220101646-ee9b8921e557
|
github.com/0xAX/notificator v0.0.0-20220220101646-ee9b8921e557
|
||||||
github.com/Masterminds/semver/v3 v3.1.1
|
github.com/Masterminds/semver/v3 v3.1.1
|
||||||
github.com/ProtonMail/gluon v0.14.2-0.20230127085305-bc2d818d9d13
|
github.com/ProtonMail/gluon v0.14.2-0.20230130104154-2c64e59b8f54
|
||||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a
|
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a
|
||||||
github.com/ProtonMail/go-proton-api v0.3.1-0.20230126112849-3c1ac277855e
|
github.com/ProtonMail/go-proton-api v0.3.1-0.20230130093944-dd1190680368
|
||||||
github.com/ProtonMail/go-rfc5322 v0.11.0
|
github.com/ProtonMail/go-rfc5322 v0.11.0
|
||||||
github.com/ProtonMail/gopenpgp/v2 v2.4.10
|
github.com/ProtonMail/gopenpgp/v2 v2.4.10
|
||||||
github.com/PuerkitoBio/goquery v1.8.0
|
github.com/PuerkitoBio/goquery v1.8.0
|
||||||
@ -35,6 +35,7 @@ require (
|
|||||||
github.com/jaytaylor/html2text v0.0.0-20211105163654-bc68cce691ba
|
github.com/jaytaylor/html2text v0.0.0-20211105163654-bc68cce691ba
|
||||||
github.com/keybase/go-keychain v0.0.0
|
github.com/keybase/go-keychain v0.0.0
|
||||||
github.com/miekg/dns v1.1.50
|
github.com/miekg/dns v1.1.50
|
||||||
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58
|
||||||
github.com/pkg/errors v0.9.1
|
github.com/pkg/errors v0.9.1
|
||||||
github.com/pkg/profile v1.6.0
|
github.com/pkg/profile v1.6.0
|
||||||
github.com/sirupsen/logrus v1.9.0
|
github.com/sirupsen/logrus v1.9.0
|
||||||
@ -99,6 +100,7 @@ require (
|
|||||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||||
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.17 // indirect
|
||||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||||
github.com/prometheus/procfs v0.8.0 // indirect
|
github.com/prometheus/procfs v0.8.0 // indirect
|
||||||
github.com/rivo/uniseg v0.4.2 // indirect
|
github.com/rivo/uniseg v0.4.2 // indirect
|
||||||
|
|||||||
12
go.sum
12
go.sum
@ -28,8 +28,8 @@ github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs
|
|||||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
||||||
github.com/ProtonMail/docker-credential-helpers v1.1.0 h1:+kvUIpwWcbtP3WFv5sSvkFn/XLzSqPOB5AAthuk9xPk=
|
github.com/ProtonMail/docker-credential-helpers v1.1.0 h1:+kvUIpwWcbtP3WFv5sSvkFn/XLzSqPOB5AAthuk9xPk=
|
||||||
github.com/ProtonMail/docker-credential-helpers v1.1.0/go.mod h1:mK0aBveCxhnQ756AmaTfXMZDeULvheYVhF/MWMErN5g=
|
github.com/ProtonMail/docker-credential-helpers v1.1.0/go.mod h1:mK0aBveCxhnQ756AmaTfXMZDeULvheYVhF/MWMErN5g=
|
||||||
github.com/ProtonMail/gluon v0.14.2-0.20230127085305-bc2d818d9d13 h1:rljNZVgfq/F1LLyJ4NmCfEzWayC/rk+l9QgJjtQTLKI=
|
github.com/ProtonMail/gluon v0.14.2-0.20230130104154-2c64e59b8f54 h1:uUg8CDiYTMlbvGijzoN0fb72vwDJD7hMjgNTbmAHxRc=
|
||||||
github.com/ProtonMail/gluon v0.14.2-0.20230127085305-bc2d818d9d13/go.mod h1:z2AxLIiBCT1K+0OBHyaDI7AEaO5qI6/BEC2TE42vs4Q=
|
github.com/ProtonMail/gluon v0.14.2-0.20230130104154-2c64e59b8f54/go.mod h1:HYHr7hG7LPWI1S50M8NfHRb1kYi5B+Yu4/N/H+y+JUY=
|
||||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a h1:D+aZah+k14Gn6kmL7eKxoo/4Dr/lK3ChBcwce2+SQP4=
|
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a h1:D+aZah+k14Gn6kmL7eKxoo/4Dr/lK3ChBcwce2+SQP4=
|
||||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a/go.mod h1:oTGdE7/DlWIr23G0IKW3OXK9wZ5Hw1GGiaJFccTvZi4=
|
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a/go.mod h1:oTGdE7/DlWIr23G0IKW3OXK9wZ5Hw1GGiaJFccTvZi4=
|
||||||
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
|
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
|
||||||
@ -41,8 +41,8 @@ github.com/ProtonMail/go-message v0.0.0-20210611055058-fabeff2ec753/go.mod h1:NB
|
|||||||
github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f/go.mod h1:NYt+V3/4rEeDuaev/zw1zCq8uqVEuPHzDPo3OZrlGJ4=
|
github.com/ProtonMail/go-mime v0.0.0-20220302105931-303f85f7fe0f/go.mod h1:NYt+V3/4rEeDuaev/zw1zCq8uqVEuPHzDPo3OZrlGJ4=
|
||||||
github.com/ProtonMail/go-mime v0.0.0-20220429130430-2192574d760f h1:4IWzKjHzZxdrW9k4zl/qCwenOVHDbVDADPPHFLjs0Oc=
|
github.com/ProtonMail/go-mime v0.0.0-20220429130430-2192574d760f h1:4IWzKjHzZxdrW9k4zl/qCwenOVHDbVDADPPHFLjs0Oc=
|
||||||
github.com/ProtonMail/go-mime v0.0.0-20220429130430-2192574d760f/go.mod h1:qRZgbeASl2a9OwmsV85aWwRqic0NHPh+9ewGAzb4cgM=
|
github.com/ProtonMail/go-mime v0.0.0-20220429130430-2192574d760f/go.mod h1:qRZgbeASl2a9OwmsV85aWwRqic0NHPh+9ewGAzb4cgM=
|
||||||
github.com/ProtonMail/go-proton-api v0.3.1-0.20230126112849-3c1ac277855e h1:UkfLQc44UvknNCLoBEZb1qg7zfVWVLMvCE/LtdVEcAw=
|
github.com/ProtonMail/go-proton-api v0.3.1-0.20230130093944-dd1190680368 h1:XWPaCK8ctgpvI+ZAYcsFiYWbuvkv3T4+WZq0o6Gnt2s=
|
||||||
github.com/ProtonMail/go-proton-api v0.3.1-0.20230126112849-3c1ac277855e/go.mod h1:JUo5IQG0hNuPRuDpOUsCOvtee6UjTEHHF1QN2i8RSos=
|
github.com/ProtonMail/go-proton-api v0.3.1-0.20230130093944-dd1190680368/go.mod h1:JUo5IQG0hNuPRuDpOUsCOvtee6UjTEHHF1QN2i8RSos=
|
||||||
github.com/ProtonMail/go-rfc5322 v0.11.0 h1:o5Obrm4DpmQEffvgsVqG6S4BKwC1Wat+hYwjIp2YcCY=
|
github.com/ProtonMail/go-rfc5322 v0.11.0 h1:o5Obrm4DpmQEffvgsVqG6S4BKwC1Wat+hYwjIp2YcCY=
|
||||||
github.com/ProtonMail/go-rfc5322 v0.11.0/go.mod h1:6oOKr0jXvpoE6pwTx/HukigQpX2J9WUf6h0auplrFTw=
|
github.com/ProtonMail/go-rfc5322 v0.11.0/go.mod h1:6oOKr0jXvpoE6pwTx/HukigQpX2J9WUf6h0auplrFTw=
|
||||||
github.com/ProtonMail/go-srp v0.0.5 h1:xhUioxZgDbCnpo9JehyFhwwsn9JLWkUGfB0oiKXgiGg=
|
github.com/ProtonMail/go-srp v0.0.5 h1:xhUioxZgDbCnpo9JehyFhwwsn9JLWkUGfB0oiKXgiGg=
|
||||||
@ -314,9 +314,13 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn
|
|||||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||||
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=
|
||||||
|
github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=
|
||||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||||
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
|
github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.17 h1:kV4Ip+/hUBC+8T6+2EgburRtkE9ef4nbY3f4dFhGjMc=
|
||||||
|
github.com/pierrec/lz4/v4 v4.1.17/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||||
|
|||||||
@ -205,7 +205,7 @@ func run(c *cli.Context) error { //nolint:funlen
|
|||||||
// Ensure we are the only instance running.
|
// Ensure we are the only instance running.
|
||||||
return withSingleInstance(locations, version, func() error {
|
return withSingleInstance(locations, version, func() error {
|
||||||
// Unlock the encrypted vault.
|
// Unlock the encrypted vault.
|
||||||
return WithVault(locations, func(vault *vault.Vault, insecure, corrupt bool) error {
|
return WithVault(locations, func(v *vault.Vault, insecure, corrupt bool) error {
|
||||||
// Report insecure vault.
|
// Report insecure vault.
|
||||||
if insecure {
|
if insecure {
|
||||||
_ = reporter.ReportMessageWithContext("Vault is insecure", map[string]interface{}{})
|
_ = reporter.ReportMessageWithContext("Vault is insecure", map[string]interface{}{})
|
||||||
@ -216,27 +216,39 @@ func run(c *cli.Context) error { //nolint:funlen
|
|||||||
_ = reporter.ReportMessageWithContext("Vault is corrupt", map[string]interface{}{})
|
_ = reporter.ReportMessageWithContext("Vault is corrupt", map[string]interface{}{})
|
||||||
}
|
}
|
||||||
|
|
||||||
if !vault.Migrated() {
|
// Force re-sync if last version <= 3.0.12 due to chances in the gluon cache format.
|
||||||
|
if lastVersion := v.GetLastVersion(); lastVersion != nil {
|
||||||
|
versionWithLZ4Cache := semver.MustParse("3.0.13")
|
||||||
|
if lastVersion.LessThan(versionWithLZ4Cache) {
|
||||||
|
if err := v.ForUser(1, func(user *vault.User) error {
|
||||||
|
return user.ClearSyncStatus()
|
||||||
|
}); err != nil {
|
||||||
|
logrus.WithError(err).Error("Failed to force resync on user")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !v.Migrated() {
|
||||||
// Migrate old settings into the vault.
|
// Migrate old settings into the vault.
|
||||||
if err := migrateOldSettings(vault); err != nil {
|
if err := migrateOldSettings(v); err != nil {
|
||||||
logrus.WithError(err).Error("Failed to migrate old settings")
|
logrus.WithError(err).Error("Failed to migrate old settings")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Migrate old accounts into the vault.
|
// Migrate old accounts into the vault.
|
||||||
if err := migrateOldAccounts(locations, vault); err != nil {
|
if err := migrateOldAccounts(locations, v); err != nil {
|
||||||
logrus.WithError(err).Error("Failed to migrate old accounts")
|
logrus.WithError(err).Error("Failed to migrate old accounts")
|
||||||
}
|
}
|
||||||
|
|
||||||
// The vault has been migrated.
|
// The vault has been migrated.
|
||||||
if err := vault.SetMigrated(); err != nil {
|
if err := v.SetMigrated(); err != nil {
|
||||||
logrus.WithError(err).Error("Failed to mark vault as migrated")
|
logrus.WithError(err).Error("Failed to mark vault as migrated")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Load the cookies from the vault.
|
// Load the cookies from the vault.
|
||||||
return withCookieJar(vault, func(cookieJar http.CookieJar) error {
|
return withCookieJar(v, func(cookieJar http.CookieJar) error {
|
||||||
// Create a new bridge instance.
|
// Create a new bridge instance.
|
||||||
return withBridge(c, exe, locations, version, identifier, crashHandler, reporter, vault, cookieJar, func(b *bridge.Bridge, eventCh <-chan events.Event) error {
|
return withBridge(c, exe, locations, version, identifier, crashHandler, reporter, v, cookieJar, func(b *bridge.Bridge, eventCh <-chan events.Event) error {
|
||||||
if insecure {
|
if insecure {
|
||||||
logrus.Warn("The vault key could not be retrieved; the vault will not be encrypted")
|
logrus.Warn("The vault key could not be retrieved; the vault will not be encrypted")
|
||||||
b.PushError(bridge.ErrVaultInsecure)
|
b.PushError(bridge.ErrVaultInsecure)
|
||||||
|
|||||||
@ -265,14 +265,6 @@ func migratePrefsToVault(vault *vault.Vault, b []byte) error {
|
|||||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate show all mail: %w", err))
|
errs = multierror.Append(errs, fmt.Errorf("failed to migrate show all mail: %w", err))
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := vault.SetSyncWorkers(prefs.FetchWorkers); err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate sync workers: %w", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := vault.SetSyncAttPool(prefs.AttachmentWorkers); err != nil {
|
|
||||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate sync attachment pool: %w", err))
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := vault.SetCookies([]byte(prefs.Cookies)); err != nil {
|
if err := vault.SetCookies([]byte(prefs.Cookies)); err != nil {
|
||||||
errs = multierror.Append(errs, fmt.Errorf("failed to migrate cookies: %w", err))
|
errs = multierror.Append(errs, fmt.Errorf("failed to migrate cookies: %w", err))
|
||||||
}
|
}
|
||||||
|
|||||||
@ -68,8 +68,6 @@ func TestMigratePrefsToVault(t *testing.T) {
|
|||||||
require.True(t, vault.GetAutostart())
|
require.True(t, vault.GetAutostart())
|
||||||
|
|
||||||
// Check that the other app settings have been migrated.
|
// Check that the other app settings have been migrated.
|
||||||
require.Equal(t, 16, vault.SyncWorkers())
|
|
||||||
require.Equal(t, 16, vault.SyncAttPool())
|
|
||||||
require.False(t, vault.GetProxyAllowed())
|
require.False(t, vault.GetProxyAllowed())
|
||||||
require.False(t, vault.GetShowAllMail())
|
require.False(t, vault.GetShowAllMail())
|
||||||
|
|
||||||
|
|||||||
@ -32,14 +32,12 @@ func defaultAPIOptions(
|
|||||||
version *semver.Version,
|
version *semver.Version,
|
||||||
cookieJar http.CookieJar,
|
cookieJar http.CookieJar,
|
||||||
transport http.RoundTripper,
|
transport http.RoundTripper,
|
||||||
poolSize int,
|
|
||||||
) []proton.Option {
|
) []proton.Option {
|
||||||
return []proton.Option{
|
return []proton.Option{
|
||||||
proton.WithHostURL(apiURL),
|
proton.WithHostURL(apiURL),
|
||||||
proton.WithAppVersion(constants.AppVersion(version.Original())),
|
proton.WithAppVersion(constants.AppVersion(version.Original())),
|
||||||
proton.WithCookieJar(cookieJar),
|
proton.WithCookieJar(cookieJar),
|
||||||
proton.WithTransport(transport),
|
proton.WithTransport(transport),
|
||||||
proton.WithAttPoolSize(poolSize),
|
|
||||||
proton.WithLogger(logrus.StandardLogger()),
|
proton.WithLogger(logrus.StandardLogger()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -32,7 +32,6 @@ func newAPIOptions(
|
|||||||
version *semver.Version,
|
version *semver.Version,
|
||||||
cookieJar http.CookieJar,
|
cookieJar http.CookieJar,
|
||||||
transport http.RoundTripper,
|
transport http.RoundTripper,
|
||||||
poolSize int,
|
|
||||||
) []proton.Option {
|
) []proton.Option {
|
||||||
return defaultAPIOptions(apiURL, version, cookieJar, transport, poolSize)
|
return defaultAPIOptions(apiURL, version, cookieJar, transport)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -33,9 +33,8 @@ func newAPIOptions(
|
|||||||
version *semver.Version,
|
version *semver.Version,
|
||||||
cookieJar http.CookieJar,
|
cookieJar http.CookieJar,
|
||||||
transport http.RoundTripper,
|
transport http.RoundTripper,
|
||||||
poolSize int,
|
|
||||||
) []proton.Option {
|
) []proton.Option {
|
||||||
opt := defaultAPIOptions(apiURL, version, cookieJar, transport, poolSize)
|
opt := defaultAPIOptions(apiURL, version, cookieJar, transport)
|
||||||
|
|
||||||
if host := os.Getenv("BRIDGE_API_HOST"); host != "" {
|
if host := os.Getenv("BRIDGE_API_HOST"); host != "" {
|
||||||
opt = append(opt, proton.WithHostURL(host))
|
opt = append(opt, proton.WithHostURL(host))
|
||||||
|
|||||||
@ -145,7 +145,7 @@ func New( //nolint:funlen
|
|||||||
logSMTP bool, // whether to log SMTP activity
|
logSMTP bool, // whether to log SMTP activity
|
||||||
) (*Bridge, <-chan events.Event, error) {
|
) (*Bridge, <-chan events.Event, error) {
|
||||||
// api is the user's API manager.
|
// api is the user's API manager.
|
||||||
api := proton.New(newAPIOptions(apiURL, curVersion, cookieJar, roundTripper, vault.SyncAttPool())...)
|
api := proton.New(newAPIOptions(apiURL, curVersion, cookieJar, roundTripper)...)
|
||||||
|
|
||||||
// tasks holds all the bridge's background tasks.
|
// tasks holds all the bridge's background tasks.
|
||||||
tasks := async.NewGroup(context.Background(), crashHandler)
|
tasks := async.NewGroup(context.Background(), crashHandler)
|
||||||
|
|||||||
@ -310,7 +310,6 @@ func (*storeBuilder) New(path, userID string, passphrase []byte) (store.Store, e
|
|||||||
return store.NewOnDiskStore(
|
return store.NewOnDiskStore(
|
||||||
filepath.Join(path, userID),
|
filepath.Join(path, userID),
|
||||||
passphrase,
|
passphrase,
|
||||||
store.WithCompressor(new(store.GZipCompressor)),
|
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -461,7 +461,6 @@ func (bridge *Bridge) addUserWithVault(
|
|||||||
bridge.reporter,
|
bridge.reporter,
|
||||||
apiUser,
|
apiUser,
|
||||||
bridge.crashHandler,
|
bridge.crashHandler,
|
||||||
bridge.vault.SyncWorkers(),
|
|
||||||
bridge.vault.GetShowAllMail(),
|
bridge.vault.GetShowAllMail(),
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@ -18,6 +18,7 @@
|
|||||||
package user
|
package user
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -502,7 +503,7 @@ func (user *User) handleMessageEvents(ctx context.Context, messageEvents []proto
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (user *User) handleCreateMessageEvent(ctx context.Context, event proton.MessageEvent) ([]imap.Update, error) {
|
func (user *User) handleCreateMessageEvent(ctx context.Context, event proton.MessageEvent) ([]imap.Update, error) {
|
||||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID)
|
full, err := user.client.GetFullMessage(ctx, event.Message.ID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If the message is not found, it means that it has been deleted before we could fetch it.
|
// If the message is not found, it means that it has been deleted before we could fetch it.
|
||||||
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
||||||
@ -521,7 +522,7 @@ func (user *User) handleCreateMessageEvent(ctx context.Context, event proton.Mes
|
|||||||
|
|
||||||
var update imap.Update
|
var update imap.Update
|
||||||
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
||||||
res := buildRFC822(user.apiLabels, full, addrKR)
|
res := buildRFC822(user.apiLabels, full, addrKR, new(bytes.Buffer))
|
||||||
|
|
||||||
if res.err != nil {
|
if res.err != nil {
|
||||||
user.log.WithError(err).Error("Failed to build RFC822 message")
|
user.log.WithError(err).Error("Failed to build RFC822 message")
|
||||||
@ -599,7 +600,7 @@ func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.Messa
|
|||||||
"subject": logging.Sensitive(event.Message.Subject),
|
"subject": logging.Sensitive(event.Message.Subject),
|
||||||
}).Info("Handling draft updated event")
|
}).Info("Handling draft updated event")
|
||||||
|
|
||||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID)
|
full, err := user.client.GetFullMessage(ctx, event.Message.ID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If the message is not found, it means that it has been deleted before we could fetch it.
|
// If the message is not found, it means that it has been deleted before we could fetch it.
|
||||||
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
||||||
@ -613,7 +614,7 @@ func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.Messa
|
|||||||
var update imap.Update
|
var update imap.Update
|
||||||
|
|
||||||
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
if err := withAddrKR(user.apiUser, user.apiAddrs[event.Message.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
|
||||||
res := buildRFC822(user.apiLabels, full, addrKR)
|
res := buildRFC822(user.apiLabels, full, addrKR, new(bytes.Buffer))
|
||||||
|
|
||||||
if res.err != nil {
|
if res.err != nil {
|
||||||
logrus.WithError(err).Error("Failed to build RFC822 message")
|
logrus.WithError(err).Error("Failed to build RFC822 message")
|
||||||
|
|||||||
@ -336,7 +336,7 @@ func (conn *imapConnector) CreateMessage(
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (conn *imapConnector) GetMessageLiteral(ctx context.Context, id imap.MessageID) ([]byte, error) {
|
func (conn *imapConnector) GetMessageLiteral(ctx context.Context, id imap.MessageID) ([]byte, error) {
|
||||||
msg, err := conn.client.GetFullMessage(ctx, string(id))
|
msg, err := conn.client.GetFullMessage(ctx, string(id), newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -538,7 +538,7 @@ func (conn *imapConnector) importMessage(
|
|||||||
|
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if full, err = conn.client.GetFullMessage(ctx, messageID); err != nil {
|
if full, err = conn.client.GetFullMessage(ctx, messageID, newProtonAPIScheduler(), proton.NewDefaultAttachmentAllocator()); err != nil {
|
||||||
return fmt.Errorf("failed to fetch message: %w", err)
|
return fmt.Errorf("failed to fetch message: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -18,6 +18,7 @@
|
|||||||
package user
|
package user
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"runtime"
|
"runtime"
|
||||||
@ -25,6 +26,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/ProtonMail/gluon/imap"
|
"github.com/ProtonMail/gluon/imap"
|
||||||
|
"github.com/ProtonMail/gluon/logging"
|
||||||
"github.com/ProtonMail/gluon/queue"
|
"github.com/ProtonMail/gluon/queue"
|
||||||
"github.com/ProtonMail/gluon/reporter"
|
"github.com/ProtonMail/gluon/reporter"
|
||||||
"github.com/ProtonMail/go-proton-api"
|
"github.com/ProtonMail/go-proton-api"
|
||||||
@ -35,16 +37,12 @@ import (
|
|||||||
"github.com/bradenaw/juniper/parallel"
|
"github.com/bradenaw/juniper/parallel"
|
||||||
"github.com/bradenaw/juniper/xslices"
|
"github.com/bradenaw/juniper/xslices"
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
"github.com/pbnjay/memory"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
"golang.org/x/exp/maps"
|
"golang.org/x/exp/maps"
|
||||||
"golang.org/x/exp/slices"
|
"golang.org/x/exp/slices"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
maxUpdateSize = 1 << 27 // 128 MiB
|
|
||||||
maxBatchSize = 1 << 8 // 256
|
|
||||||
)
|
|
||||||
|
|
||||||
// doSync begins syncing the users data.
|
// doSync begins syncing the users data.
|
||||||
// It first ensures the latest event ID is known; if not, it fetches it.
|
// It first ensures the latest event ID is known; if not, it fetches it.
|
||||||
// It sends a SyncStarted event and then either SyncFinished or SyncFailed
|
// It sends a SyncStarted event and then either SyncFinished or SyncFailed
|
||||||
@ -143,7 +141,6 @@ func (user *User) sync(ctx context.Context) error {
|
|||||||
addrKRs,
|
addrKRs,
|
||||||
user.updateCh,
|
user.updateCh,
|
||||||
user.eventCh,
|
user.eventCh,
|
||||||
user.syncWorkers,
|
|
||||||
); err != nil {
|
); err != nil {
|
||||||
return fmt.Errorf("failed to sync messages: %w", err)
|
return fmt.Errorf("failed to sync messages: %w", err)
|
||||||
}
|
}
|
||||||
@ -212,7 +209,15 @@ func syncLabels(ctx context.Context, apiLabels map[string]proton.Label, updateCh
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// nolint:funlen
|
const Kilobyte = uint64(1024)
|
||||||
|
const Megabyte = 1024 * Kilobyte
|
||||||
|
const Gigabyte = 1024 * Megabyte
|
||||||
|
|
||||||
|
func toMB(v uint64) float64 {
|
||||||
|
return float64(v) / float64(Megabyte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// nolint:funlen,gocyclo
|
||||||
func syncMessages(
|
func syncMessages(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
userID string,
|
userID string,
|
||||||
@ -224,7 +229,6 @@ func syncMessages(
|
|||||||
addrKRs map[string]*crypto.KeyRing,
|
addrKRs map[string]*crypto.KeyRing,
|
||||||
updateCh map[string]*queue.QueuedChannel[imap.Update],
|
updateCh map[string]*queue.QueuedChannel[imap.Update],
|
||||||
eventCh *queue.QueuedChannel[events.Event],
|
eventCh *queue.QueuedChannel[events.Event],
|
||||||
syncWorkers int,
|
|
||||||
) error {
|
) error {
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
@ -235,78 +239,319 @@ func syncMessages(
|
|||||||
|
|
||||||
logrus.WithFields(logrus.Fields{
|
logrus.WithFields(logrus.Fields{
|
||||||
"messages": len(messageIDs),
|
"messages": len(messageIDs),
|
||||||
"workers": syncWorkers,
|
|
||||||
"numCPU": runtime.NumCPU(),
|
"numCPU": runtime.NumCPU(),
|
||||||
}).Info("Starting message sync")
|
}).Info("Starting message sync")
|
||||||
|
|
||||||
// Create the flushers, one per update channel.
|
// Create the flushers, one per update channel.
|
||||||
flushers := make(map[string]*flusher, len(updateCh))
|
|
||||||
|
|
||||||
for addrID, updateCh := range updateCh {
|
|
||||||
flushers[addrID] = newFlusher(updateCh, maxUpdateSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a reporter to report sync progress updates.
|
// Create a reporter to report sync progress updates.
|
||||||
syncReporter := newSyncReporter(userID, eventCh, len(messageIDs), time.Second)
|
syncReporter := newSyncReporter(userID, eventCh, len(messageIDs), time.Second)
|
||||||
defer syncReporter.done()
|
defer syncReporter.done()
|
||||||
|
|
||||||
type flushUpdate struct {
|
// Expected mem usage for this whole process should be the sum of MaxMessageBuildingMem and MaxDownloadRequestMem
|
||||||
messageID string
|
// times x due to pipeline and all additional memory used by network requests and compression+io.
|
||||||
pushedUpdates []imap.Update
|
|
||||||
batchLen int
|
// There's no point in using more than 128MB of download data per stage, after that we reach a point of diminishing
|
||||||
|
// returns as we can't keep the pipeline fed fast enough.
|
||||||
|
const MaxDownloadRequestMem = 128 * Megabyte
|
||||||
|
|
||||||
|
// Any lower than this and we may fail to download messages.
|
||||||
|
const MinDownloadRequestMem = 40 * Megabyte
|
||||||
|
|
||||||
|
// This value can be increased to your hearts content. The more system memory the user has, the more messages
|
||||||
|
// we can build in parallel.
|
||||||
|
const MaxMessageBuildingMem = 128 * Megabyte
|
||||||
|
const MinMessageBuildingMem = 64 * Megabyte
|
||||||
|
|
||||||
|
// Maximum recommend value for parallel downloads by the API team.
|
||||||
|
const maxParallelDownloads = 20
|
||||||
|
|
||||||
|
totalMemory := memory.TotalMemory()
|
||||||
|
logrus.Debugf("Total System Memory: %v", toMB(totalMemory))
|
||||||
|
|
||||||
|
syncMaxDownloadRequestMem := MaxDownloadRequestMem
|
||||||
|
syncMaxMessageBuildingMem := MaxMessageBuildingMem
|
||||||
|
|
||||||
|
// If less than 2GB available try and limit max memory to 512 MB
|
||||||
|
if totalMemory < 2*Gigabyte {
|
||||||
|
if totalMemory < 800*Megabyte {
|
||||||
|
logrus.Warnf("System has less than 800MB of memory, you may experience issues sycing large mailboxes")
|
||||||
|
}
|
||||||
|
syncMaxDownloadRequestMem = MinDownloadRequestMem
|
||||||
|
syncMaxMessageBuildingMem = MinMessageBuildingMem
|
||||||
|
} else {
|
||||||
|
// Increasing the max download capacity has very little effect on sync speed. We could increase the download
|
||||||
|
// memory but the user would see less sync notifications. A smaller value here leads to more frequent
|
||||||
|
// updates. Additionally, most of ot sync time is spent in the message building.
|
||||||
|
syncMaxDownloadRequestMem = MaxDownloadRequestMem
|
||||||
|
// Currently limited so that if a user has multiple accounts active it also doesn't cause excessive memory usage.
|
||||||
|
syncMaxMessageBuildingMem = MaxMessageBuildingMem
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logrus.Debugf("Max memory usage for sync Download=%vMB Building=%vMB Predicted Max Total=%vMB",
|
||||||
|
toMB(syncMaxDownloadRequestMem),
|
||||||
|
toMB(syncMaxMessageBuildingMem),
|
||||||
|
toMB((syncMaxMessageBuildingMem*4)+(syncMaxDownloadRequestMem*4)),
|
||||||
|
)
|
||||||
|
|
||||||
|
type flushUpdate struct {
|
||||||
|
messageID string
|
||||||
|
err error
|
||||||
|
batchLen int
|
||||||
|
}
|
||||||
|
|
||||||
|
type downloadRequest struct {
|
||||||
|
ids []string
|
||||||
|
expectedSize uint64
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type downloadedMessageBatch struct {
|
||||||
|
batch []proton.FullMessage
|
||||||
|
}
|
||||||
|
|
||||||
|
type builtMessageBatch struct {
|
||||||
|
batch []*buildRes
|
||||||
|
}
|
||||||
|
|
||||||
|
downloadCh := make(chan downloadRequest)
|
||||||
|
|
||||||
|
buildCh := make(chan downloadedMessageBatch)
|
||||||
|
|
||||||
// The higher this value, the longer we can continue our download iteration before being blocked on channel writes
|
// The higher this value, the longer we can continue our download iteration before being blocked on channel writes
|
||||||
// to the update flushing goroutine.
|
// to the update flushing goroutine.
|
||||||
flushCh := make(chan []*buildRes, 2)
|
flushCh := make(chan builtMessageBatch)
|
||||||
|
|
||||||
// Allow up to 4 batched wait requests.
|
flushUpdateCh := make(chan flushUpdate)
|
||||||
flushUpdateCh := make(chan flushUpdate, 4)
|
|
||||||
|
|
||||||
errorCh := make(chan error, syncWorkers)
|
errorCh := make(chan error, maxParallelDownloads+2)
|
||||||
|
|
||||||
|
// Go routine in charge of downloading message metadata
|
||||||
|
logging.GoAnnotated(ctx, func(ctx context.Context) {
|
||||||
|
defer close(downloadCh)
|
||||||
|
const MetadataDataPageSize = 150
|
||||||
|
|
||||||
|
var downloadReq downloadRequest
|
||||||
|
downloadReq.ids = make([]string, 0, MetadataDataPageSize)
|
||||||
|
|
||||||
|
metadataChunks := xslices.Chunk(messageIDs, MetadataDataPageSize)
|
||||||
|
for i, metadataChunk := range metadataChunks {
|
||||||
|
logrus.Debugf("Metadata Request (%v of %v), previous: %v", i, len(metadataChunks), len(downloadReq.ids))
|
||||||
|
metadata, err := client.GetMessageMetadataPage(ctx, 0, len(metadataChunk), proton.MessageFilter{ID: metadataChunk})
|
||||||
|
if err != nil {
|
||||||
|
downloadReq.err = err
|
||||||
|
downloadCh <- downloadReq
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
downloadReq.err = err
|
||||||
|
downloadCh <- downloadReq
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Build look up table so that messages are processed in the same order.
|
||||||
|
metadataMap := make(map[string]int, len(metadata))
|
||||||
|
for i, v := range metadata {
|
||||||
|
metadataMap[v.ID] = i
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, id := range metadataChunk {
|
||||||
|
m := &metadata[metadataMap[id]]
|
||||||
|
nextSize := downloadReq.expectedSize + uint64(m.Size)
|
||||||
|
if nextSize >= syncMaxDownloadRequestMem || len(downloadReq.ids) >= 256 {
|
||||||
|
logrus.Debugf("Download Request Sent at %v of %v", i, len(metadata))
|
||||||
|
select {
|
||||||
|
case downloadCh <- downloadReq:
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
downloadReq.expectedSize = 0
|
||||||
|
downloadReq.ids = make([]string, 0, MetadataDataPageSize)
|
||||||
|
nextSize = uint64(m.Size)
|
||||||
|
}
|
||||||
|
downloadReq.ids = append(downloadReq.ids, id)
|
||||||
|
downloadReq.expectedSize = nextSize
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(downloadReq.ids) != 0 {
|
||||||
|
logrus.Debugf("Sending remaining download request")
|
||||||
|
select {
|
||||||
|
case downloadCh <- downloadReq:
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, logging.Labels{"sync-stage": "meta-data"})
|
||||||
|
|
||||||
// Goroutine in charge of downloading and building messages in maxBatchSize batches.
|
// Goroutine in charge of downloading and building messages in maxBatchSize batches.
|
||||||
go func() {
|
logging.GoAnnotated(ctx, func(ctx context.Context) {
|
||||||
defer close(flushCh)
|
defer close(buildCh)
|
||||||
defer close(errorCh)
|
defer close(errorCh)
|
||||||
|
defer func() {
|
||||||
|
logrus.Debugf("sync downloader exit")
|
||||||
|
}()
|
||||||
|
|
||||||
|
for request := range downloadCh {
|
||||||
|
logrus.Debugf("Download request: %v MB:%v", len(request.ids), toMB(request.expectedSize))
|
||||||
|
if request.err != nil {
|
||||||
|
errorCh <- request.err
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
for _, batch := range xslices.Chunk(messageIDs, maxBatchSize) {
|
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
errorCh <- ctx.Err()
|
errorCh <- ctx.Err()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
result, err := parallel.MapContext(ctx, syncWorkers, batch, func(ctx context.Context, id string) (*buildRes, error) {
|
result, err := parallel.MapContext(ctx, maxParallelDownloads, request.ids, func(ctx context.Context, id string) (proton.FullMessage, error) {
|
||||||
msg, err := client.GetFullMessage(ctx, id)
|
var result proton.FullMessage
|
||||||
|
|
||||||
|
msg, err := client.GetMessage(ctx, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return proton.FullMessage{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
if ctx.Err() != nil {
|
var attachmentSize int64
|
||||||
return nil, ctx.Err()
|
for _, a := range msg.Attachments {
|
||||||
|
attachmentSize += a.Size
|
||||||
}
|
}
|
||||||
|
|
||||||
return buildRFC822(apiLabels, msg, addrKRs[msg.AddressID]), nil
|
// allocate attachment data.
|
||||||
|
result.AttData = make([][]byte, len(msg.Attachments))
|
||||||
|
|
||||||
|
for i, a := range msg.Attachments {
|
||||||
|
var buffer bytes.Buffer
|
||||||
|
buffer.Grow(int(a.Size))
|
||||||
|
if err := client.GetAttachmentInto(ctx, a.ID, &buffer); err != nil {
|
||||||
|
return proton.FullMessage{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
result.AttData[i] = buffer.Bytes()
|
||||||
|
}
|
||||||
|
|
||||||
|
result.Message = msg
|
||||||
|
|
||||||
|
return result, nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errorCh <- err
|
errorCh <- err
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
select {
|
||||||
|
case buildCh <- downloadedMessageBatch{
|
||||||
|
batch: result,
|
||||||
|
}:
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}, logging.Labels{"sync-stage": "download"})
|
||||||
|
|
||||||
|
// Goroutine which builds messages after they have been downloaded
|
||||||
|
logging.GoAnnotated(ctx, func(ctx context.Context) {
|
||||||
|
defer close(flushCh)
|
||||||
|
defer func() {
|
||||||
|
logrus.Debugf("sync builder exit")
|
||||||
|
}()
|
||||||
|
|
||||||
|
maxMessagesInParallel := runtime.NumCPU()
|
||||||
|
|
||||||
|
for buildBatch := range buildCh {
|
||||||
if ctx.Err() != nil {
|
if ctx.Err() != nil {
|
||||||
errorCh <- ctx.Err()
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
flushCh <- result
|
var expectedMemUsage uint64
|
||||||
|
var chunks [][]proton.FullMessage
|
||||||
|
|
||||||
|
{
|
||||||
|
var lastIndex int
|
||||||
|
var index int
|
||||||
|
for _, v := range buildBatch.batch {
|
||||||
|
var dataSize uint64
|
||||||
|
for _, a := range v.Attachments {
|
||||||
|
dataSize += uint64(a.Size)
|
||||||
|
}
|
||||||
|
|
||||||
|
// 2x increase for attachment due to extra memory needed for decrypting and writing
|
||||||
|
// in memory buffer.
|
||||||
|
dataSize *= 2
|
||||||
|
dataSize += uint64(len(v.Body))
|
||||||
|
|
||||||
|
nextMemSize := expectedMemUsage + dataSize
|
||||||
|
if nextMemSize >= syncMaxMessageBuildingMem {
|
||||||
|
chunks = append(chunks, buildBatch.batch[lastIndex:index])
|
||||||
|
lastIndex = index
|
||||||
|
expectedMemUsage = dataSize
|
||||||
|
} else {
|
||||||
|
expectedMemUsage = nextMemSize
|
||||||
|
}
|
||||||
|
|
||||||
|
index++
|
||||||
|
}
|
||||||
|
|
||||||
|
if index < len(buildBatch.batch) {
|
||||||
|
chunks = append(chunks, buildBatch.batch[index:])
|
||||||
|
} else if index == len(buildBatch.batch) && len(chunks) == 0 {
|
||||||
|
chunks = [][]proton.FullMessage{buildBatch.batch}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for index, chunk := range chunks {
|
||||||
|
result, err := parallel.MapContext(ctx, maxMessagesInParallel, chunk, func(ctx context.Context, msg proton.FullMessage) (*buildRes, error) {
|
||||||
|
return buildRFC822(apiLabels, msg, addrKRs[msg.AddressID], new(bytes.Buffer)), nil
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
logrus.Debugf("Build request: %v of %v", index, len(chunks))
|
||||||
|
|
||||||
|
select {
|
||||||
|
case flushCh <- builtMessageBatch{result}:
|
||||||
|
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}()
|
}, logging.Labels{"sync-stage": "builder"})
|
||||||
|
|
||||||
// Goroutine which converts the messages into updates and builds a waitable structure for progress tracking.
|
// Goroutine which converts the messages into updates and builds a waitable structure for progress tracking.
|
||||||
go func() {
|
logging.GoAnnotated(ctx, func(ctx context.Context) {
|
||||||
defer close(flushUpdateCh)
|
defer close(flushUpdateCh)
|
||||||
for batch := range flushCh {
|
defer func() {
|
||||||
for _, res := range batch {
|
logrus.Debugf("sync flush exit")
|
||||||
|
}()
|
||||||
|
|
||||||
|
type updateTargetInfo struct {
|
||||||
|
queueIndex int
|
||||||
|
ch *queue.QueuedChannel[imap.Update]
|
||||||
|
}
|
||||||
|
|
||||||
|
pendingUpdates := make([][]*imap.MessageCreated, len(updateCh))
|
||||||
|
addressToIndex := make(map[string]updateTargetInfo)
|
||||||
|
|
||||||
|
{
|
||||||
|
i := 0
|
||||||
|
for addrID, updateCh := range updateCh {
|
||||||
|
addressToIndex[addrID] = updateTargetInfo{
|
||||||
|
ch: updateCh,
|
||||||
|
queueIndex: i,
|
||||||
|
}
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for downloadBatch := range flushCh {
|
||||||
|
logrus.Debugf("Flush batch: %v", len(downloadBatch.batch))
|
||||||
|
for _, res := range downloadBatch.batch {
|
||||||
if res.err != nil {
|
if res.err != nil {
|
||||||
if err := vault.AddFailedMessageID(res.messageID); err != nil {
|
if err := vault.AddFailedMessageID(res.messageID); err != nil {
|
||||||
logrus.WithError(err).Error("Failed to add failed message ID")
|
logrus.WithError(err).Error("Failed to add failed message ID")
|
||||||
@ -327,31 +572,38 @@ func syncMessages(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
flushers[res.addressID].push(res.update)
|
targetInfo := addressToIndex[res.addressID]
|
||||||
|
pendingUpdates[targetInfo.queueIndex] = append(pendingUpdates[targetInfo.queueIndex], res.update)
|
||||||
}
|
}
|
||||||
|
|
||||||
var pushedUpdates []imap.Update
|
for _, info := range addressToIndex {
|
||||||
for _, flusher := range flushers {
|
up := imap.NewMessagesCreated(true, pendingUpdates[info.queueIndex]...)
|
||||||
flusher.flush()
|
info.ch.Enqueue(up)
|
||||||
pushedUpdates = append(pushedUpdates, flusher.collectPushedUpdates()...)
|
|
||||||
|
err, ok := up.WaitContext(ctx)
|
||||||
|
if ok && err != nil {
|
||||||
|
flushUpdateCh <- flushUpdate{
|
||||||
|
err: fmt.Errorf("failed to apply sync update to gluon %v: %w", up.String(), err),
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
pendingUpdates[info.queueIndex] = pendingUpdates[info.queueIndex][:0]
|
||||||
}
|
}
|
||||||
|
|
||||||
flushUpdateCh <- flushUpdate{
|
select {
|
||||||
messageID: batch[0].messageID,
|
case flushUpdateCh <- flushUpdate{
|
||||||
pushedUpdates: pushedUpdates,
|
messageID: downloadBatch.batch[0].messageID,
|
||||||
batchLen: len(batch),
|
err: nil,
|
||||||
|
batchLen: len(downloadBatch.batch),
|
||||||
|
}:
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}()
|
}, logging.Labels{"sync-stage": "flush"})
|
||||||
|
|
||||||
for flushUpdate := range flushUpdateCh {
|
for flushUpdate := range flushUpdateCh {
|
||||||
for _, up := range flushUpdate.pushedUpdates {
|
|
||||||
err, ok := up.WaitContext(ctx)
|
|
||||||
if ok && err != nil {
|
|
||||||
return fmt.Errorf("failed to apply sync update to gluon %v: %w", up.String(), err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := vault.SetLastMessageID(flushUpdate.messageID); err != nil {
|
if err := vault.SetLastMessageID(flushUpdate.messageID); err != nil {
|
||||||
return fmt.Errorf("failed to set last synced message ID: %w", err)
|
return fmt.Errorf("failed to set last synced message ID: %w", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@ -48,16 +48,18 @@ func defaultJobOpts() message.JobOptions {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildRFC822(apiLabels map[string]proton.Label, full proton.FullMessage, addrKR *crypto.KeyRing) *buildRes {
|
func buildRFC822(apiLabels map[string]proton.Label, full proton.FullMessage, addrKR *crypto.KeyRing, buffer *bytes.Buffer) *buildRes {
|
||||||
var (
|
var (
|
||||||
update *imap.MessageCreated
|
update *imap.MessageCreated
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
|
|
||||||
if literal, buildErr := message.BuildRFC822(addrKR, full.Message, full.AttData, defaultJobOpts()); buildErr != nil {
|
buffer.Grow(full.Size)
|
||||||
|
|
||||||
|
if buildErr := message.BuildRFC822Into(addrKR, full.Message, full.AttData, defaultJobOpts(), buffer); buildErr != nil {
|
||||||
update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, buildErr)
|
update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, buildErr)
|
||||||
err = buildErr
|
err = buildErr
|
||||||
} else if created, parseErr := newMessageCreatedUpdate(apiLabels, full.MessageMetadata, literal); parseErr != nil {
|
} else if created, parseErr := newMessageCreatedUpdate(apiLabels, full.MessageMetadata, buffer.Bytes()); parseErr != nil {
|
||||||
update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, parseErr)
|
update = newMessageCreatedFailedUpdate(apiLabels, full.MessageMetadata, parseErr)
|
||||||
err = parseErr
|
err = parseErr
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@ -1,63 +0,0 @@
|
|||||||
// Copyright (c) 2023 Proton AG
|
|
||||||
//
|
|
||||||
// This file is part of Proton Mail Bridge.
|
|
||||||
//
|
|
||||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
|
||||||
// it under the terms of the GNU General Public License as published by
|
|
||||||
// the Free Software Foundation, either version 3 of the License, or
|
|
||||||
// (at your option) any later version.
|
|
||||||
//
|
|
||||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
|
||||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
// GNU General Public License for more details.
|
|
||||||
//
|
|
||||||
// You should have received a copy of the GNU General Public License
|
|
||||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
package user
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/ProtonMail/gluon/imap"
|
|
||||||
"github.com/ProtonMail/gluon/queue"
|
|
||||||
)
|
|
||||||
|
|
||||||
type flusher struct {
|
|
||||||
updateCh *queue.QueuedChannel[imap.Update]
|
|
||||||
updates []*imap.MessageCreated
|
|
||||||
pushedUpdates []imap.Update
|
|
||||||
|
|
||||||
maxUpdateSize int
|
|
||||||
curChunkSize int
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFlusher(updateCh *queue.QueuedChannel[imap.Update], maxUpdateSize int) *flusher {
|
|
||||||
return &flusher{
|
|
||||||
updateCh: updateCh,
|
|
||||||
maxUpdateSize: maxUpdateSize,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *flusher) push(update *imap.MessageCreated) {
|
|
||||||
f.updates = append(f.updates, update)
|
|
||||||
|
|
||||||
if f.curChunkSize += len(update.Literal); f.curChunkSize >= f.maxUpdateSize {
|
|
||||||
f.flush()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *flusher) flush() {
|
|
||||||
if len(f.updates) > 0 {
|
|
||||||
update := imap.NewMessagesCreated(true, f.updates...)
|
|
||||||
f.updateCh.Enqueue(update)
|
|
||||||
f.updates = nil
|
|
||||||
f.curChunkSize = 0
|
|
||||||
f.pushedUpdates = append(f.pushedUpdates, update)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *flusher) collectPushedUpdates() []imap.Update {
|
|
||||||
updates := f.pushedUpdates
|
|
||||||
f.pushedUpdates = nil
|
|
||||||
return updates
|
|
||||||
}
|
|
||||||
@ -20,6 +20,7 @@ package user
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"reflect"
|
"reflect"
|
||||||
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/ProtonMail/go-proton-api"
|
"github.com/ProtonMail/go-proton-api"
|
||||||
@ -91,3 +92,7 @@ func sortSlice[Item any](items []Item, less func(Item, Item) bool) []Item {
|
|||||||
|
|
||||||
return sorted
|
return sorted
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newProtonAPIScheduler() proton.Scheduler {
|
||||||
|
return proton.NewParallelScheduler(runtime.NumCPU() / 2)
|
||||||
|
}
|
||||||
|
|||||||
@ -84,7 +84,6 @@ type User struct {
|
|||||||
pollAPIEventsCh chan chan struct{}
|
pollAPIEventsCh chan chan struct{}
|
||||||
goPollAPIEvents func(wait bool)
|
goPollAPIEvents func(wait bool)
|
||||||
|
|
||||||
syncWorkers int
|
|
||||||
showAllMail uint32
|
showAllMail uint32
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -98,7 +97,6 @@ func New(
|
|||||||
reporter reporter.Reporter,
|
reporter reporter.Reporter,
|
||||||
apiUser proton.User,
|
apiUser proton.User,
|
||||||
crashHandler async.PanicHandler,
|
crashHandler async.PanicHandler,
|
||||||
syncWorkers int,
|
|
||||||
showAllMail bool,
|
showAllMail bool,
|
||||||
) (*User, error) { //nolint:funlen
|
) (*User, error) { //nolint:funlen
|
||||||
logrus.WithField("userID", apiUser.ID).Info("Creating new user")
|
logrus.WithField("userID", apiUser.ID).Info("Creating new user")
|
||||||
@ -142,7 +140,6 @@ func New(
|
|||||||
tasks: async.NewGroup(context.Background(), crashHandler),
|
tasks: async.NewGroup(context.Background(), crashHandler),
|
||||||
pollAPIEventsCh: make(chan chan struct{}),
|
pollAPIEventsCh: make(chan chan struct{}),
|
||||||
|
|
||||||
syncWorkers: syncWorkers,
|
|
||||||
showAllMail: b32(showAllMail),
|
showAllMail: b32(showAllMail),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@ -216,7 +216,7 @@ func withUser(tb testing.TB, ctx context.Context, _ *server.Server, m *proton.Ma
|
|||||||
vaultUser, err := vault.AddUser(apiUser.ID, username, username+"@pm.me", apiAuth.UID, apiAuth.RefreshToken, saltedKeyPass)
|
vaultUser, err := vault.AddUser(apiUser.ID, username, username+"@pm.me", apiAuth.UID, apiAuth.RefreshToken, saltedKeyPass)
|
||||||
require.NoError(tb, err)
|
require.NoError(tb, err)
|
||||||
|
|
||||||
user, err := New(ctx, vaultUser, client, nil, apiUser, nil, vault.SyncWorkers(), true)
|
user, err := New(ctx, vaultUser, client, nil, apiUser, nil, true)
|
||||||
require.NoError(tb, err)
|
require.NoError(tb, err)
|
||||||
defer user.Close()
|
defer user.Close()
|
||||||
|
|
||||||
|
|||||||
@ -189,27 +189,3 @@ func (vault *Vault) SetFirstStart(firstStart bool) error {
|
|||||||
data.Settings.FirstStart = firstStart
|
data.Settings.FirstStart = firstStart
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// SyncWorkers returns the number of workers to use for syncing.
|
|
||||||
func (vault *Vault) SyncWorkers() int {
|
|
||||||
return vault.get().Settings.SyncWorkers
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSyncWorkers sets the number of workers to use for syncing.
|
|
||||||
func (vault *Vault) SetSyncWorkers(workers int) error {
|
|
||||||
return vault.mod(func(data *Data) {
|
|
||||||
data.Settings.SyncWorkers = workers
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncAttPool returns the size of the attachment pool.
|
|
||||||
func (vault *Vault) SyncAttPool() int {
|
|
||||||
return vault.get().Settings.SyncAttPool
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSyncAttPool sets the size of the attachment pool.
|
|
||||||
func (vault *Vault) SetSyncAttPool(pool int) error {
|
|
||||||
return vault.mod(func(data *Data) {
|
|
||||||
data.Settings.SyncAttPool = pool
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|||||||
@ -202,12 +202,3 @@ func TestVault_Settings_FirstStart(t *testing.T) {
|
|||||||
// Check the new first start value.
|
// Check the new first start value.
|
||||||
require.Equal(t, false, s.GetFirstStart())
|
require.Equal(t, false, s.GetFirstStart())
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestVault_Settings_SyncWorkers(t *testing.T) {
|
|
||||||
// create a new test vault.
|
|
||||||
s := newVault(t)
|
|
||||||
|
|
||||||
syncWorkers := vault.GetDefaultSyncWorkerCount()
|
|
||||||
require.Equal(t, syncWorkers, s.SyncWorkers())
|
|
||||||
require.Equal(t, syncWorkers, s.SyncAttPool())
|
|
||||||
}
|
|
||||||
|
|||||||
@ -19,7 +19,6 @@ package vault
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/ProtonMail/proton-bridge/v3/internal/updater"
|
"github.com/ProtonMail/proton-bridge/v3/internal/updater"
|
||||||
)
|
)
|
||||||
@ -43,26 +42,9 @@ type Settings struct {
|
|||||||
|
|
||||||
LastVersion string
|
LastVersion string
|
||||||
FirstStart bool
|
FirstStart bool
|
||||||
|
|
||||||
SyncWorkers int
|
|
||||||
SyncAttPool int
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetDefaultSyncWorkerCount() int {
|
|
||||||
const minSyncWorkers = 16
|
|
||||||
|
|
||||||
syncWorkers := runtime.NumCPU() * 4
|
|
||||||
|
|
||||||
if syncWorkers < minSyncWorkers {
|
|
||||||
syncWorkers = minSyncWorkers
|
|
||||||
}
|
|
||||||
|
|
||||||
return syncWorkers
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newDefaultSettings(gluonDir string) Settings {
|
func newDefaultSettings(gluonDir string) Settings {
|
||||||
syncWorkers := GetDefaultSyncWorkerCount()
|
|
||||||
|
|
||||||
return Settings{
|
return Settings{
|
||||||
GluonDir: gluonDir,
|
GluonDir: gluonDir,
|
||||||
|
|
||||||
@ -82,8 +64,5 @@ func newDefaultSettings(gluonDir string) Settings {
|
|||||||
|
|
||||||
LastVersion: "0.0.0",
|
LastVersion: "0.0.0",
|
||||||
FirstStart: true,
|
FirstStart: true,
|
||||||
|
|
||||||
SyncWorkers: syncWorkers,
|
|
||||||
SyncAttPool: syncWorkers,
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@ -20,6 +20,7 @@ package message
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
|
"io"
|
||||||
"mime"
|
"mime"
|
||||||
"net/mail"
|
"net/mail"
|
||||||
"strings"
|
"strings"
|
||||||
@ -46,65 +47,73 @@ var (
|
|||||||
const InternalIDDomain = `protonmail.internalid`
|
const InternalIDDomain = `protonmail.internalid`
|
||||||
|
|
||||||
func BuildRFC822(kr *crypto.KeyRing, msg proton.Message, attData [][]byte, opts JobOptions) ([]byte, error) {
|
func BuildRFC822(kr *crypto.KeyRing, msg proton.Message, attData [][]byte, opts JobOptions) ([]byte, error) {
|
||||||
switch {
|
|
||||||
case len(msg.Attachments) > 0:
|
|
||||||
return buildMultipartRFC822(kr, msg, attData, opts)
|
|
||||||
|
|
||||||
case msg.MIMEType == "multipart/mixed":
|
|
||||||
return buildPGPRFC822(kr, msg, opts)
|
|
||||||
|
|
||||||
default:
|
|
||||||
return buildSimpleRFC822(kr, msg, opts)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func buildSimpleRFC822(kr *crypto.KeyRing, msg proton.Message, opts JobOptions) ([]byte, error) {
|
|
||||||
dec, err := msg.Decrypt(kr)
|
|
||||||
if err != nil {
|
|
||||||
if !opts.IgnoreDecryptionErrors {
|
|
||||||
return nil, errors.Wrap(ErrDecryptionFailed, err.Error())
|
|
||||||
}
|
|
||||||
|
|
||||||
return buildMultipartRFC822(kr, msg, nil, opts)
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr := getTextPartHeader(getMessageHeader(msg, opts), dec, msg.MIMEType)
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
buf := new(bytes.Buffer)
|
||||||
|
if err := BuildRFC822Into(kr, msg, attData, opts, buf); err != nil {
|
||||||
w, err := message.CreateWriter(buf, hdr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err := w.Write(dec); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.Close(); err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
return buf.Bytes(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func BuildRFC822Into(kr *crypto.KeyRing, msg proton.Message, attData [][]byte, opts JobOptions, buf *bytes.Buffer) error {
|
||||||
|
switch {
|
||||||
|
case len(msg.Attachments) > 0:
|
||||||
|
return buildMultipartRFC822(kr, msg, attData, opts, buf)
|
||||||
|
|
||||||
|
case msg.MIMEType == "multipart/mixed":
|
||||||
|
return buildPGPRFC822(kr, msg, opts, buf)
|
||||||
|
|
||||||
|
default:
|
||||||
|
return buildSimpleRFC822(kr, msg, opts, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func buildSimpleRFC822(kr *crypto.KeyRing, msg proton.Message, opts JobOptions, buf *bytes.Buffer) error {
|
||||||
|
var decrypted bytes.Buffer
|
||||||
|
decrypted.Grow(len(msg.Body))
|
||||||
|
|
||||||
|
if err := msg.DecryptInto(kr, &decrypted); err != nil {
|
||||||
|
if !opts.IgnoreDecryptionErrors {
|
||||||
|
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
return buildMultipartRFC822(kr, msg, nil, opts, buf)
|
||||||
|
}
|
||||||
|
|
||||||
|
hdr := getTextPartHeader(getMessageHeader(msg, opts), decrypted.Bytes(), msg.MIMEType)
|
||||||
|
|
||||||
|
w, err := message.CreateWriter(buf, hdr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := w.Write(decrypted.Bytes()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := w.Close(); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func buildMultipartRFC822(
|
func buildMultipartRFC822(
|
||||||
kr *crypto.KeyRing,
|
kr *crypto.KeyRing,
|
||||||
msg proton.Message,
|
msg proton.Message,
|
||||||
attData [][]byte,
|
attData [][]byte,
|
||||||
opts JobOptions,
|
opts JobOptions,
|
||||||
) ([]byte, error) {
|
buf *bytes.Buffer,
|
||||||
|
) error {
|
||||||
boundary := newBoundary(msg.ID)
|
boundary := newBoundary(msg.ID)
|
||||||
|
|
||||||
hdr := getMessageHeader(msg, opts)
|
hdr := getMessageHeader(msg, opts)
|
||||||
|
|
||||||
hdr.SetContentType("multipart/mixed", map[string]string{"boundary": boundary.gen()})
|
hdr.SetContentType("multipart/mixed", map[string]string{"boundary": boundary.gen()})
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
w, err := message.CreateWriter(buf, hdr)
|
w, err := message.CreateWriter(buf, hdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -126,23 +135,23 @@ func buildMultipartRFC822(
|
|||||||
|
|
||||||
if len(inlineAtts) > 0 {
|
if len(inlineAtts) > 0 {
|
||||||
if err := writeRelatedParts(w, kr, boundary, msg, inlineAtts, inlineData, opts); err != nil {
|
if err := writeRelatedParts(w, kr, boundary, msg, inlineAtts, inlineData, opts); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
} else if err := writeTextPart(w, kr, msg, opts); err != nil {
|
} else if err := writeTextPart(w, kr, msg, opts); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, att := range attachAtts {
|
for i, att := range attachAtts {
|
||||||
if err := writeAttachmentPart(w, kr, att, attachData[i], opts); err != nil {
|
if err := writeAttachmentPart(w, kr, att, attachData[i], opts); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := w.Close(); err != nil {
|
if err := w.Close(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeTextPart(
|
func writeTextPart(
|
||||||
@ -151,8 +160,10 @@ func writeTextPart(
|
|||||||
msg proton.Message,
|
msg proton.Message,
|
||||||
opts JobOptions,
|
opts JobOptions,
|
||||||
) error {
|
) error {
|
||||||
dec, err := msg.Decrypt(kr)
|
var decrypted bytes.Buffer
|
||||||
if err != nil {
|
decrypted.Grow(len(msg.Body))
|
||||||
|
|
||||||
|
if err := msg.DecryptInto(kr, &decrypted); err != nil {
|
||||||
if !opts.IgnoreDecryptionErrors {
|
if !opts.IgnoreDecryptionErrors {
|
||||||
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
||||||
}
|
}
|
||||||
@ -160,7 +171,7 @@ func writeTextPart(
|
|||||||
return writeCustomTextPart(w, msg, err)
|
return writeCustomTextPart(w, msg, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return writePart(w, getTextPartHeader(message.Header{}, dec, msg.MIMEType), dec)
|
return writePart(w, getTextPartHeader(message.Header{}, decrypted.Bytes(), msg.MIMEType), decrypted.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeAttachmentPart(
|
func writeAttachmentPart(
|
||||||
@ -175,9 +186,10 @@ func writeAttachmentPart(
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
msg := crypto.NewPGPSplitMessage(kps, attData).GetPGPMessage()
|
// Use io.Multi
|
||||||
|
attachmentReader := io.MultiReader(bytes.NewReader(kps), bytes.NewReader(attData))
|
||||||
|
|
||||||
dec, err := kr.Decrypt(msg, nil, crypto.GetUnixTime())
|
stream, err := kr.DecryptStream(attachmentReader, nil, crypto.GetUnixTime())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if !opts.IgnoreDecryptionErrors {
|
if !opts.IgnoreDecryptionErrors {
|
||||||
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
||||||
@ -186,12 +198,38 @@ func writeAttachmentPart(
|
|||||||
log.
|
log.
|
||||||
WithField("attID", att.ID).
|
WithField("attID", att.ID).
|
||||||
WithError(err).
|
WithError(err).
|
||||||
Warn("Attachment decryption failed")
|
Warn("Attachment decryption failed - construct")
|
||||||
|
|
||||||
return writeCustomAttachmentPart(w, att, msg, err)
|
var pgpMessageBuffer bytes.Buffer
|
||||||
|
pgpMessageBuffer.Grow(len(kps) + len(attData))
|
||||||
|
pgpMessageBuffer.Write(kps)
|
||||||
|
pgpMessageBuffer.Write(attData)
|
||||||
|
|
||||||
|
return writeCustomAttachmentPart(w, att, &crypto.PGPMessage{Data: pgpMessageBuffer.Bytes()}, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return writePart(w, getAttachmentPartHeader(att), dec.GetBinary())
|
var decryptBuffer bytes.Buffer
|
||||||
|
decryptBuffer.Grow(len(kps) + len(attData))
|
||||||
|
|
||||||
|
if _, err := decryptBuffer.ReadFrom(stream); err != nil {
|
||||||
|
if !opts.IgnoreDecryptionErrors {
|
||||||
|
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
log.
|
||||||
|
WithField("attID", att.ID).
|
||||||
|
WithError(err).
|
||||||
|
Warn("Attachment decryption failed - stream")
|
||||||
|
|
||||||
|
var pgpMessageBuffer bytes.Buffer
|
||||||
|
pgpMessageBuffer.Grow(len(kps) + len(attData))
|
||||||
|
pgpMessageBuffer.Write(kps)
|
||||||
|
pgpMessageBuffer.Write(attData)
|
||||||
|
|
||||||
|
return writeCustomAttachmentPart(w, att, &crypto.PGPMessage{Data: pgpMessageBuffer.Bytes()}, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return writePart(w, getAttachmentPartHeader(att), decryptBuffer.Bytes())
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeRelatedParts(
|
func writeRelatedParts(
|
||||||
@ -222,14 +260,16 @@ func writeRelatedParts(
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildPGPRFC822(kr *crypto.KeyRing, msg proton.Message, opts JobOptions) ([]byte, error) {
|
func buildPGPRFC822(kr *crypto.KeyRing, msg proton.Message, opts JobOptions, buf *bytes.Buffer) error {
|
||||||
dec, err := msg.Decrypt(kr)
|
var decrypted bytes.Buffer
|
||||||
if err != nil {
|
decrypted.Grow(len(msg.Body))
|
||||||
|
|
||||||
|
if err := msg.DecryptInto(kr, &decrypted); err != nil {
|
||||||
if !opts.IgnoreDecryptionErrors {
|
if !opts.IgnoreDecryptionErrors {
|
||||||
return nil, errors.Wrap(ErrDecryptionFailed, err.Error())
|
return errors.Wrap(ErrDecryptionFailed, err.Error())
|
||||||
}
|
}
|
||||||
|
|
||||||
return buildPGPMIMEFallbackRFC822(msg, opts)
|
return buildPGPMIMEFallbackRFC822(msg, opts, buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
hdr := getMessageHeader(msg, opts)
|
hdr := getMessageHeader(msg, opts)
|
||||||
@ -240,13 +280,13 @@ func buildPGPRFC822(kr *crypto.KeyRing, msg proton.Message, opts JobOptions) ([]
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len(sigs) > 0 {
|
if len(sigs) > 0 {
|
||||||
return writeMultipartSignedRFC822(hdr, dec, sigs[0])
|
return writeMultipartSignedRFC822(hdr, decrypted.Bytes(), sigs[0], buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
return writeMultipartEncryptedRFC822(hdr, dec)
|
return writeMultipartEncryptedRFC822(hdr, decrypted.Bytes(), buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildPGPMIMEFallbackRFC822(msg proton.Message, opts JobOptions) ([]byte, error) {
|
func buildPGPMIMEFallbackRFC822(msg proton.Message, opts JobOptions, buf *bytes.Buffer) error {
|
||||||
hdr := getMessageHeader(msg, opts)
|
hdr := getMessageHeader(msg, opts)
|
||||||
|
|
||||||
hdr.SetContentType("multipart/encrypted", map[string]string{
|
hdr.SetContentType("multipart/encrypted", map[string]string{
|
||||||
@ -254,11 +294,9 @@ func buildPGPMIMEFallbackRFC822(msg proton.Message, opts JobOptions) ([]byte, er
|
|||||||
"protocol": "application/pgp-encrypted",
|
"protocol": "application/pgp-encrypted",
|
||||||
})
|
})
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
w, err := message.CreateWriter(buf, hdr)
|
w, err := message.CreateWriter(buf, hdr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var encHdr message.Header
|
var encHdr message.Header
|
||||||
@ -267,7 +305,7 @@ func buildPGPMIMEFallbackRFC822(msg proton.Message, opts JobOptions) ([]byte, er
|
|||||||
encHdr.Set("Content-Description", "PGP/MIME version identification")
|
encHdr.Set("Content-Description", "PGP/MIME version identification")
|
||||||
|
|
||||||
if err := writePart(w, encHdr, []byte("Version: 1")); err != nil {
|
if err := writePart(w, encHdr, []byte("Version: 1")); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var dataHdr message.Header
|
var dataHdr message.Header
|
||||||
@ -277,19 +315,17 @@ func buildPGPMIMEFallbackRFC822(msg proton.Message, opts JobOptions) ([]byte, er
|
|||||||
dataHdr.Set("Content-Description", "OpenPGP encrypted message")
|
dataHdr.Set("Content-Description", "OpenPGP encrypted message")
|
||||||
|
|
||||||
if err := writePart(w, dataHdr, []byte(msg.Body)); err != nil {
|
if err := writePart(w, dataHdr, []byte(msg.Body)); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := w.Close(); err != nil {
|
if err := w.Close(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeMultipartSignedRFC822(header message.Header, body []byte, sig proton.Signature) ([]byte, error) { //nolint:funlen
|
func writeMultipartSignedRFC822(header message.Header, body []byte, sig proton.Signature, buf *bytes.Buffer) error { //nolint:funlen
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
boundary := newBoundary("").gen()
|
boundary := newBoundary("").gen()
|
||||||
|
|
||||||
header.SetContentType("multipart/signed", map[string]string{
|
header.SetContentType("multipart/signed", map[string]string{
|
||||||
@ -299,27 +335,27 @@ func writeMultipartSignedRFC822(header message.Header, body []byte, sig proton.S
|
|||||||
})
|
})
|
||||||
|
|
||||||
if err := textproto.WriteHeader(buf, header.Header); err != nil {
|
if err := textproto.WriteHeader(buf, header.Header); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
mw := textproto.NewMultipartWriter(buf)
|
mw := textproto.NewMultipartWriter(buf)
|
||||||
|
|
||||||
if err := mw.SetBoundary(boundary); err != nil {
|
if err := mw.SetBoundary(boundary); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyHeader, bodyData, err := readHeaderBody(body)
|
bodyHeader, bodyData, err := readHeaderBody(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyPart, err := mw.CreatePart(*bodyHeader)
|
bodyPart, err := mw.CreatePart(*bodyHeader)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := bodyPart.Write(bodyData); err != nil {
|
if _, err := bodyPart.Write(bodyData); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
var sigHeader message.Header
|
var sigHeader message.Header
|
||||||
@ -330,31 +366,29 @@ func writeMultipartSignedRFC822(header message.Header, body []byte, sig proton.S
|
|||||||
|
|
||||||
sigPart, err := mw.CreatePart(sigHeader.Header)
|
sigPart, err := mw.CreatePart(sigHeader.Header)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
sigData, err := sig.Data.GetArmored()
|
sigData, err := sig.Data.GetArmored()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := sigPart.Write([]byte(sigData)); err != nil {
|
if _, err := sigPart.Write([]byte(sigData)); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := mw.Close(); err != nil {
|
if err := mw.Close(); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeMultipartEncryptedRFC822(header message.Header, body []byte) ([]byte, error) {
|
func writeMultipartEncryptedRFC822(header message.Header, body []byte, buf *bytes.Buffer) error {
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
|
|
||||||
bodyHeader, bodyData, err := readHeaderBody(body)
|
bodyHeader, bodyData, err := readHeaderBody(body)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// Remove old content type header as it is non-standard. Ensure that messages
|
// Remove old content type header as it is non-standard. Ensure that messages
|
||||||
@ -371,14 +405,14 @@ func writeMultipartEncryptedRFC822(header message.Header, body []byte) ([]byte,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if err := textproto.WriteHeader(buf, header.Header); err != nil {
|
if err := textproto.WriteHeader(buf, header.Header); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err := buf.Write(bodyData); err != nil {
|
if _, err := buf.Write(bodyData); err != nil {
|
||||||
return nil, err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return buf.Bytes(), nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func addressEmpty(address *mail.Address) bool {
|
func addressEmpty(address *mail.Address) bool {
|
||||||
|
|||||||
Reference in New Issue
Block a user