fix(GODT-2829): Fix new sync service bugs

* Fix wrong context use for message downloads
* Fix delete of sync data failing due to ErrNotFound
* Pre-allocate attachment data buffer before download
* Fix calculation of progress if message count is higher than total
This commit is contained in:
Leander Beernaert
2023-08-28 11:23:57 +02:00
parent 1fa0d77b10
commit 0b9b886039
11 changed files with 118 additions and 55 deletions

View File

@ -58,7 +58,7 @@ func newSyncLimits(maxSyncMemory uint64) syncLimits {
MinMessageBuildingMem: 64 * Megabyte,
// Maximum recommended value for parallel downloads by the API team.
MaxParallelDownloads: 20,
MaxParallelDownloads: 32,
MaxSyncMemory: maxSyncMemory,
}

View File

@ -49,7 +49,7 @@ func NewService(reporter reporter.Reporter,
return &Service{
limits: limits,
metadataStage: NewMetadataStage(metaCh, downloadCh, limits.DownloadRequestMem, panicHandler),
downloadStage: NewDownloadStage(downloadCh, buildCh, 20, panicHandler),
downloadStage: NewDownloadStage(downloadCh, buildCh, limits.MaxParallelDownloads, panicHandler),
buildStage: NewBuildStage(buildCh, applyCh, limits.MessageBuildMem, panicHandler, reporter),
applyStage: NewApplyStage(applyCh),
metaCh: metaCh,

View File

@ -76,7 +76,7 @@ func (a *ApplyStage) run(ctx context.Context) {
continue
}
if err := req.job.updateApplier.ApplySyncUpdates(ctx, req.messages); err != nil {
if err := req.job.updateApplier.ApplySyncUpdates(req.getContext(), req.messages); err != nil {
a.log.WithError(err).Error("Failed to apply sync updates")
req.job.onError(err)
continue

View File

@ -18,6 +18,7 @@
package syncservice
import (
"bytes"
"context"
"errors"
"sync/atomic"
@ -58,7 +59,7 @@ func NewDownloadStage(
return &DownloadStage{
input: input,
output: output,
maxParallelDownloads: maxParallelDownloads,
maxParallelDownloads: maxParallelDownloads * 2,
panicHandler: panicHandler,
log: logrus.WithField("sync-stage", "download"),
}
@ -94,7 +95,7 @@ func (d *DownloadStage) run(ctx context.Context) {
// Step 1: Download Messages.
result, err := autoDownloadRate(
ctx,
request.getContext(),
&DefaultDownloadRateModifier{},
request.job.client,
d.maxParallelDownloads,
@ -155,14 +156,15 @@ func (d *DownloadStage) run(ctx context.Context) {
// Step 3: Download attachments data to the message.
attachments, err := autoDownloadRate(
ctx,
request.getContext(),
&DefaultDownloadRateModifier{},
request.job.client,
d.maxParallelDownloads,
attachmentIndices,
newCoolDown,
func(ctx context.Context, client APIClient, input attachmentMeta) ([]byte, error) {
return downloadAttachment(ctx, request.job.downloadCache, client, result[input.msgIdx].Attachments[input.attIdx].ID)
attachment := result[input.msgIdx].Attachments[input.attIdx]
return downloadAttachment(ctx, request.job.downloadCache, client, attachment.ID, attachment.Size)
},
)
if err != nil {
@ -202,17 +204,22 @@ func downloadMessage(ctx context.Context, cache *DownloadCache, client APIClient
return msg, nil
}
func downloadAttachment(ctx context.Context, cache *DownloadCache, client APIClient, id string) ([]byte, error) {
func downloadAttachment(ctx context.Context, cache *DownloadCache, client APIClient, id string, size int64) ([]byte, error) {
data, ok := cache.GetAttachment(id)
if ok {
return data, nil
}
data, err := client.GetAttachment(ctx, id)
if err != nil {
var buffer bytes.Buffer
buffer.Grow(int(size))
if err := client.GetAttachmentInto(ctx, id, &buffer); err != nil {
return nil, err
}
data = buffer.Bytes()
cache.StoreAttachment(id, data)
return data, nil
@ -236,6 +243,10 @@ func autoDownloadRate[T any, R any](
proton429or5xxCounter := int32(0)
parallelTasks := maxParallelDownloads
for _, chunk := range xslices.Chunk(data, maxParallelDownloads) {
if err := ctx.Err(); err != nil {
return nil, err
}
parallelTasks = modifier.Apply(atomic.LoadInt32(&proton429or5xxCounter) != 0, parallelTasks, maxParallelDownloads)
atomic.StoreInt32(&proton429or5xxCounter, 0)

View File

@ -18,6 +18,7 @@
package syncservice
import (
"bytes"
"context"
"errors"
"fmt"
@ -61,9 +62,9 @@ func TestDownloadAttachment_NotInCache(t *testing.T) {
mockCtrl := gomock.NewController(t)
client := NewMockAPIClient(mockCtrl)
cache := newDownloadCache()
client.EXPECT().GetAttachment(gomock.Any(), gomock.Any()).Return(nil, nil)
client.EXPECT().GetAttachmentInto(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
_, err := downloadAttachment(context.Background(), cache, client, "id")
_, err := downloadAttachment(context.Background(), cache, client, "id", 1024)
require.NoError(t, err)
}
@ -74,7 +75,7 @@ func TestDownloadAttachment_InCache(t *testing.T) {
attachment := []byte("hello world")
cache.StoreAttachment("id", attachment)
downloaded, err := downloadAttachment(context.Background(), cache, client, "id")
downloaded, err := downloadAttachment(context.Background(), cache, client, "id", 1024)
require.NoError(t, err)
require.Equal(t, attachment, downloaded)
}
@ -343,7 +344,7 @@ func TestDownloadStage_JobAbortsOnAttachmentDownloadError(t *testing.T) {
ID: "attach",
}},
}, nil)
tj.client.EXPECT().GetAttachment(gomock.Any(), gomock.Eq("attach")).Return(nil, expectedErr)
tj.client.EXPECT().GetAttachmentInto(gomock.Any(), gomock.Eq("attach"), gomock.Any()).Return(expectedErr)
tj.job.begin()
childJob := tj.job.newChildJob("f", 10)
@ -403,7 +404,13 @@ func buildDownloadStageData(tj *tjob, numMessages int, with422 bool) ([]string,
tj.client.EXPECT().GetMessage(gomock.Any(), gomock.Eq(m.ID)).Return(m.Message, nil)
for idx, a := range m.Attachments {
tj.client.EXPECT().GetAttachment(gomock.Any(), gomock.Eq(a.ID)).Return(m.AttData[idx], nil)
attData := m.AttData[idx]
tj.client.EXPECT().GetAttachmentInto(gomock.Any(), gomock.Eq(a.ID), gomock.Any()).DoAndReturn(
func(_ context.Context, _ string, b *bytes.Buffer) error {
_, err := b.Write(attData)
return err
},
)
}
}

View File

@ -57,8 +57,8 @@ func NewMetadataStage(
}
}
const MetadataPageSize = 150
const MetadataMaxMessages = 250
const MetadataPageSize = 128
const MetadataMaxMessages = 64
func (m *MetadataStage) Run(group *async.Group) {
group.Once(func(ctx context.Context) {