Files
proton-bridge/internal/services/syncservice/service.go
Leander Beernaert 7a1c7e8743 fix(GODT-3124): Handling of sync child jobs
Improve the handling of sync child jobs to ensure it behaves correctly
in all scenarios.

The sync service now uses an isolated context to avoid all the pipeline
stages shutting down before all the sync tasks have had the opportunity
to run their course.

The job waiter now immediately starts with a counter of 1 and waits
until all the child jobs and the parent job have finished before
considering the work to be done.

Finally, we also handle the case where a sync job can't be queued
because the calling context has been cancelled.
2023-11-29 18:04:22 +00:00

75 lines
2.3 KiB
Go

// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
package syncservice
import (
"context"
"github.com/ProtonMail/gluon/async"
"github.com/ProtonMail/gluon/reporter"
)
// Service which mediates IMAP syncing in Bridge.
//
// Messages flow through four stages connected by channels (see NewService's
// wiring): metadata -> download -> build -> apply.
// IMPORTANT: Be sure to cancel all ongoing sync Handlers before cancelling this service's Group.
type Service struct {
	metadataStage *MetadataStage // First stage; consumes jobs from metaCh.
	downloadStage *DownloadStage // Second stage; fed by the metadata stage.
	buildStage    *BuildStage    // Third stage; fed by the download stage.
	applyStage    *ApplyStage    // Final stage; fed by the build stage.
	limits        syncLimits     // Memory/parallelism budgets shared out to the stages.

	// metaCh is the pipeline's entry point; Sync produces jobs onto it.
	metaCh *ChannelConsumerProducer[*Job]

	// group runs the stage goroutines. It is rooted in context.Background()
	// (not a caller context) so the pipeline only stops via Close.
	group *async.Group
}
// NewService builds a sync Service with its four pipeline stages wired
// together via channels: metadata -> download -> build -> apply.
//
// The stage group is deliberately rooted in context.Background() rather than
// a caller-supplied context, so the pipeline is not torn down by an unrelated
// cancellation; it is only stopped via Close.
func NewService(reporter reporter.Reporter,
	panicHandler async.PanicHandler,
) *Service {
	// Channels linking consecutive stages; metaCh is also the service's
	// public entry point (see Sync).
	var (
		metaCh     = NewChannelConsumerProducer[*Job]()
		downloadCh = NewChannelConsumerProducer[DownloadRequest]()
		buildCh    = NewChannelConsumerProducer[BuildRequest]()
		applyCh    = NewChannelConsumerProducer[ApplyRequest]()
	)

	lims := newSyncLimits(2 * Gigabyte)

	return &Service{
		limits:        lims,
		metadataStage: NewMetadataStage(metaCh, downloadCh, lims.DownloadRequestMem, panicHandler),
		downloadStage: NewDownloadStage(downloadCh, buildCh, lims.MaxParallelDownloads, panicHandler),
		buildStage:    NewBuildStage(buildCh, applyCh, lims.MessageBuildMem, panicHandler, reporter),
		applyStage:    NewApplyStage(applyCh),
		metaCh:        metaCh,
		group:         async.NewGroup(context.Background(), panicHandler),
	}
}
// Run launches every pipeline stage on the service's group and returns;
// the stages keep running until the group is cancelled (see Close).
func (s *Service) Run() {
	s.metadataStage.Run(s.group)
	s.downloadStage.Run(s.group)
	s.buildStage.Run(s.group)
	s.applyStage.Run(s.group)
}
// Sync queues the given sync job onto the first (metadata) pipeline stage.
// A non-nil error means the job could not be queued — e.g. when ctx is
// cancelled before the job is accepted; the job does not run in that case.
func (s *Service) Sync(ctx context.Context, stage *Job) error {
	return s.metaCh.Produce(ctx, stage)
}
// Close shuts the service down: it cancels the stage group's context and
// waits for all stage goroutines to exit, and only then closes the input
// channel. The ordering matters — the channel must outlive the goroutines
// that may still be consuming from it.
func (s *Service) Close() {
	s.group.CancelAndWait()
	s.metaCh.Close()
}