GODT-1158: Store full message bodies on disk

- GODT-1158: simple on-disk cache in store
- GODT-1158: better member naming in event loop
- GODT-1158: create on-disk cache during bridge setup
- GODT-1158: better job options
- GODT-1158: rename GetLiteral to GetRFC822
- GODT-1158: rename events -> currentEvents
- GODT-1158: unlock cache per-user
- GODT-1158: clean up cache after logout
- GODT-1158: randomized encrypted cache passphrase
- GODT-1158: Opt out of on-disk cache in settings
- GODT-1158: free space in cache
- GODT-1158: make tests compile
- GODT-1158: optional compression
- GODT-1158: cache custom location
- GODT-1158: basic capacity checker
- GODT-1158: cache free space config
- GODT-1158: only unlock cache if pmapi client is unlocked as well
- GODT-1158: simple background sync worker
- GODT-1158: set size/bodystructure when caching message
- GODT-1158: limit store db update blocking with semaphore (see the sketch below the commit metadata)
- GODT-1158: dumb 10-semaphore
- GODT-1158: properly handle delete; remove bad bodystructure handling
- GODT-1158: hacky fix for caching after logout... baaaaad
- GODT-1158: cache worker
- GODT-1158: compute body structure lazily
- GODT-1158: cache size in store
- GODT-1158: notify cacher when adding to store
- GODT-1158: 15 second store cache watcher
- GODT-1158: enable cacher
- GODT-1158: better cache worker starting/stopping
- GODT-1158: limit cacher to less concurrency than disk cache
- GODT-1158: message builder prio + pchan pkg
- GODT-1158: fix pchan, use in message builder
- GODT-1158: no sem in cacher (rely on message builder prio)
- GODT-1158: raise priority of existing jobs when requested
- GODT-1158: pending messages in on-disk cache
- GODT-1158: WIP just a note about deleting messages from disk cache
- GODT-1158: pending wait when trying to write
- GODT-1158: pending.add to return bool
- GODT-1225: Headers in bodystructure are stored as bytes.
- GODT-1158: fixing header caching
- GODT-1158: don't cache in background
- GODT-1158: all concurrency set in settings
- GODT-1158: worker pools inside message builder
- GODT-1158: fix linter issues
- GODT-1158: remove completed builds from builder
- GODT-1158: remove builder pool
- GODT-1158: cacher defer job done properly
- GODT-1158: fix linter
- GODT-1299: Continue with bodystructure build if deserialization failed
- GODT-1324: Delete messages from the cache when they are deleted on the server
- GODT-1158: refactor cache tests
- GODT-1158: move builder to app/bridge
- GODT-1306: Migrate cache on disk when location is changed (and delete when disabled)
James Houlahan
2021-07-30 12:20:38 +02:00
committed by Jakub
parent 5cb893fc1b
commit 6bd0739013
79 changed files with 2911 additions and 1387 deletions
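The changelog entries "limit store db update blocking with semaphore" and "dumb 10-semaphore" boil down to a plain counting semaphore around store database updates. A minimal sketch of that pattern in Go follows; the package name, identifiers and wrapper function are illustrative rather than the bridge's actual store code, and only the capacity of 10 is taken from the changelog.

package store

// semaphore is a counting semaphore built on a buffered channel:
// acquiring sends a token into the channel, releasing takes one back out.
type semaphore chan struct{}

func newSemaphore(n int) semaphore { return make(semaphore, n) }

func (s semaphore) acquire() { s <- struct{}{} }
func (s semaphore) release() { <-s }

// updateSem caps how many store database updates may block concurrently
// (the "dumb 10-semaphore" from the changelog above).
var updateSem = newSemaphore(10)

// withUpdateSlot runs fn while holding one of the ten update slots,
// blocking until a slot is free.
func withUpdateSlot(fn func() error) error {
	updateSem.acquire()
	defer updateSem.release()

	return fn()
}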


@@ -20,10 +20,12 @@ package message
import (
"context"
"io"
"io/ioutil"
"sync"
"github.com/ProtonMail/gopenpgp/v2/crypto"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
"github.com/ProtonMail/proton-bridge/pkg/pool"
"github.com/pkg/errors"
)
@@ -32,11 +34,15 @@ var (
ErrNoSuchKeyRing = errors.New("the keyring to decrypt this message could not be found")
)
const (
BackgroundPriority = 1 << iota
ForegroundPriority
)
type Builder struct {
reqs chan fetchReq
done chan struct{}
jobs map[string]*BuildJob
locker sync.Mutex
pool *pool.Pool
jobs map[string]*Job
lock sync.Mutex
}
type Fetcher interface {
@@ -48,111 +54,159 @@ type Fetcher interface {
// NewBuilder creates a new builder which manages the given number of fetch/attach/build workers.
// - fetchWorkers: the number of workers which fetch messages from API
// - attachWorkers: the number of workers which fetch attachments from API.
// - buildWorkers: the number of workers which decrypt/build RFC822 message literals.
//
// NOTE: Each fetch worker spawns a unique set of attachment workers!
// There can therefore be up to fetchWorkers*attachWorkers simultaneous API connections.
//
// The returned builder is ready to handle jobs -- see (*Builder).NewJob for more information.
//
// Call (*Builder).Done to shut down the builder and stop all workers.
func NewBuilder(fetchWorkers, attachWorkers, buildWorkers int) *Builder {
b := newBuilder()
func NewBuilder(fetchWorkers, attachWorkers int) *Builder {
attacherPool := pool.New(attachWorkers, newAttacherWorkFunc())
fetchReqCh, fetchResCh := startFetchWorkers(fetchWorkers, attachWorkers)
buildReqCh, buildResCh := startBuildWorkers(buildWorkers)
fetcherPool := pool.New(fetchWorkers, newFetcherWorkFunc(attacherPool))
go func() {
defer close(fetchReqCh)
for {
select {
case req := <-b.reqs:
fetchReqCh <- req
case <-b.done:
return
}
}
}()
go func() {
defer close(buildReqCh)
for res := range fetchResCh {
if res.err != nil {
b.jobFailure(res.messageID, res.err)
} else {
buildReqCh <- res
}
}
}()
go func() {
for res := range buildResCh {
if res.err != nil {
b.jobFailure(res.messageID, res.err)
} else {
b.jobSuccess(res.messageID, res.literal)
}
}
}()
return b
}
func newBuilder() *Builder {
return &Builder{
reqs: make(chan fetchReq),
done: make(chan struct{}),
jobs: make(map[string]*BuildJob),
pool: fetcherPool,
jobs: make(map[string]*Job),
}
}
// NewJob tells the builder to begin building the message with the given ID.
// The result (or any error which occurred during building) can be retrieved from the returned job when available.
func (b *Builder) NewJob(ctx context.Context, api Fetcher, messageID string) *BuildJob {
return b.NewJobWithOptions(ctx, api, messageID, JobOptions{})
func (builder *Builder) NewJob(ctx context.Context, fetcher Fetcher, messageID string, prio int) (*Job, pool.DoneFunc) {
return builder.NewJobWithOptions(ctx, fetcher, messageID, JobOptions{}, prio)
}
// NewJobWithOptions creates a new job with custom options. See NewJob for more information.
func (b *Builder) NewJobWithOptions(ctx context.Context, api Fetcher, messageID string, opts JobOptions) *BuildJob {
b.locker.Lock()
defer b.locker.Unlock()
func (builder *Builder) NewJobWithOptions(ctx context.Context, fetcher Fetcher, messageID string, opts JobOptions, prio int) (*Job, pool.DoneFunc) {
builder.lock.Lock()
defer builder.lock.Unlock()
if job, ok := b.jobs[messageID]; ok {
return job
if job, ok := builder.jobs[messageID]; ok {
if job.GetPriority() < prio {
job.SetPriority(prio)
}
return job, job.done
}
b.jobs[messageID] = newBuildJob(messageID)
job, done := builder.pool.NewJob(
&fetchReq{
fetcher: fetcher,
messageID: messageID,
options: opts,
},
prio,
)
go func() { b.reqs <- fetchReq{ctx: ctx, api: api, messageID: messageID, opts: opts} }()
buildJob := &Job{
Job: job,
done: done,
}
return b.jobs[messageID]
builder.jobs[messageID] = buildJob
return buildJob, func() {
builder.lock.Lock()
defer builder.lock.Unlock()
// Remove the job from the builder.
delete(builder.jobs, messageID)
// And mark it as done.
done()
}
}
// Done shuts down the builder and stops all workers.
func (b *Builder) Done() {
b.locker.Lock()
defer b.locker.Unlock()
close(b.done)
func (builder *Builder) Done() {
// NOTE(GODT-1158): Stop worker pool.
}
func (b *Builder) jobSuccess(messageID string, literal []byte) {
b.locker.Lock()
defer b.locker.Unlock()
b.jobs[messageID].postSuccess(literal)
delete(b.jobs, messageID)
type fetchReq struct {
fetcher Fetcher
messageID string
options JobOptions
}
func (b *Builder) jobFailure(messageID string, err error) {
b.locker.Lock()
defer b.locker.Unlock()
b.jobs[messageID].postFailure(err)
delete(b.jobs, messageID)
type attachReq struct {
fetcher Fetcher
message *pmapi.Message
}
type Job struct {
*pool.Job
done pool.DoneFunc
}
func (job *Job) GetResult() ([]byte, error) {
res, err := job.Job.GetResult()
if err != nil {
return nil, err
}
return res.([]byte), nil
}
func newAttacherWorkFunc() pool.WorkFunc {
return func(payload interface{}, prio int) (interface{}, error) {
req, ok := payload.(*attachReq)
if !ok {
panic("bad payload type")
}
res := make(map[string][]byte)
for _, att := range req.message.Attachments {
rc, err := req.fetcher.GetAttachment(context.Background(), att.ID)
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
if err := rc.Close(); err != nil {
return nil, err
}
res[att.ID] = b
}
return res, nil
}
}
func newFetcherWorkFunc(attacherPool *pool.Pool) pool.WorkFunc {
return func(payload interface{}, prio int) (interface{}, error) {
req, ok := payload.(*fetchReq)
if !ok {
panic("bad payload type")
}
msg, err := req.fetcher.GetMessage(context.Background(), req.messageID)
if err != nil {
return nil, err
}
attJob, attDone := attacherPool.NewJob(&attachReq{
fetcher: req.fetcher,
message: msg,
}, prio)
defer attDone()
val, err := attJob.GetResult()
if err != nil {
return nil, err
}
attData, ok := val.(map[string][]byte)
if !ok {
panic("bad response type")
}
kr, err := req.fetcher.KeyRingForAddressID(msg.AddressID)
if err != nil {
return nil, ErrNoSuchKeyRing
}
return buildRFC822(kr, msg, attData, req.options)
}
}
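Taken together, the new builder API above comes down to: create a builder with fetch and attachment worker counts, submit a job at a priority, block on the result, and release the job when finished. A short usage sketch, assuming fetcher is anything satisfying the message.Fetcher interface (for example a pmapi client); the worker counts shown are placeholders for the values the bridge reads from settings.

package example

import (
	"context"

	"github.com/ProtonMail/proton-bridge/pkg/message"
)

// fetchLiteral builds one message at foreground priority and blocks for the result.
func fetchLiteral(ctx context.Context, fetcher message.Fetcher, messageID string) ([]byte, error) {
	builder := message.NewBuilder(4, 8) // fetch workers, attachment workers (illustrative counts)
	defer builder.Done()

	// Foreground jobs outrank background cacher jobs; if this message is already
	// queued at a lower priority, its existing job is raised instead of duplicated.
	job, done := builder.NewJob(ctx, fetcher, messageID, message.ForegroundPriority)
	defer done() // Drops the job from the builder's map and marks the pool job done.

	// Blocks until the RFC822 literal has been fetched, decrypted and built.
	return job.GetResult()
}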


@@ -1,89 +0,0 @@
// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
package message
import (
"sync"
"github.com/pkg/errors"
)
type buildRes struct {
messageID string
literal []byte
err error
}
func newBuildResSuccess(messageID string, literal []byte) buildRes {
return buildRes{
messageID: messageID,
literal: literal,
}
}
func newBuildResFailure(messageID string, err error) buildRes {
return buildRes{
messageID: messageID,
err: err,
}
}
// startBuildWorkers starts the given number of build workers.
// These workers decrypt and build messages into RFC822 literals.
// Two channels are returned:
// - buildReqCh: used to send work items to the worker pool
// - buildResCh: used to receive work results from the worker pool
func startBuildWorkers(buildWorkers int) (chan fetchRes, chan buildRes) {
buildReqCh := make(chan fetchRes)
buildResCh := make(chan buildRes)
go func() {
defer close(buildResCh)
var wg sync.WaitGroup
wg.Add(buildWorkers)
for workerID := 0; workerID < buildWorkers; workerID++ {
go buildWorker(buildReqCh, buildResCh, &wg)
}
wg.Wait()
}()
return buildReqCh, buildResCh
}
func buildWorker(buildReqCh <-chan fetchRes, buildResCh chan<- buildRes, wg *sync.WaitGroup) {
defer wg.Done()
for req := range buildReqCh {
l := log.
WithField("addrID", req.msg.AddressID).
WithField("msgID", req.msg.ID)
if kr, err := req.api.KeyRingForAddressID(req.msg.AddressID); err != nil {
l.WithError(err).Warn("Cannot find keyring for address")
buildResCh <- newBuildResFailure(req.msg.ID, errors.Wrap(ErrNoSuchKeyRing, err.Error()))
} else if literal, err := buildRFC822(kr, req.msg, req.atts, req.opts); err != nil {
l.WithError(err).Warn("Build failed")
buildResCh <- newBuildResFailure(req.msg.ID, err)
} else {
buildResCh <- newBuildResSuccess(req.msg.ID, literal)
}
}
}


@@ -1,141 +0,0 @@
// Copyright (c) 2021 Proton Technologies AG
//
// This file is part of ProtonMail Bridge.
//
// ProtonMail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// ProtonMail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
package message
import (
"context"
"io/ioutil"
"sync"
"github.com/ProtonMail/proton-bridge/pkg/parallel"
"github.com/ProtonMail/proton-bridge/pkg/pmapi"
)
type fetchReq struct {
ctx context.Context
api Fetcher
messageID string
opts JobOptions
}
type fetchRes struct {
fetchReq
msg *pmapi.Message
atts [][]byte
err error
}
func newFetchResSuccess(req fetchReq, msg *pmapi.Message, atts [][]byte) fetchRes {
return fetchRes{
fetchReq: req,
msg: msg,
atts: atts,
}
}
func newFetchResFailure(req fetchReq, err error) fetchRes {
return fetchRes{
fetchReq: req,
err: err,
}
}
// startFetchWorkers starts the given number of fetch workers.
// These workers download message and attachment data from API.
// Each fetch worker will use up to the given number of attachment workers to download attachments.
// Two channels are returned:
// - fetchReqCh: used to send work items to the worker pool
// - fetchResCh: used to receive work results from the worker pool
func startFetchWorkers(fetchWorkers, attachWorkers int) (chan fetchReq, chan fetchRes) {
fetchReqCh := make(chan fetchReq)
fetchResCh := make(chan fetchRes)
go func() {
defer close(fetchResCh)
var wg sync.WaitGroup
wg.Add(fetchWorkers)
for workerID := 0; workerID < fetchWorkers; workerID++ {
go fetchWorker(fetchReqCh, fetchResCh, attachWorkers, &wg)
}
wg.Wait()
}()
return fetchReqCh, fetchResCh
}
func fetchWorker(fetchReqCh <-chan fetchReq, fetchResCh chan<- fetchRes, attachWorkers int, wg *sync.WaitGroup) {
defer wg.Done()
for req := range fetchReqCh {
msg, atts, err := fetchMessage(req, attachWorkers)
if err != nil {
fetchResCh <- newFetchResFailure(req, err)
} else {
fetchResCh <- newFetchResSuccess(req, msg, atts)
}
}
}
func fetchMessage(req fetchReq, attachWorkers int) (*pmapi.Message, [][]byte, error) {
msg, err := req.api.GetMessage(req.ctx, req.messageID)
if err != nil {
return nil, nil, err
}
attList := make([]interface{}, len(msg.Attachments))
for i, att := range msg.Attachments {
attList[i] = att.ID
}
process := func(value interface{}) (interface{}, error) {
rc, err := req.api.GetAttachment(req.ctx, value.(string))
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(rc)
if err != nil {
return nil, err
}
if err := rc.Close(); err != nil {
return nil, err
}
return b, nil
}
attData := make([][]byte, len(msg.Attachments))
collect := func(idx int, value interface{}) error {
attData[idx] = value.([]byte) //nolint[forcetypeassert] we want to panic here
return nil
}
if err := parallel.RunParallel(attachWorkers, attList, process, collect); err != nil {
return nil, nil, err
}
return msg, attData, nil
}


@@ -25,35 +25,3 @@ type JobOptions struct {
AddMessageDate bool // Whether to include message time as X-Pm-Date.
AddMessageIDReference bool // Whether to include the MessageID in References.
}
type BuildJob struct {
messageID string
literal []byte
err error
done chan struct{}
}
func newBuildJob(messageID string) *BuildJob {
return &BuildJob{
messageID: messageID,
done: make(chan struct{}),
}
}
// GetResult returns the build result or any error which occurred during building.
// If the result is not ready yet, it blocks.
func (job *BuildJob) GetResult() ([]byte, error) {
<-job.done
return job.literal, job.err
}
func (job *BuildJob) postSuccess(literal []byte) {
job.literal = literal
close(job.done)
}
func (job *BuildJob) postFailure(err error) {
job.err = err
close(job.done)
}
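Callers that previously held a BuildJob and needed custom options now go through NewJobWithOptions with the same job/done pair. A hedged sketch, reusing the assumed fetcher from the sketch above; the option choices and the background priority are illustrative, not necessarily what the bridge passes.

package example

import (
	"context"

	"github.com/ProtonMail/proton-bridge/pkg/message"
)

// buildWithOptions builds a message with explicit JobOptions at background priority.
func buildWithOptions(ctx context.Context, builder *message.Builder, fetcher message.Fetcher, messageID string) ([]byte, error) {
	job, done := builder.NewJobWithOptions(ctx, fetcher, messageID, message.JobOptions{
		AddMessageDate:        true, // include the message time as X-Pm-Date
		AddMessageIDReference: true, // include the MessageID in References
	}, message.BackgroundPriority)
	defer done()

	return job.GetResult()
}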


@@ -34,7 +34,7 @@ import (
"github.com/pkg/errors"
)
func buildRFC822(kr *crypto.KeyRing, msg *pmapi.Message, attData [][]byte, opts JobOptions) ([]byte, error) {
func buildRFC822(kr *crypto.KeyRing, msg *pmapi.Message, attData map[string][]byte, opts JobOptions) ([]byte, error) {
switch {
case len(msg.Attachments) > 0:
return buildMultipartRFC822(kr, msg, attData, opts)
@@ -80,7 +80,7 @@ func buildSimpleRFC822(kr *crypto.KeyRing, msg *pmapi.Message, opts JobOptions)
func buildMultipartRFC822(
kr *crypto.KeyRing,
msg *pmapi.Message,
attData [][]byte,
attData map[string][]byte,
opts JobOptions,
) ([]byte, error) {
boundary := newBoundary(msg.ID)
@@ -103,13 +103,13 @@ func buildMultipartRFC822(
attachData [][]byte
)
for i, att := range msg.Attachments {
for _, att := range msg.Attachments {
if att.Disposition == pmapi.DispositionInline {
inlineAtts = append(inlineAtts, att)
inlineData = append(inlineData, attData[i])
inlineData = append(inlineData, attData[att.ID])
} else {
attachAtts = append(attachAtts, att)
attachData = append(attachData, attData[i])
attachData = append(attachData, attData[att.ID])
}
}

File diff suppressed because it is too large


@@ -38,9 +38,10 @@ type BodyStructure map[string]*SectionInfo
// SectionInfo is used to hold data about parts of each section.
type SectionInfo struct {
Header textproto.MIMEHeader
Header []byte
Start, BSize, Size, Lines int
reader io.Reader
isHeaderReadFinished bool
}
// Read will also count the final size of section.
@@ -48,9 +49,38 @@ func (si *SectionInfo) Read(p []byte) (n int, err error) {
n, err = si.reader.Read(p)
si.Size += n
si.Lines += bytes.Count(p, []byte("\n"))
si.readHeader(p)
return
}
// readHeader appends read data to Header until an empty line is found.
func (si *SectionInfo) readHeader(p []byte) {
if si.isHeaderReadFinished {
return
}
si.Header = append(si.Header, p...)
if i := bytes.Index(si.Header, []byte("\n\r\n")); i > 0 {
si.Header = si.Header[:i+3]
si.isHeaderReadFinished = true
return
}
// textproto also works with simple line endings, so we should be liberal
// as well.
if i := bytes.Index(si.Header, []byte("\n\n")); i > 0 {
si.Header = si.Header[:i+2]
si.isHeaderReadFinished = true
}
}
// GetMIMEHeader parses the stored header bytes and returns the MIME header.
func (si *SectionInfo) GetMIMEHeader() (textproto.MIMEHeader, error) {
return textproto.NewReader(bufio.NewReader(bytes.NewReader(si.Header))).ReadMIMEHeader()
}
func NewBodyStructure(reader io.Reader) (structure *BodyStructure, err error) {
structure = &BodyStructure{}
err = structure.Parse(reader)
@@ -93,14 +123,15 @@ func (bs *BodyStructure) parseAllChildSections(r io.Reader, currentPath []int, s
bufInfo := bufio.NewReader(info)
tp := textproto.NewReader(bufInfo)
if info.Header, err = tp.ReadMIMEHeader(); err != nil {
tpHeader, err := tp.ReadMIMEHeader()
if err != nil {
return
}
bodyInfo := &SectionInfo{reader: tp.R}
bodyReader := bufio.NewReader(bodyInfo)
mediaType, params, _ := pmmime.ParseMediaType(info.Header.Get("Content-Type"))
mediaType, params, _ := pmmime.ParseMediaType(tpHeader.Get("Content-Type"))
// If multipart, call getAllParts, else read to count lines.
if (strings.HasPrefix(mediaType, "multipart/") || mediaType == rfc822Message) && params["boundary"] != "" {
@@ -260,9 +291,9 @@ func (bs *BodyStructure) GetMailHeader() (header textproto.MIMEHeader, err error
}
// GetMailHeaderBytes returns the bytes with main mail header.
// Warning: It can contain extra lines or multipart comment.
func (bs *BodyStructure) GetMailHeaderBytes(wholeMail io.ReadSeeker) (header []byte, err error) {
return bs.GetSectionHeaderBytes(wholeMail, []int{})
// Warning: It can contain extra lines.
func (bs *BodyStructure) GetMailHeaderBytes() (header []byte, err error) {
return bs.GetSectionHeaderBytes([]int{})
}
func goToOffsetAndReadNBytes(wholeMail io.ReadSeeker, offset, length int) ([]byte, error) {
@@ -283,22 +314,21 @@ func goToOffsetAndReadNBytes(wholeMail io.ReadSeeker, offset, length int) ([]byt
}
// GetSectionHeader returns the mime header of specified section.
func (bs *BodyStructure) GetSectionHeader(sectionPath []int) (header textproto.MIMEHeader, err error) {
func (bs *BodyStructure) GetSectionHeader(sectionPath []int) (textproto.MIMEHeader, error) {
info, err := bs.getInfoCheckSection(sectionPath)
if err != nil {
return
return nil, err
}
header = info.Header
return
return info.GetMIMEHeader()
}
func (bs *BodyStructure) GetSectionHeaderBytes(wholeMail io.ReadSeeker, sectionPath []int) (header []byte, err error) {
// GetSectionHeaderBytes returns the raw header bytes of the specified section.
func (bs *BodyStructure) GetSectionHeaderBytes(sectionPath []int) ([]byte, error) {
info, err := bs.getInfoCheckSection(sectionPath)
if err != nil {
return
return nil, err
}
headerLength := info.Size - info.BSize
return goToOffsetAndReadNBytes(wholeMail, info.Start, headerLength)
return info.Header, nil
}
// IMAPBodyStructure will prepare imap bodystructure recursively for given part.
@@ -309,7 +339,12 @@ func (bs *BodyStructure) IMAPBodyStructure(currentPart []int) (imapBS *imap.Body
return
}
mediaType, params, _ := pmmime.ParseMediaType(info.Header.Get("Content-Type"))
tpHeader, err := info.GetMIMEHeader()
if err != nil {
return
}
mediaType, params, _ := pmmime.ParseMediaType(tpHeader.Get("Content-Type"))
mediaTypeSep := strings.Split(mediaType, "/")
@@ -324,19 +359,19 @@ func (bs *BodyStructure) IMAPBodyStructure(currentPart []int) (imapBS *imap.Body
Lines: uint32(info.Lines),
}
if val := info.Header.Get("Content-ID"); val != "" {
if val := tpHeader.Get("Content-ID"); val != "" {
imapBS.Id = val
}
if val := info.Header.Get("Content-Transfer-Encoding"); val != "" {
if val := tpHeader.Get("Content-Transfer-Encoding"); val != "" {
imapBS.Encoding = val
}
if val := info.Header.Get("Content-Description"); val != "" {
if val := tpHeader.Get("Content-Description"); val != "" {
imapBS.Description = val
}
if val := info.Header.Get("Content-Disposition"); val != "" {
if val := tpHeader.Get("Content-Disposition"); val != "" {
imapBS.Disposition = val
}
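The net effect of the bodystructure change above: SectionInfo now captures the raw header bytes while a section is being read, and MIME parsing happens on demand via GetMIMEHeader, so callers no longer pass the whole mail reader back in. A small consumer sketch, assuming these types stay exported from the same pkg/message package as the builder.

package example

import (
	"bytes"

	"github.com/ProtonMail/proton-bridge/pkg/message"
)

// sectionHeader parses a literal once, then returns the cached raw header bytes
// and the lazily parsed Content-Type of one section.
func sectionHeader(literal []byte, sectionPath []int) ([]byte, string, error) {
	bs, err := message.NewBodyStructure(bytes.NewReader(literal))
	if err != nil {
		return nil, "", err
	}

	// Raw bytes were stored on the SectionInfo during parsing; no second pass
	// over the literal is needed (GetSectionHeaderBytes no longer takes a reader).
	raw, err := bs.GetSectionHeaderBytes(sectionPath)
	if err != nil {
		return nil, "", err
	}

	// The textproto MIME header is built from those bytes only when asked for.
	header, err := bs.GetSectionHeader(sectionPath)
	if err != nil {
		return nil, "", err
	}

	return raw, header.Get("Content-Type"), nil
}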


@@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io/ioutil"
"net/textproto"
"path/filepath"
"runtime"
"sort"
@@ -71,7 +70,9 @@ func TestParseBodyStructure(t *testing.T) {
debug("%10s: %-50s %5s %5s %5s %5s", "section", "type", "start", "size", "bsize", "lines")
for _, path := range paths {
sec := (*bs)[path]
contentType := (*bs)[path].Header.Get("Content-Type")
header, err := sec.GetMIMEHeader()
require.NoError(t, err)
contentType := header.Get("Content-Type")
debug("%10s: %-50s %5d %5d %5d %5d", path, contentType, sec.Start, sec.Size, sec.BSize, sec.Lines)
require.Equal(t, expectedStructure[path], contentType)
}
@@ -100,7 +101,9 @@ func TestParseBodyStructurePGP(t *testing.T) {
haveStructure := map[string]string{}
for path := range *bs {
haveStructure[path] = (*bs)[path].Header.Get("Content-Type")
header, err := (*bs)[path].GetMIMEHeader()
require.NoError(t, err)
haveStructure[path] = header.Get("Content-Type")
}
require.Equal(t, expectedStructure, haveStructure)
@@ -192,7 +195,7 @@ Content-Type: plain/text
r.NoError(err, debug(wantPath, info, haveBody))
r.Equal(wantBody, string(haveBody), debug(wantPath, info, haveBody))
haveHeader, err := bs.GetSectionHeaderBytes(strings.NewReader(wantMail), wantPath)
haveHeader, err := bs.GetSectionHeaderBytes(wantPath)
r.NoError(err, debug(wantPath, info, haveHeader))
r.Equal(wantHeader, string(haveHeader), debug(wantPath, info, haveHeader))
}
@@ -211,7 +214,7 @@ Content-Type: multipart/mixed; boundary="0000MAIN"
bs, err := NewBodyStructure(structReader)
require.NoError(t, err)
haveHeader, err := bs.GetMailHeaderBytes(strings.NewReader(sampleMail))
haveHeader, err := bs.GetMailHeaderBytes()
require.NoError(t, err)
require.Equal(t, wantHeader, haveHeader)
}
@@ -533,18 +536,14 @@ func TestBodyStructureSerialize(t *testing.T) {
r := require.New(t)
want := &BodyStructure{
"1": {
Header: textproto.MIMEHeader{
"Content": []string{"type"},
},
Start: 1,
Size: 2,
BSize: 3,
Lines: 4,
Header: []byte("Content: type"),
Start: 1,
Size: 2,
BSize: 3,
Lines: 4,
},
"1.1.1": {
Header: textproto.MIMEHeader{
"X-Pm-Key": []string{"id"},
},
Header: []byte("X-Pm-Key: id"),
Start: 11,
Size: 12,
BSize: 13,
@@ -562,3 +561,32 @@ func TestBodyStructureSerialize(t *testing.T) {
(*want)["1.1.1"].reader = nil
r.Equal(want, have)
}
func TestSectionInfoReadHeader(t *testing.T) {
r := require.New(t)
testData := []struct {
wantHeader, mail string
}{
{
"key1: val1\nkey2: val2\n\n",
"key1: val1\nkey2: val2\n\nbody is here\n\nand it is not confused",
},
{
"key1:\n val1\n\n",
"key1:\n val1\n\nbody is here",
},
{
"key1: val1\r\nkey2: val2\r\n\r\n",
"key1: val1\r\nkey2: val2\r\n\r\nbody is here\r\n\r\nand it is not confused",
},
}
for _, td := range testData {
bs, err := NewBodyStructure(strings.NewReader(td.mail))
r.NoError(err, "case %q", td.mail)
haveHeader, err := bs.GetMailHeaderBytes()
r.NoError(err, "case %q", td.mail)
r.Equal(td.wantHeader, string(haveHeader), "case %q", td.mail)
}
}