Mirror of https://github.com/ProtonMail/proton-bridge.git (synced 2025-12-10 12:46:46 +00:00)

Commit: feat(GODT-2685): update to bug report log attachment logic.
Changes to the bug report code in package bridge (file name not shown in this view):

@@ -18,13 +18,8 @@
 package bridge

 import (
-    "archive/zip"
-    "bytes"
     "context"
     "io"
-    "os"
-    "path/filepath"
-    "sort"

     "github.com/ProtonMail/go-proton-api"
     "github.com/ProtonMail/proton-bridge/v3/internal/constants"
@@ -33,8 +28,8 @@ import (
 )

 const (
-    MaxTotalAttachmentSize  = 7 * (1 << 20)
-    MaxCompressedFilesCount = 6
+    DefaultMaxBugReportZipSize         = 7 * 1024 * 1024
+    DefaultMaxSessionCountForBugReport = 10
 )

 func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, description, username, email, client string, attachLogs bool) error {
@@ -50,54 +45,25 @@ func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, descript
         }
     }

-    var atts []proton.ReportBugAttachment
+    var attachment []proton.ReportBugAttachment

     if attachLogs {
-        logs, err := getMatchingLogs(bridge.locator, func(filename string) bool {
-            return logging.MatchBridgeLogName(filename) && !logging.MatchStackTraceName(filename)
-        })
+        logsPath, err := bridge.locator.ProvideLogsPath()
         if err != nil {
             return err
         }

-        crashes, err := getMatchingLogs(bridge.locator, func(filename string) bool {
-            return logging.MatchBridgeLogName(filename) && logging.MatchStackTraceName(filename)
-        })
+        buffer, err := logging.ZipLogsForBugReport(logsPath, DefaultMaxSessionCountForBugReport, DefaultMaxBugReportZipSize)
         if err != nil {
             return err
         }

-        guiLogs, err := getMatchingLogs(bridge.locator, func(filename string) bool {
-            return logging.MatchGUILogName(filename) && !logging.MatchStackTraceName(filename)
-        })
+        body, err := io.ReadAll(buffer)
         if err != nil {
             return err
         }

-        var matchFiles []string
-
-        // Include bridge logs, up to a maximum amount.
-        matchFiles = append(matchFiles, logs[max(0, len(logs)-(MaxCompressedFilesCount/2)):]...)
-
-        // Include crash logs, up to a maximum amount.
-        matchFiles = append(matchFiles, crashes[max(0, len(crashes)-(MaxCompressedFilesCount/2)):]...)
-
-        // bridge-gui keeps just one small (~ 1kb) log file; we always include it.
-        if len(guiLogs) > 0 {
-            matchFiles = append(matchFiles, guiLogs[len(guiLogs)-1])
-        }
-
-        archive, err := zipFiles(matchFiles)
-        if err != nil {
-            return err
-        }
-
-        body, err := io.ReadAll(archive)
-        if err != nil {
-            return err
-        }
-
-        atts = append(atts, proton.ReportBugAttachment{
+        attachment = append(attachment, proton.ReportBugAttachment{
             Name:     "logs.zip",
             Filename: "logs.zip",
             MIMEType: "application/zip",
@@ -118,116 +84,5 @@ func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, descript

             Username: account,
             Email:    email,
-        }, atts...)
+        }, attachment...)
-    }
-
-func max(a, b int) int {
-    if a > b {
-        return a
-    }
-
-    return b
-}
-
-func getMatchingLogs(locator Locator, filenameMatchFunc func(string) bool) (filenames []string, err error) {
-    logsPath, err := locator.ProvideLogsPath()
-    if err != nil {
-        return nil, err
-    }
-
-    files, err := os.ReadDir(logsPath)
-    if err != nil {
-        return nil, err
-    }
-
-    var matchFiles []string
-
-    for _, file := range files {
-        if filenameMatchFunc(file.Name()) {
-            matchFiles = append(matchFiles, filepath.Join(logsPath, file.Name()))
-        }
-    }
-
-    sort.Strings(matchFiles) // Sorted by timestamp: oldest first.
-
-    return matchFiles, nil
-}
-
-type limitedBuffer struct {
-    capacity int
-    buf      *bytes.Buffer
-}
-
-func newLimitedBuffer(capacity int) *limitedBuffer {
-    return &limitedBuffer{
-        capacity: capacity,
-        buf:      bytes.NewBuffer(make([]byte, 0, capacity)),
-    }
-}
-
-func (b *limitedBuffer) Write(p []byte) (n int, err error) {
-    if len(p)+b.buf.Len() > b.capacity {
-        return 0, ErrSizeTooLarge
-    }
-
-    return b.buf.Write(p)
-}
-
-func (b *limitedBuffer) Read(p []byte) (n int, err error) {
-    return b.buf.Read(p)
-}
-
-func zipFiles(filenames []string) (io.Reader, error) {
-    if len(filenames) == 0 {
-        return nil, nil
-    }
-
-    buf := newLimitedBuffer(MaxTotalAttachmentSize)
-
-    w := zip.NewWriter(buf)
-    defer w.Close() //nolint:errcheck
-
-    for _, file := range filenames {
-        if err := addFileToZip(file, w); err != nil {
-            return nil, err
-        }
-    }
-
-    if err := w.Close(); err != nil {
-        return nil, err
-    }
-
-    return buf, nil
-}
-
-func addFileToZip(filename string, writer *zip.Writer) error {
-    fileReader, err := os.Open(filepath.Clean(filename))
-    if err != nil {
-        return err
-    }
-    defer fileReader.Close() //nolint:errcheck,gosec
-
-    fileInfo, err := fileReader.Stat()
-    if err != nil {
-        return err
-    }
-
-    header, err := zip.FileInfoHeader(fileInfo)
-    if err != nil {
-        return err
-    }
-
-    header.Method = zip.Deflate
-    header.Name = filepath.Base(filename)
-
-    fileWriter, err := writer.CreateHeader(header)
-    if err != nil {
-        return err
-    }
-
-    if _, err := io.Copy(fileWriter, fileReader); err != nil {
-        return err
-    }
-
-    return fileReader.Close()
 }
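For illustration only (not part of the commit): a minimal sketch of how the new attachment path could be exercised on its own, assuming the code lives inside the proton-bridge module (the logging package is internal) and that a logs directory path is already known. The call to logging.ZipLogsForBugReport and the size/session limits mirror what the diff above introduces; dumpBugReportLogs and everything around it are hypothetical glue.

package main

import (
    "fmt"
    "io"
    "os"

    "github.com/ProtonMail/proton-bridge/v3/internal/logging"
)

// dumpBugReportLogs is a hypothetical helper that builds the same logs.zip
// payload that ReportBug now attaches, and writes it to disk for inspection.
func dumpBugReportLogs(logsPath, outPath string) error {
    const (
        maxZipSize      = 7 * 1024 * 1024 // mirrors DefaultMaxBugReportZipSize
        maxSessionCount = 10              // mirrors DefaultMaxSessionCountForBugReport
    )

    // Zip the most relevant log files while staying under the size limit.
    buffer, err := logging.ZipLogsForBugReport(logsPath, maxSessionCount, maxZipSize)
    if err != nil {
        return err
    }

    // The bridge reads the buffer into the attachment body; here we just save it.
    body, err := io.ReadAll(buffer)
    if err != nil {
        return err
    }

    return os.WriteFile(outPath, body, 0o600)
}

func main() {
    if len(os.Args) < 2 {
        fmt.Fprintln(os.Stderr, "usage: dumplogs <logs dir>")
        os.Exit(1)
    }

    if err := dumpBugReportLogs(os.Args[1], "logs.zip"); err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
}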
internal/logging/compression.go (new file, 170 lines)
@@ -0,0 +1,170 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.

package logging

import (
    "archive/zip"
    "bytes"
    "errors"
    "io"
    "os"
    "path/filepath"
)

var (
    errNoInputFile      = errors.New("no file was provided to put in the archive")
    errCannotFitAnyFile = errors.New("no file can fit in the archive")
)

// zipFilesWithMaxSize compresses the maximum number of files from the given list that can fit in a ZIP archive whose size does not exceed
// maxSize. Input files are taken in order and the function returns as soon as the next file cannot fit, even if another file further in the list
// might fit. The function returns the number of files that were included in the archive; the files included are filePaths[:fileCount].
func zipFilesWithMaxSize(filePaths []string, maxSize int64) (buffer *bytes.Buffer, fileCount int, err error) {
    if len(filePaths) == 0 {
        return nil, 0, errNoInputFile
    }
    buffer, err = createZipFromFile(filePaths[0])
    if err != nil {
        return nil, 0, err
    }

    if int64(buffer.Len()) > maxSize {
        return nil, 0, errCannotFitAnyFile
    }

    fileCount = 1
    var previousBuffer *bytes.Buffer

    for _, filePath := range filePaths[1:] {
        previousBuffer = cloneBuffer(buffer)

        zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes())))
        if err != nil {
            return nil, 0, err
        }

        buffer, err = addFileToArchive(zipReader, filePath)
        if err != nil {
            return nil, 0, err
        }

        if int64(buffer.Len()) > maxSize {
            return previousBuffer, fileCount, nil
        }

        fileCount++
    }

    return buffer, fileCount, nil
}

// cloneBuffer clones a buffer.
func cloneBuffer(buffer *bytes.Buffer) *bytes.Buffer {
    return bytes.NewBuffer(bytes.Clone(buffer.Bytes()))
}

// createZipFromFile creates a zip archive containing a single file.
func createZipFromFile(filePath string) (*bytes.Buffer, error) {
    file, err := os.Open(filePath) //nolint:gosec
    if err != nil {
        return nil, err
    }
    defer func() { _ = file.Close() }()

    return createZip(file, filepath.Base(filePath))
}

// createZip creates a zip archive containing a single file named filename whose content is read from reader.
func createZip(reader io.Reader, filename string) (*bytes.Buffer, error) {
    b := bytes.NewBuffer(make([]byte, 0))
    zipWriter := zip.NewWriter(b)

    f, err := zipWriter.Create(filename)
    if err != nil {
        return nil, err
    }

    if _, err = io.Copy(f, reader); err != nil {
        return nil, err
    }

    if err = zipWriter.Close(); err != nil {
        return nil, err
    }

    return b, nil
}

// addFileToArchive adds a file to an archive. Because the Go zip package does not support adding a file to an existing (closed) archive,
// the way to do it is to create a new archive, copy the raw content of the existing archive into it, and add the new file before closing.
func addFileToArchive(zipReader *zip.Reader, filePath string) (*bytes.Buffer, error) {
    file, err := os.Open(filePath) //nolint:gosec
    if err != nil {
        return nil, err
    }
    defer func() { _ = file.Close() }()

    return addToArchive(zipReader, file, filepath.Base(filePath))
}

// addToArchive copies the existing archive from zipReader and adds the data from reader as a new file named filename.
func addToArchive(zipReader *zip.Reader, reader io.Reader, filename string) (*bytes.Buffer, error) {
    buffer := bytes.NewBuffer([]byte{})
    zipWriter := zip.NewWriter(buffer)

    if err := copyZipContent(zipReader, zipWriter); err != nil {
        return nil, err
    }

    f, err := zipWriter.Create(filename)
    if err != nil {
        return nil, err
    }

    if _, err := io.Copy(f, reader); err != nil {
        return nil, err
    }

    if err := zipWriter.Close(); err != nil {
        return nil, err
    }

    return buffer, nil
}

// copyZipContent copies the content of one zip archive to another without recompression.
func copyZipContent(zipReader *zip.Reader, zipWriter *zip.Writer) error {
    for _, zipItem := range zipReader.File {
        itemReader, err := zipItem.OpenRaw()
        if err != nil {
            return err
        }

        header := zipItem.FileHeader
        targetItem, err := zipWriter.CreateRaw(&header)
        if err != nil {
            return err
        }

        if _, err := io.Copy(targetItem, itemReader); err != nil {
            return err
        }
    }

    return nil
}
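For illustration only (not part of the commit): a minimal in-package sketch of how zipFilesWithMaxSize is meant to be called. The helper is unexported, so the sketch is written as a test in package logging; the file names are hypothetical, and the real test file below exercises the size-limit behaviour in detail.

package logging

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/stretchr/testify/require"
)

// TestSketch_ZipFilesWithMaxSize is a hypothetical example, not part of the commit.
func TestSketch_ZipFilesWithMaxSize(t *testing.T) {
    dir := t.TempDir()
    paths := []string{filepath.Join(dir, "a.log"), filepath.Join(dir, "b.log")}
    for _, p := range paths {
        require.NoError(t, os.WriteFile(p, []byte("example log content"), 0o600))
    }

    // Fit as many files as possible into a 1 MiB archive. Files are taken in
    // order, so paths[:fileCount] are the entries that made it into the zip.
    buffer, fileCount, err := zipFilesWithMaxSize(paths, 1*1024*1024)
    require.NoError(t, err)
    require.Equal(t, len(paths), fileCount)
    require.Greater(t, buffer.Len(), 0)
}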
internal/logging/compression_test.go (new file, 134 lines)
@@ -0,0 +1,134 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.

package logging

import (
    "archive/zip"
    "bytes"
    "crypto/rand"
    "crypto/sha256"
    "io"
    "os"
    "path/filepath"
    "testing"

    "github.com/bradenaw/juniper/xslices"
    "github.com/stretchr/testify/require"
)

func TestLogging_LogCompression(t *testing.T) {
    dir := t.TempDir()

    files := []fileInfo{
        {filepath.Join(dir, "1.log"), 100000},
        {filepath.Join(dir, "2.log"), 200000},
        {filepath.Join(dir, "3.log"), 300000},
    }

    // Files are given content and sizes (relative to the zip format overhead) that ensure a compression ratio of roughly 2:1.
    createRandomFiles(t, files)
    paths := xslices.Map(files, func(fileInfo fileInfo) string { return fileInfo.filename })

    // Case 1: no input file.
    _, _, err := zipFilesWithMaxSize([]string{}, 10)
    require.ErrorIs(t, err, errNoInputFile)

    // Case 2: limit too low, no file can be included.
    _, _, err = zipFilesWithMaxSize(paths, 100)
    require.ErrorIs(t, err, errCannotFitAnyFile)

    // Case 3: 1 file fits.
    buffer, fileCount, err := zipFilesWithMaxSize(paths, 100000)
    require.NoError(t, err)
    require.Equal(t, 1, fileCount)
    checkZipFileContent(t, buffer, paths[0:1])

    // Case 4: 2 files fit.
    buffer, fileCount, err = zipFilesWithMaxSize(paths, 200000)
    require.NoError(t, err)
    require.Equal(t, 2, fileCount)
    checkZipFileContent(t, buffer, paths[0:2])

    // Case 5: 3 files fit.
    buffer, fileCount, err = zipFilesWithMaxSize(paths, 500000)
    require.NoError(t, err)
    require.Equal(t, 3, fileCount)
    checkZipFileContent(t, buffer, paths)
}

func createRandomFiles(t *testing.T, files []fileInfo) {
    // Each file is crafted to have a compression ratio of roughly 2:1 by filling the first half with random data and the second with zeroes.
    for _, file := range files {
        randomData := make([]byte, file.size)
        _, err := rand.Read(randomData[:file.size/2])
        require.NoError(t, err)
        require.NoError(t, os.WriteFile(file.filename, randomData, 0660))
    }
}

func checkZipFileContent(t *testing.T, buffer *bytes.Buffer, expectedFilePaths []string) {
    dir := t.TempDir()
    count := unzipFile(t, buffer, dir)
    require.Equal(t, len(expectedFilePaths), count)
    for _, file := range expectedFilePaths {
        checkFilesAreIdentical(t, file, filepath.Join(dir, filepath.Base(file)))
    }
}

func unzipFile(t *testing.T, buffer *bytes.Buffer, dir string) int {
    reader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes())))
    require.NoError(t, err)

    for _, f := range reader.File {
        info := f.FileInfo()
        require.False(t, info.IsDir())
        require.Equal(t, filepath.Base(info.Name()), info.Name()) // no sub-folder
        extractFileFromZip(t, f, filepath.Join(dir, f.Name))
    }

    return len(reader.File)
}

func extractFileFromZip(t *testing.T, zip *zip.File, path string) {
    file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, zip.Mode())
    require.NoError(t, err)
    defer func() { _ = file.Close() }()

    reader, err := zip.Open()
    require.NoError(t, err)
    defer func() { _ = reader.Close() }()

    _, err = io.Copy(file, reader)
    require.NoError(t, err)
}

func checkFilesAreIdentical(t *testing.T, path1, path2 string) {
    require.EqualValues(t, sha256Sum(t, path1), sha256Sum(t, path2))
}

func sha256Sum(t *testing.T, path string) []byte {
    f, err := os.Open(path)
    require.NoError(t, err)
    defer func() { _ = f.Close() }()

    hash := sha256.New()
    _, err = io.Copy(hash, f)
    require.NoError(t, err)

    return hash.Sum(nil)
}
Changes to the logging package setup (file name not shown in this view):

@@ -18,13 +18,18 @@
 package logging

 import (
+    "bytes"
     "context"
     "errors"
     "os"
+    "path/filepath"
     "regexp"
     "time"

+    "github.com/bradenaw/juniper/xslices"
     "github.com/sirupsen/logrus"
+    "golang.org/x/exp/maps"
+    "golang.org/x/exp/slices"
 )

 const (
@@ -33,9 +38,7 @@ const (
     // The Zendesk limit for an attachment is 50MB and this is what will
     // be allowed via the API. However, if that fails for some reason, the
     // fallback is sending the report via email, which has a limit of 10mb
-    // total or 7MB per file. Since we can produce up to 6 logs, and we
-    // compress all the files (average compression - 80%), we need to have
-    // a limit of 30MB total before compression, hence 5MB per log file.
+    // total or 7MB per file.
     DefaultMaxLogFileSize = 5 * 1024 * 1024
 )

@@ -104,6 +107,62 @@ func Init(logsPath string, sessionID SessionID, appName AppName, rotationSize, p
     return setLevel(level)
 }

+// ZipLogsForBugReport returns an archive containing the logs for a bug report.
+func ZipLogsForBugReport(logsPath string, maxSessionCount int, maxZipSize int64) (*bytes.Buffer, error) {
+    paths, err := getOrderedLogFileListForBugReport(logsPath, maxSessionCount)
+    if err != nil {
+        return nil, err
+    }
+
+    buffer, _, err := zipFilesWithMaxSize(paths, maxZipSize)
+    return buffer, err
+}
+
+// getOrderedLogFileListForBugReport returns the ordered list of log file paths to include in user-triggered bug reports. Only the last
+// maxSessionCount sessions are included. Priorities:
+// - sessions in chronologically descending order.
+// - for each session: last 2 bridge logs, first bridge log, gui logs, launcher logs, all other bridge logs.
+func getOrderedLogFileListForBugReport(logsPath string, maxSessionCount int) ([]string, error) {
+    sessionInfoList, err := buildSessionInfoList(logsPath)
+    if err != nil {
+        return nil, err
+    }
+
+    sortedSessions := maps.Values(sessionInfoList)
+    slices.SortFunc(sortedSessions, func(lhs, rhs *sessionInfo) bool { return lhs.sessionID > rhs.sessionID })
+    count := len(sortedSessions)
+    if count > maxSessionCount {
+        sortedSessions = sortedSessions[:maxSessionCount]
+    }
+
+    filePathFunc := func(logFileInfo logFileInfo) string { return filepath.Join(logsPath, logFileInfo.filename) }
+
+    var result []string
+    for _, session := range sortedSessions {
+        bridgeLogCount := len(session.bridgeLogs)
+        if bridgeLogCount > 0 {
+            result = append(result, filepath.Join(logsPath, session.bridgeLogs[bridgeLogCount-1].filename))
+        }
+        if bridgeLogCount > 1 {
+            result = append(result, filepath.Join(logsPath, session.bridgeLogs[bridgeLogCount-2].filename))
+        }
+        if bridgeLogCount > 2 {
+            result = append(result, filepath.Join(logsPath, session.bridgeLogs[0].filename))
+        }
+        if len(session.guiLogs) > 0 {
+            result = append(result, xslices.Map(session.guiLogs, filePathFunc)...)
+        }
+        if len(session.launcherLogs) > 0 {
+            result = append(result, xslices.Map(session.launcherLogs, filePathFunc)...)
+        }
+        if bridgeLogCount > 3 {
+            result = append(result, xslices.Map(session.bridgeLogs[1:bridgeLogCount-2], filePathFunc)...)
+        }
+    }
+
+    return result, nil
+}
+
 // setLevel will change the level of logging and in case of Debug or Trace
 // level it will also prevent from writing to file. Setting level to Info or
 // higher will not set writing to file again if it was previously cancelled by
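To make the ordering rules concrete, a worked example consistent with the test added below: for the most recent session, with bridge logs bri_000 through bri_004 plus one GUI and one launcher log, getOrderedLogFileListForBugReport lists that session's files as:

    bri_004   (latest bridge log)
    bri_003   (second-latest bridge log)
    bri_000   (first bridge log)
    gui_000   (GUI logs)
    lau_000   (launcher logs)
    bri_001   (remaining bridge logs)
    bri_002

Older sessions follow, each contributing its files in the same per-session order, until maxSessionCount sessions have been covered; zipFilesWithMaxSize then packs this list in order until the size budget is reached.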
Changes to the logging package tests (file name not shown in this view):

@@ -22,6 +22,7 @@ import (
     "path/filepath"
     "testing"

+    "github.com/ProtonMail/proton-bridge/v3/internal/constants"
     "github.com/stretchr/testify/require"
 )

@@ -58,3 +59,37 @@ func TestLogging_MatchLogName(t *testing.T) {
     require.False(t, MatchGUILogName(launcherLog))
     require.True(t, MatchLauncherLogName(launcherLog))
 }
+
+func TestLogging_GetOrderedLogFileListForBugReport(t *testing.T) {
+    dir := t.TempDir()
+
+    filePaths, err := getOrderedLogFileListForBugReport(dir, 3)
+    require.NoError(t, err)
+    require.True(t, len(filePaths) == 0)
+
+    require.NoError(t, os.WriteFile(filepath.Join(dir, "invalid.log"), []byte("proton"), 0660))
+
+    _ = createDummySession(t, dir, 1000, 250, 500, 3000)
+    sessionID1 := createDummySession(t, dir, 1000, 250, 500, 500)
+    sessionID2 := createDummySession(t, dir, 1000, 250, 500, 500)
+    sessionID3 := createDummySession(t, dir, 1000, 250, 500, 4500)
+
+    filePaths, err = getOrderedLogFileListForBugReport(dir, 3)
+    fileSuffix := "_v" + constants.Version + "_" + constants.Tag + ".log"
+    require.NoError(t, err)
+    require.EqualValues(t, []string{
+        filepath.Join(dir, string(sessionID3)+"_bri_004"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_bri_003"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_bri_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_gui_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_lau_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_bri_001"+fileSuffix),
+        filepath.Join(dir, string(sessionID3)+"_bri_002"+fileSuffix),
+        filepath.Join(dir, string(sessionID2)+"_bri_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID2)+"_gui_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID2)+"_lau_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID1)+"_bri_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID1)+"_gui_000"+fileSuffix),
+        filepath.Join(dir, string(sessionID1)+"_lau_000"+fileSuffix),
+    }, filePaths)
+}