feat(GODT-2668): implemented new log retention policy.

Author: Xavier Michelon
Date: 2023-06-07 18:00:29 +02:00
parent 7a090ffcc9
commit c587dfc0dc
13 changed files with 617 additions and 205 deletions

View File

@ -81,7 +81,14 @@ func main() { //nolint:funlen
sessionID := logging.NewSessionID()
crashHandler.AddRecoveryAction(logging.DumpStackTrace(logsPath, sessionID, launcherName))
- if err := logging.Init(logsPath, sessionID, launcherName, os.Getenv("VERBOSITY")); err != nil {
+ if err := logging.Init(
+     logsPath,
+     sessionID,
+     logging.LauncherShortAppName,
+     logging.DefaultMaxLogFileSize,
+     logging.NoPruning,
+     os.Getenv("VERBOSITY"),
+ ); err != nil {
l.WithError(err).Fatal("Failed to setup logging")
}

View File

@ -318,7 +318,14 @@ func withLogging(c *cli.Context, crashHandler *crash.Handler, locations *locatio
// Initialize logging.
sessionID := logging.NewSessionIDFromString(c.String(flagSessionID))
- if err := logging.Init(logsPath, sessionID, appShortName, c.String(flagLogLevel)); err != nil {
+ if err := logging.Init(
+     logsPath,
+     sessionID,
+     logging.BridgeShortAppName,
+     logging.DefaultMaxLogFileSize,
+     logging.DefaultPruningSize,
+     c.String(flagLogLevel),
+ ); err != nil {
return fmt.Errorf("could not initialize logging: %w", err)
}
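
Taken together, the launcher and bridge changes above show the new logging.Init signature: the rotation size and pruning size are now explicit parameters, and the short app name is a typed constant. A minimal sketch of a hypothetical caller follows (path, level and error handling are placeholders, not taken from the diff):

sessionID := logging.NewSessionID()
if err := logging.Init(
    "/path/to/logs",               // hypothetical log directory
    sessionID,
    logging.BridgeShortAppName,
    logging.DefaultMaxLogFileSize, // rotate a log file once it reaches 5 MB
    logging.DefaultPruningSize,    // prune the directory to roughly 200 MB; logging.NoPruning disables pruning
    "debug",                       // hypothetical verbosity level
); err != nil {
    // handle the error
}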

View File

@ -40,7 +40,7 @@ Log &initLog(QString const &sessionID) {
// create new GUI log file
QString error;
- if (!log.startWritingToFile(logsDir.absoluteFilePath(QString("%1_000_gui_v%2_%3.log").arg(sessionID, PROJECT_VER, PROJECT_TAG)), &error)) {
+ if (!log.startWritingToFile(logsDir.absoluteFilePath(QString("%1_gui_000_v%2_%3.log").arg(sessionID, PROJECT_VER, PROJECT_TAG)), &error)) {
log.error(error);
}

View File

@ -1,69 +0,0 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
package logging
import (
"fmt"
"io/fs"
"os"
"path/filepath"
"github.com/bradenaw/juniper/xslices"
"github.com/sirupsen/logrus"
"golang.org/x/exp/slices"
)
func clearLogs(logDir string, maxLogs int, maxCrashes int) error {
files, err := os.ReadDir(logDir)
if err != nil {
return fmt.Errorf("failed to read log directory: %w", err)
}
names := xslices.Map(files, func(file fs.DirEntry) string {
return file.Name()
})
// Remove old logs.
removeOldLogs(logDir, xslices.Filter(names, func(name string) bool {
return MatchBridgeLogName(name) && !MatchStackTraceName(name)
}), maxLogs)
// Remove old stack traces.
removeOldLogs(logDir, xslices.Filter(names, func(name string) bool {
return MatchBridgeLogName(name) && MatchStackTraceName(name)
}), maxCrashes)
return nil
}
func removeOldLogs(dir string, names []string, max int) {
if count := len(names); count <= max {
return
}
// Sort by timestamp, oldest first.
slices.SortFunc(names, func(a, b string) bool {
return getLogTime(a).Before(getLogTime(b))
})
for _, path := range xslices.Map(names[:len(names)-max], func(name string) string { return filepath.Join(dir, name) }) {
if err := os.Remove(path); err != nil {
logrus.WithError(err).WithField("path", path).Warn("Failed to remove old log file")
}
}
}

View File

@ -29,7 +29,7 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
) )
func DumpStackTrace(logsPath string, sessionID SessionID, appName string) crash.RecoveryAction { func DumpStackTrace(logsPath string, sessionID SessionID, appName AppName) crash.RecoveryAction {
return func(r interface{}) error { return func(r interface{}) error {
file := filepath.Join(logsPath, getStackTraceName(sessionID, appName, constants.Version, constants.Tag)) file := filepath.Join(logsPath, getStackTraceName(sessionID, appName, constants.Version, constants.Tag))
@ -52,10 +52,10 @@ func DumpStackTrace(logsPath string, sessionID SessionID, appName string) crash.
} }
} }
func getStackTraceName(sessionID SessionID, appName, version, tag string) string { func getStackTraceName(sessionID SessionID, appName AppName, version, tag string) string {
return fmt.Sprintf("%v_000_%v_v%v_%v_crash.log", sessionID, appName, version, tag) return fmt.Sprintf("%v_%v_000_v%v_%v_crash.log", sessionID, appName, version, tag)
} }
func MatchStackTraceName(name string) bool { func MatchStackTraceName(name string) bool {
return regexp.MustCompile(`^\d{8}_\d{9}_000_.*_crash\.log$`).MatchString(name) return regexp.MustCompile(`^\d{8}_\d{9}_.*_000_.*_crash\.log$`).MatchString(name)
} }

View File

@ -24,7 +24,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestMatchStackTraceName(t *testing.T) { func TestLogging_MatchStackTraceName(t *testing.T) {
filename := getStackTraceName(NewSessionID(), constants.AppName, constants.Version, constants.Tag) filename := getStackTraceName(NewSessionID(), constants.AppName, constants.Version, constants.Tag)
require.True(t, len(filename) > 0) require.True(t, len(filename) > 0)
require.True(t, MatchStackTraceName(filename)) require.True(t, MatchStackTraceName(filename))

View File

@ -19,6 +19,7 @@ package logging
import (
"context"
+ "errors"
"os"
"regexp"
"time"
@ -27,7 +28,7 @@ import (
)

const (
- // MaxLogSize defines the maximum log size we should permit: 5 MB
+ // DefaultMaxLogFileSize defines the maximum log size we should permit: 5 MB
//
// The Zendesk limit for an attachment is 50MB and this is what will
// be allowed via the API. However, if that fails for some reason, the
@ -35,10 +36,15 @@ const (
// total or 7MB per file. Since we can produce up to 6 logs, and we
// compress all the files (average compression - 80%), we need to have
// a limit of 30MB total before compression, hence 5MB per log file.
- MaxLogSize = 5 * 1024 * 1024
- // MaxLogs defines how many log files should be kept.
- MaxLogs = 10
- )
+ DefaultMaxLogFileSize = 5 * 1024 * 1024
+ )
+
+ type AppName string
+
+ const (
+     BridgeShortAppName   AppName = "bri"
+     LauncherShortAppName AppName = "lau"
+     GUIShortAppName      AppName = "gui"
+ )

type coloredStdOutHook struct {
@ -77,7 +83,9 @@ func (cs *coloredStdOutHook) Fire(entry *logrus.Entry) error {
return nil
}

- func Init(logsPath string, sessionID SessionID, appName, level string) error {
+ // Init initializes logging. Log files are rotated when their size exceeds rotationSize. If pruningSize >= 0,
+ // pruning occurs using the default pruning algorithm.
+ func Init(logsPath string, sessionID SessionID, appName AppName, rotationSize, pruningSize int64, level string) error {
logrus.SetFormatter(&logrus.TextFormatter{
DisableColors: true,
FullTimestamp: true,
@ -86,7 +94,7 @@ func Init(logsPath string, sessionID SessionID, appName, level string) error {
logrus.AddHook(newColoredStdOutHook())

- rotator, err := NewDefaultRotator(logsPath, sessionID, appName, MaxLogSize)
+ rotator, err := NewDefaultRotator(logsPath, sessionID, appName, rotationSize, pruningSize)
if err != nil {
return err
}
@ -126,42 +134,51 @@ func setLevel(level string) error {
return nil
}

- func getLogTime(filename string) time.Time {
+ func getLogSessionID(filename string) (SessionID, error) {
re := regexp.MustCompile(`^(?P<sessionID>\d{8}_\d{9})_.*\.log$`)
match := re.FindStringSubmatch(filename)
+ errInvalidFileName := errors.New("log file name is invalid")
if len(match) == 0 {
logrus.WithField("filename", filename).Warn("Could not parse log filename")
- return time.Time{}
+ return "", errInvalidFileName
}
index := re.SubexpIndex("sessionID")
if index < 0 {
logrus.WithField("filename", filename).Warn("Could not parse log filename")
- return time.Time{}
+ return "", errInvalidFileName
}
- return SessionID(match[index]).toTime()
+ return SessionID(match[index]), nil
+ }
+
+ func getLogTime(filename string) time.Time {
+ sessionID, err := getLogSessionID(filename)
+ if err != nil {
+ return time.Time{}
+ }
+ return sessionID.toTime()
}

// MatchBridgeLogName return true iff filename is a bridge log filename.
func MatchBridgeLogName(filename string) bool {
- return matchLogName(filename, "bridge")
+ return matchLogName(filename, BridgeShortAppName)
}

// MatchGUILogName return true iff filename is a bridge-gui log filename.
func MatchGUILogName(filename string) bool {
- return matchLogName(filename, "gui")
+ return matchLogName(filename, GUIShortAppName)
}

// MatchLauncherLogName return true iff filename is a launcher log filename.
func MatchLauncherLogName(filename string) bool {
- return matchLogName(filename, "launcher")
+ return matchLogName(filename, LauncherShortAppName)
}

- func matchLogName(logName, appName string) bool {
+ func matchLogName(logName string, appName AppName) bool {
- return regexp.MustCompile(`^\d{8}_\d{9}_\d{3}_` + appName + `.*\.log$`).MatchString(logName)
+ return regexp.MustCompile(`^\d{8}_\d{9}_\Q` + string(appName) + `\E_\d{3}_.*\.log$`).MatchString(logName)
}

type logKey string
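
Not part of the diff, just an illustration of the new name layout the matchers above expect (<sessionID>_<app>_<index>_v<version>_<tag>.log), using filenames taken from the updated tests below:

MatchBridgeLogName("20230602_094633102_bri_000_v3.0.99+git_5b650b1be3.log")   // true
MatchGUILogName("20230602_094633102_gui_000_v3.0.99+git_5b650b1be3.log")      // true
MatchLauncherLogName("20230602_094633102_lau_000_v3.0.99+git_5b650b1be3.log") // true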

View File

@ -25,7 +25,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestGetLogTime(t *testing.T) { func TestLogging_GetLogTime(t *testing.T) {
sessionID := NewSessionID() sessionID := NewSessionID()
fp := defaultFileProvider(os.TempDir(), sessionID, "bridge-test") fp := defaultFileProvider(os.TempDir(), sessionID, "bridge-test")
wc, err := fp(0) wc, err := fp(0)
@ -40,11 +40,11 @@ func TestGetLogTime(t *testing.T) {
require.Equal(t, sessionIDTime, logTime)
}

- func TestMatchLogName(t *testing.T) {
+ func TestLogging_MatchLogName(t *testing.T) {
- bridgeLog := "20230602_094633102_000_bridge_v3.0.99+git_5b650b1be3.log"
+ bridgeLog := "20230602_094633102_bri_000_v3.0.99+git_5b650b1be3.log"
- crashLog := "20230602_094633102_000_bridge_v3.0.99+git_5b650b1be3_crash.log"
+ crashLog := "20230602_094633102_bri_000_v3.0.99+git_5b650b1be3_crash.log"
- guiLog := "20230602_094633102_000_gui_v3.0.99+git_5b650b1be3.log"
+ guiLog := "20230602_094633102_gui_000_v3.0.99+git_5b650b1be3.log"
- launcherLog := "20230602_094633102_000_launcher_v3.0.99+git_5b650b1be3.log"
+ launcherLog := "20230602_094633102_lau_000_v3.0.99+git_5b650b1be3.log"
require.True(t, MatchBridgeLogName(bridgeLog))
require.False(t, MatchGUILogName(bridgeLog))
require.False(t, MatchLauncherLogName(bridgeLog))
@ -58,62 +58,3 @@ func TestMatchLogName(t *testing.T) {
require.False(t, MatchGUILogName(launcherLog))
require.True(t, MatchLauncherLogName(launcherLog))
}
// The test below is temporarily disabled, and will be restored when implementing new retention policy GODT-2668
// TestClearLogs tests that clearLogs removes only bridge old log files keeping last three of them.
// func TestClearLogs(t *testing.T) {
// dir := t.TempDir()
//
// // Create some old log files.
// require.NoError(t, os.WriteFile(filepath.Join(dir, "other.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.7_debe87f2f5_0000000001.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.8_debe87f2f5_0000000002.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.9_debe87f2f5_0000000003.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.0_debe87f2f5_0000000004.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.1_debe87f2f5_0000000005.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.2_debe87f2f5_0000000006.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.3_debe87f2f5_0000000007.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.4_debe87f2f5_0000000008.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.5_debe87f2f5_0000000009.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.6_debe87f2f5_0000000010.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.7_debe87f2f5_0000000011.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.8_debe87f2f5_0000000012.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.12_debe87f2f5_0000000013.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.9_debe87f2f5_0000000014.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.10_debe87f2f5_0000000015.log"), []byte("Hello"), 0o755))
// require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.11_debe87f2f5_0000000016.log"), []byte("Hello"), 0o755))
//
// // Clear the logs.
// require.NoError(t, clearLogs(dir, 3, 0))
//
// // We should only clear matching files, and keep the 3 most recent ones.
// checkFileNames(t, dir, []string{
// "other.log",
// "v2.5.9_debe87f2f5_0000000014.log",
// "v2.5.10_debe87f2f5_0000000015.log",
// "v2.5.11_debe87f2f5_0000000016.log",
// })
// }
//
// func checkFileNames(t *testing.T, dir string, expectedFileNames []string) {
// require.ElementsMatch(t, expectedFileNames, getFileNames(t, dir))
// }
//
// func getFileNames(t *testing.T, dir string) []string {
// files, err := os.ReadDir(dir)
// require.NoError(t, err)
//
// fileNames := []string{}
// for _, file := range files {
// fileNames = append(fileNames, file.Name())
// if file.IsDir() {
// subDir := filepath.Join(dir, file.Name())
// subFileNames := getFileNames(t, subDir)
// for _, subFileName := range subFileNames {
// fileNames = append(fileNames, file.Name()+"/"+subFileName)
// }
// }
// }
// return fileNames
// }

internal/logging/pruning.go (new file, 230 lines)
View File

@ -0,0 +1,230 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
package logging
import (
"os"
"path/filepath"
"regexp"
"strings"
"github.com/bradenaw/juniper/xslices"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
DefaultPruningSize = 1024 * 1024 * 200
NoPruning = -1
)
type Pruner func() (failureCount int, err error)
type logFileInfo struct {
filename string
size int64
}
type sessionInfo struct {
dir string
sessionID SessionID
launcherLogs []logFileInfo
guiLogs []logFileInfo
bridgeLogs []logFileInfo
}
func defaultPruner(logsDir string, currentSessionID SessionID, pruningSize int64) func() (failureCount int, err error) {
return func() (int, error) {
return pruneLogs(logsDir, currentSessionID, pruningSize)
}
}
func nullPruner() (failureCount int, err error) {
return 0, nil
}
// pruneLogs gets rid of older log files according to the following policy:
// - We limit the total size of the log files to roughly pruningSize.
// The current session is included in this quota, so that logs do not grow indefinitely on setups where bridge runs uninterrupted for months.
// - If the current session's log files alone exceed the pruning size, we delete the logs of all other sessions. For the current session we keep
// the launcher and gui logs (by nature they are limited to a few kB at most); of the bridge log files we keep the first and the last
// (the startup log contains relevant information, notably the SentryID) and delete the others, starting with the oldest, until the total size
// drops below the pruning size.
// - Otherwise, if the total size of the log files for all sessions exceeds pruningSize, whole sessions get deleted, starting with the oldest, until
// the size drops below the pruning size. Sessions are treated atomically; the current session is left untouched in that case.
func pruneLogs(logDir string, currentSessionID SessionID, pruningSize int64) (failureCount int, err error) {
sessionInfoList, err := buildSessionInfoList(logDir)
if err != nil {
return 0, err
}
// we want total size to include the current session.
totalSize := xslices.Reduce(maps.Values(sessionInfoList), int64(0), func(sum int64, info *sessionInfo) int64 { return sum + info.size() })
if totalSize <= pruningSize {
return 0, nil
}
currentSessionInfo, ok := sessionInfoList[currentSessionID]
if ok {
delete(sessionInfoList, currentSessionID)
if currentSessionInfo.size() > pruningSize {
// current session is already too big. We delete all other sessions and prune the current session.
for _, session := range sessionInfoList {
failureCount += session.deleteFiles()
}
failureCount += currentSessionInfo.pruneAsCurrentSession(pruningSize)
return failureCount, nil
}
}
// The current session size is below the pruning size, so we erase older sessions, starting with the oldest, until we go below pruningSize.
sortedSessions := maps.Values(sessionInfoList)
slices.SortFunc(sortedSessions, func(lhs, rhs *sessionInfo) bool { return lhs.sessionID < rhs.sessionID })
for _, sessionInfo := range sortedSessions {
totalSize -= sessionInfo.size()
failureCount += sessionInfo.deleteFiles()
if totalSize <= pruningSize {
return failureCount, nil
}
}
return failureCount, nil
}
func newSessionInfo(dir string, sessionID SessionID) (*sessionInfo, error) {
paths, err := filepath.Glob(filepath.Join(dir, string(sessionID)+"_*.log"))
if err != nil {
return nil, err
}
rx := regexp.MustCompile(`^\Q` + string(sessionID) + `\E_([^_]*)_\d+_.*\.log$`)
result := sessionInfo{sessionID: sessionID, dir: dir}
for _, path := range paths {
filename := filepath.Base(path)
match := rx.FindStringSubmatch(filename)
if len(match) != 2 {
continue
}
stats, err := os.Stat(path)
if err != nil {
continue
}
fileInfo := logFileInfo{
filename: filename,
size: stats.Size(),
}
switch AppName(match[1]) {
case LauncherShortAppName:
result.launcherLogs = append(result.launcherLogs, fileInfo)
case GUIShortAppName:
result.guiLogs = append(result.guiLogs, fileInfo)
case BridgeShortAppName:
result.bridgeLogs = append(result.bridgeLogs, fileInfo)
}
}
lessFunc := func(lhs, rhs logFileInfo) bool { return strings.Compare(lhs.filename, rhs.filename) < 0 }
slices.SortFunc(result.launcherLogs, lessFunc)
slices.SortFunc(result.guiLogs, lessFunc)
slices.SortFunc(result.bridgeLogs, lessFunc)
return &result, nil
}
func (s *sessionInfo) size() int64 {
summer := func(accum int64, logInfo logFileInfo) int64 { return accum + logInfo.size }
size := xslices.Reduce(s.launcherLogs, 0, summer)
size = xslices.Reduce(s.guiLogs, size, summer)
size = xslices.Reduce(s.bridgeLogs, size, summer)
return size
}
func (s *sessionInfo) deleteFiles() (failureCount int) {
var allLogs []logFileInfo
allLogs = append(allLogs, s.launcherLogs...)
allLogs = append(allLogs, s.guiLogs...)
allLogs = append(allLogs, s.bridgeLogs...)
for _, log := range allLogs {
if err := os.Remove(filepath.Join(s.dir, log.filename)); err != nil {
failureCount++
}
}
return failureCount
}
func (s *sessionInfo) pruneAsCurrentSession(pruningSize int64) (failureCount int) {
// When pruning the current session, we keep the launcher and GUI logs as well as the first and last bridge log files,
// and we delete intermediate bridge logs until the size constraint is satisfied (or there is nothing left to delete).
if len(s.bridgeLogs) < 3 {
return 0
}
size := s.size()
if size <= pruningSize {
return 0
}
for _, fileInfo := range s.bridgeLogs[1 : len(s.bridgeLogs)-1] {
if err := os.Remove(filepath.Join(s.dir, fileInfo.filename)); err != nil {
failureCount++
}
size -= fileInfo.size
if size <= pruningSize {
return failureCount
}
}
return failureCount
}
func buildSessionInfoList(dir string) (map[SessionID]*sessionInfo, error) {
result := make(map[SessionID]*sessionInfo)
entries, err := os.ReadDir(dir)
if err != nil {
return nil, err
}
for _, entry := range entries {
if entry.IsDir() {
continue
}
rx := regexp.MustCompile(`^(\d{8}_\d{9})_.*\.log$`)
match := rx.FindStringSubmatch(entry.Name())
if match == nil || len(match) < 2 {
continue
}
sessionID := SessionID(match[1])
if _, ok := result[sessionID]; !ok {
sessionInfo, err := newSessionInfo(dir, sessionID)
if err != nil {
return nil, err
}
result[sessionID] = sessionInfo
}
}
return result, nil
}
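
Not part of the commit, just a sketch of how the policy above plays out: with a 200 MB quota and three sessions of 120 MB, 60 MB and 40 MB (oldest first, the 40 MB one being current), only the 120 MB session is deleted, since that already brings the total under the quota, and the current session is never touched on that code path. A hypothetical standalone call (directory, session ID and quota are made up):

func pruneExample() error {
	current := SessionID("20230607_180029000") // the running session, which pruning must preserve
	failureCount, err := pruneLogs("/path/to/logs", current, 200*1024*1024)
	if err != nil {
		return err // as in Rotator.rotate(), a pruning error aborts the caller's operation
	}
	_ = failureCount // number of files that could not be removed
	return nil
}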

View File

@ -0,0 +1,236 @@
// Copyright (c) 2023 Proton AG
//
// This file is part of Proton Mail Bridge.
//
// Proton Mail Bridge is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Proton Mail Bridge is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
package logging
import (
"os"
"path/filepath"
"testing"
"time"
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
"github.com/stretchr/testify/require"
)
type fileInfo struct {
filename string
size int64
}
var logFileSuffix = "_v" + constants.Version + "_" + constants.Tag + ".log"
func TestLogging_Pruning(t *testing.T) {
dir := t.TempDir()
const maxLogSize = 100
sessionID1 := createDummySession(t, dir, maxLogSize, 50, 50, 250)
sessionID2 := createDummySession(t, dir, maxLogSize, 100, 100, 350)
sessionID3 := createDummySession(t, dir, maxLogSize, 150, 100, 350)
// Expected files per session
session1Files := []fileInfo{
{filename: string(sessionID1) + "_lau_000" + logFileSuffix, size: 50},
{filename: string(sessionID1) + "_gui_000" + logFileSuffix, size: 50},
{filename: string(sessionID1) + "_bri_000" + logFileSuffix, size: 100},
{filename: string(sessionID1) + "_bri_001" + logFileSuffix, size: 100},
{filename: string(sessionID1) + "_bri_002" + logFileSuffix, size: 50},
}
session2Files := []fileInfo{
{filename: string(sessionID2) + "_lau_000" + logFileSuffix, size: 100},
{filename: string(sessionID2) + "_gui_000" + logFileSuffix, size: 100},
{filename: string(sessionID2) + "_bri_000" + logFileSuffix, size: 100},
{filename: string(sessionID2) + "_bri_001" + logFileSuffix, size: 100},
{filename: string(sessionID2) + "_bri_002" + logFileSuffix, size: 100},
{filename: string(sessionID2) + "_bri_003" + logFileSuffix, size: 50},
}
session3Files := []fileInfo{
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 100},
{filename: string(sessionID3) + "_lau_001" + logFileSuffix, size: 50},
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 100},
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 100},
{filename: string(sessionID3) + "_bri_001" + logFileSuffix, size: 100},
{filename: string(sessionID3) + "_bri_002" + logFileSuffix, size: 100},
{filename: string(sessionID3) + "_bri_003" + logFileSuffix, size: 50},
}
allSessions := session1Files
allSessions = append(allSessions, append(session2Files, session3Files...)...)
checkFolderContent(t, dir, allSessions...)
failureCount, err := pruneLogs(dir, sessionID3, 2000) // nothing to prune
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, allSessions...)
failureCount, err = pruneLogs(dir, sessionID3, 1200) // session 1 is pruned
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, append(session2Files, session3Files...)...)
failureCount, err = pruneLogs(dir, sessionID3, 1000) // session 2 is pruned
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, session3Files...)
}
func TestLogging_PruningBigCurrentSession(t *testing.T) {
dir := t.TempDir()
const maxLogFileSize = 1000
sessionID1 := createDummySession(t, dir, maxLogFileSize, 500, 500, 2500)
sessionID2 := createDummySession(t, dir, maxLogFileSize, 1000, 1000, 3500)
sessionID3 := createDummySession(t, dir, maxLogFileSize, 500, 500, 10500)
// Expected files per session
session1Files := []fileInfo{
{filename: string(sessionID1) + "_lau_000" + logFileSuffix, size: 500},
{filename: string(sessionID1) + "_gui_000" + logFileSuffix, size: 500},
{filename: string(sessionID1) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID1) + "_bri_001" + logFileSuffix, size: 1000},
{filename: string(sessionID1) + "_bri_002" + logFileSuffix, size: 500},
}
session2Files := []fileInfo{
{filename: string(sessionID2) + "_lau_000" + logFileSuffix, size: 1000},
{filename: string(sessionID2) + "_gui_000" + logFileSuffix, size: 1000},
{filename: string(sessionID2) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID2) + "_bri_001" + logFileSuffix, size: 1000},
{filename: string(sessionID2) + "_bri_002" + logFileSuffix, size: 1000},
{filename: string(sessionID2) + "_bri_003" + logFileSuffix, size: 500},
}
session3Files := []fileInfo{
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_001" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_002" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_003" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_004" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_005" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_006" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_007" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
}
allSessions := session1Files
allSessions = append(allSessions, append(session2Files, session3Files...)...)
checkFolderContent(t, dir, allSessions...)
// The current session alone is bigger than the pruning size. We keep the launcher and gui logs, the first and last bridge logs,
// and only the most recent intermediate bridge logs that keep the total file size under the limit.
failureCount, err := pruneLogs(dir, sessionID3, 8000)
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, []fileInfo{
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_005" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_006" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_007" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
}...)
failureCount, err = pruneLogs(dir, sessionID3, 5000)
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, []fileInfo{
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
}...)
// Whatever the pruning size is, we will always keep the following files.
minimalFiles := []fileInfo{
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
}
failureCount, err = pruneLogs(dir, sessionID3, 2000)
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, minimalFiles...)
failureCount, err = pruneLogs(dir, sessionID3, 0)
require.Equal(t, failureCount, 0)
require.NoError(t, err)
checkFolderContent(t, dir, minimalFiles...)
}
func createDummySession(t *testing.T, dir string, maxLogFileSize int64, launcherLogSize, guiLogSize, bridgeLogSize int64) SessionID {
time.Sleep(2 * time.Millisecond) // ensure our sessionID is unused.
sessionID := NewSessionID()
if launcherLogSize > 0 {
createDummyRotatedLogFile(t, dir, sessionID, LauncherShortAppName, launcherLogSize, maxLogFileSize)
}
if guiLogSize > 0 {
createDummyRotatedLogFile(t, dir, sessionID, GUIShortAppName, guiLogSize, maxLogFileSize)
}
if bridgeLogSize > 0 {
createDummyRotatedLogFile(t, dir, sessionID, BridgeShortAppName, bridgeLogSize, maxLogFileSize)
}
return sessionID
}
func createDummyRotatedLogFile(t *testing.T, dir string, sessionID SessionID, appName AppName, totalSize, maxLogFileSize int64) {
rotator, err := NewDefaultRotator(dir, sessionID, appName, maxLogFileSize, NoPruning)
require.NoError(t, err)
for i := int64(0); i < totalSize/maxLogFileSize; i++ {
count, err := rotator.Write(make([]byte, maxLogFileSize))
require.NoError(t, err)
require.Equal(t, int64(count), maxLogFileSize)
}
remainder := totalSize % maxLogFileSize
if remainder > 0 {
count, err := rotator.Write(make([]byte, remainder))
require.NoError(t, err)
require.Equal(t, int64(count), remainder)
}
require.NoError(t, rotator.wc.Close())
}
func checkFolderContent(t *testing.T, dir string, fileInfos ...fileInfo) {
for _, fi := range fileInfos {
checkFileExistsWithSize(t, dir, fi)
}
entries, err := os.ReadDir(dir)
require.NoError(t, err)
require.Equal(t, len(fileInfos), len(entries))
}
func checkFileExistsWithSize(t *testing.T, dir string, info fileInfo) {
stat, err := os.Stat(filepath.Join(dir, info.filename))
require.NoError(t, err)
require.Equal(t, stat.Size(), info.size)
}

View File

@ -27,31 +27,29 @@ import (
)

type Rotator struct {
- getFile   FileProvider
- wc        io.WriteCloser
- size      int
- maxSize   int
- nextIndex int
+ getFile     FileProvider
+ prune       Pruner
+ wc          io.WriteCloser
+ size        int64
+ maxFileSize int64
+ nextIndex   int
}

type FileProvider func(index int) (io.WriteCloser, error)

- func defaultFileProvider(logsPath string, sessionID SessionID, appName string) FileProvider {
+ func defaultFileProvider(logsPath string, sessionID SessionID, appName AppName) FileProvider {
return func(index int) (io.WriteCloser, error) {
- if err := clearLogs(logsPath, MaxLogs, MaxLogs); err != nil {
- return nil, err
- }
return os.Create(filepath.Join(logsPath, //nolint:gosec // G304
- fmt.Sprintf("%v_%03d_%v_v%v_%v.log", sessionID, index, appName, constants.Version, constants.Tag),
+ fmt.Sprintf("%v_%v_%03d_v%v_%v.log", sessionID, appName, index, constants.Version, constants.Tag),
))
}
}

- func NewRotator(maxSize int, getFile FileProvider) (*Rotator, error) {
+ func NewRotator(maxFileSize int64, getFile FileProvider, prune Pruner) (*Rotator, error) {
r := &Rotator{
getFile: getFile,
- maxSize: maxSize,
+ prune: prune,
+ maxFileSize: maxFileSize,
}

if err := r.rotate(); err != nil {
@ -61,12 +59,19 @@ func NewRotator(maxSize int, getFile FileProvider) (*Rotator, error) {
return r, nil
}

- func NewDefaultRotator(logsPath string, sessionID SessionID, appName string, maxSize int) (*Rotator, error) {
- return NewRotator(maxSize, defaultFileProvider(logsPath, sessionID, appName))
+ func NewDefaultRotator(logsPath string, sessionID SessionID, appName AppName, maxLogFileSize, pruningSize int64) (*Rotator, error) {
+ var pruner Pruner
+ if pruningSize < 0 {
+ pruner = nullPruner
+ } else {
+ pruner = defaultPruner(logsPath, sessionID, pruningSize)
+ }
+
+ return NewRotator(maxLogFileSize, defaultFileProvider(logsPath, sessionID, appName), pruner)
}

func (r *Rotator) Write(p []byte) (int, error) {
- if r.size+len(p) > r.maxSize {
+ if r.size+int64(len(p)) > r.maxFileSize {
if err := r.rotate(); err != nil {
return 0, err
}
@ -77,8 +82,7 @@ func (r *Rotator) Write(p []byte) (int, error) {
return n, err
}

- r.size += n
+ r.size += int64(n)
return n, nil
}
@ -87,6 +91,10 @@ func (r *Rotator) rotate() error {
_ = r.wc.Close()
}

+ if _, err := r.prune(); err != nil {
+ return err
+ }
+
wc, err := r.getFile(r.nextIndex)
if err != nil {
return err
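
Rotation and pruning now meet in one place: when a write would push the current file past maxFileSize, the rotator closes it, runs its pruner, then asks the FileProvider for the next file. Not part of the diff, a minimal sketch against the new API (the file naming here is invented; defaultFileProvider builds the real names):

getFile := func(index int) (io.WriteCloser, error) {
	return os.Create(fmt.Sprintf("/tmp/example_%03d.log", index)) // hypothetical naming scheme
}
r, err := NewRotator(1024, getFile, nullPruner) // 1 KiB per file, pruning disabled
if err != nil {
	// handle the error
}
_, err = r.Write([]byte("some log line")) // rotation (and the pruner) kicks in once the current file would exceed 1 KiB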

View File

@ -37,7 +37,7 @@ func (c *WriteCloser) Close() error {
return nil
}

- func TestRotator(t *testing.T) {
+ func TestLogging_Rotator(t *testing.T) {
n := 0

getFile := func(_ int) (io.WriteCloser, error) {
@ -45,7 +45,7 @@ func TestRotator(t *testing.T) {
return &WriteCloser{}, nil return &WriteCloser{}, nil
} }
r, err := NewRotator(10, getFile) r, err := NewRotator(10, getFile, nullPruner)
require.NoError(t, err) require.NoError(t, err)
_, err = r.Write([]byte("12345")) _, err = r.Write([]byte("12345"))
@ -77,33 +77,16 @@ func TestRotator(t *testing.T) {
assert.Equal(t, 4, n)
}

- func countFilesMatching(pattern string) int {
- files, err := filepath.Glob(pattern)
- if err != nil {
- return -1
- }
- return len(files)
- }
-
- func cleanupLogs(t *testing.T, sessionID SessionID) {
- paths, err := filepath.Glob(filepath.Join(os.TempDir(), string(sessionID)+"*.log"))
- require.NoError(t, err)
- for _, path := range paths {
- require.NoError(t, os.Remove(path))
- }
- }
-
- func TestDefaultRotator(t *testing.T) {
+ func TestLogging_DefaultRotator(t *testing.T) {
fiveBytes := []byte("00000")

tmpDir := os.TempDir()
sessionID := NewSessionID()
basePath := filepath.Join(tmpDir, string(sessionID))

- r, err := NewDefaultRotator(tmpDir, sessionID, "bridge", 10)
+ r, err := NewDefaultRotator(tmpDir, sessionID, "bri", 10, NoPruning)
require.NoError(t, err)
- require.Equal(t, 1, countFilesMatching(basePath+"_000_*.log"))
+ require.Equal(t, 1, countFilesMatching(basePath+"_bri_000_*.log"))
require.Equal(t, 1, countFilesMatching(basePath+"*.log"))

_, err = r.Write(fiveBytes)
@ -117,7 +100,7 @@ func TestDefaultRotator(t *testing.T) {
_, err = r.Write(fiveBytes)
require.NoError(t, err)
require.Equal(t, 2, countFilesMatching(basePath+"*.log"))
- require.Equal(t, 1, countFilesMatching(basePath+"_001_*.log"))
+ require.Equal(t, 1, countFilesMatching(basePath+"_bri_001_*.log"))

for i := 0; i < 4; i++ {
_, err = r.Write(fiveBytes)
@ -130,18 +113,53 @@ func TestDefaultRotator(t *testing.T) {
logFileCount := countFilesMatching(basePath + "*.log")
require.Equal(t, 4, logFileCount)
for i := 0; i < logFileCount; i++ {
- require.Equal(t, 1, countFilesMatching(basePath+fmt.Sprintf("_%03d_*.log", i)))
+ require.Equal(t, 1, countFilesMatching(basePath+fmt.Sprintf("_bri_%03d_*.log", i)))
}

cleanupLogs(t, sessionID)
}

func TestLogging_DefaultRotatorWithPruning(t *testing.T) {
tenBytes := []byte("0000000000")
tmpDir := t.TempDir()
sessionID := NewSessionID()
basePath := filepath.Join(tmpDir, string(sessionID))
// fill the log dir while below the pruning quota
r, err := NewDefaultRotator(tmpDir, sessionID, "bri", 10, 40)
require.NoError(t, err)
for i := 0; i < 4; i++ {
_, err = r.Write(tenBytes)
require.NoError(t, err)
}
// From now on, at every rotation (i.e. every write in this case), we prune, then create a new file.
// We should always have 4 files remaining after pruning, plus the newly rotated file with the last written bytes.
for i := 0; i < 10; i++ {
_, err := r.Write(tenBytes)
require.NoError(t, err)
require.Equal(t, 5, countFilesMatching(basePath+"_bri_*.log"))
}
require.NoError(t, r.wc.Close())
// Final check. 000, 010, 011, 012 are what's left after the last pruning, 013 never got to pass through pruning.
checkFolderContent(t, tmpDir, []fileInfo{
{filename: string(sessionID) + "_bri_000" + logFileSuffix, size: 10},
{filename: string(sessionID) + "_bri_010" + logFileSuffix, size: 10},
{filename: string(sessionID) + "_bri_011" + logFileSuffix, size: 10},
{filename: string(sessionID) + "_bri_012" + logFileSuffix, size: 10},
{filename: string(sessionID) + "_bri_013" + logFileSuffix, size: 10},
}...)
}

func BenchmarkRotate(b *testing.B) {
- benchRotate(b, MaxLogSize, getTestFile(b, b.TempDir(), MaxLogSize-1))
+ benchRotate(b, DefaultMaxLogFileSize, getTestFile(b, b.TempDir(), DefaultMaxLogFileSize-1))
}

- func benchRotate(b *testing.B, logSize int, getFile func(index int) (io.WriteCloser, error)) {
- r, err := NewRotator(logSize, getFile)
+ func benchRotate(b *testing.B, logSize int64, getFile func(index int) (io.WriteCloser, error)) {
+ r, err := NewRotator(logSize, getFile, nullPruner)
require.NoError(b, err)

for n := 0; n < b.N; n++ {
@ -174,3 +192,20 @@ func getTestFile(b *testing.B, dir string, length int) func(int) (io.WriteCloser
return f, nil return f, nil
} }
} }
func countFilesMatching(pattern string) int {
files, err := filepath.Glob(pattern)
if err != nil {
return -1
}
return len(files)
}
func cleanupLogs(t *testing.T, sessionID SessionID) {
paths, err := filepath.Glob(filepath.Join(os.TempDir(), string(sessionID)+"*.log"))
require.NoError(t, err)
for _, path := range paths {
require.NoError(t, os.Remove(path))
}
}

View File

@ -23,7 +23,7 @@ import (
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestSessionID(t *testing.T) { func TestLogging_SessionID(t *testing.T) {
now := time.Now() now := time.Now()
sessionID := NewSessionID() sessionID := NewSessionID()
sessionTime := sessionID.toTime() sessionTime := sessionID.toTime()