Other: Bump linter

Jakub
2021-04-07 09:19:22 +02:00
parent 2f35c453a1
commit 7d0af7624c
22 changed files with 116 additions and 47 deletions

View File

@@ -104,7 +104,7 @@ func writeMultipartReport(w *multipart.Writer, rep *ReportReq) error { // nolint
fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
quoteEscaper.Replace(att.name), quoteEscaper.Replace(att.filename+".zip")))
h.Set("Content-Type", "application/octet-stream")
//h.Set("Content-Transfere-Encoding", "base64")
// h.Set("Content-Transfer-Encoding", "base64")
attWr, err := w.CreatePart(h)
if err != nil {
return err
@@ -112,7 +112,7 @@ func writeMultipartReport(w *multipart.Writer, rep *ReportReq) error { // nolint
zipArch := zip.NewWriter(attWr)
zipWr, err := zipArch.Create(att.filename)
//b64 := base64.NewEncoder(base64.StdEncoding, zipWr)
// b64 := base64.NewEncoder(base64.StdEncoding, zipWr)
if err != nil {
return err
}
@@ -121,7 +121,7 @@ func writeMultipartReport(w *multipart.Writer, rep *ReportReq) error { // nolint
return err
}
err = zipArch.Close()
//err = b64.Close()
// err = b64.Close()
if err != nil {
return err
}
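The hunks above only tidy the commented-out Content-Transfer-Encoding lines, but they sit inside the attachment-zipping path: each attachment becomes one application/octet-stream part of the multipart body, with a zip.Writer streaming the payload into that part. A minimal, self-contained sketch of the same pattern follows; it is not taken from the commit, and the helper writeZippedAttachment plus the sample data are made up for illustration.

package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"mime/multipart"
	"net/textproto"
	"strings"
)

var quoteEscaper = strings.NewReplacer("\\", "\\\\", `"`, "\\\"")

// writeZippedAttachment zips data on the fly and writes it as one
// application/octet-stream part of the multipart body.
func writeZippedAttachment(w *multipart.Writer, name, filename string, data []byte) error {
	h := make(textproto.MIMEHeader)
	h.Set("Content-Disposition", fmt.Sprintf(`form-data; name="%s"; filename="%s"`,
		quoteEscaper.Replace(name), quoteEscaper.Replace(filename+".zip")))
	h.Set("Content-Type", "application/octet-stream")

	attWr, err := w.CreatePart(h) // io.Writer for this part
	if err != nil {
		return err
	}

	zipArch := zip.NewWriter(attWr)
	zipWr, err := zipArch.Create(filename) // one entry inside the zip
	if err != nil {
		return err
	}
	if _, err := zipWr.Write(data); err != nil {
		return err
	}
	return zipArch.Close() // flushes the zip central directory into the part
}

func main() {
	var body bytes.Buffer
	w := multipart.NewWriter(&body)
	if err := writeZippedAttachment(w, "attachment-0", "crash.log", []byte("example payload")); err != nil {
		panic(err)
	}
	_ = w.Close()
	fmt.Println("multipart body size:", body.Len())
}

Leaving the base64 encoder commented out means the zipped bytes travel as raw binary, which is what the application/octet-stream header already declares.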

View File

@@ -293,7 +293,7 @@ func (c *client) doBuffered(req *http.Request, bodyBuffer []byte, retryUnauthori
retryAfter = headerAfter
}
// To avoid spikes when all clients retry at the same time, we add some random wait.
retryAfter += rand.Intn(10)
retryAfter += rand.Intn(10) //nolint[gosec] It is OK to use a weak random number generator here
if hasBody {
r := bytes.NewReader(bodyBuffer)
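The //nolint annotation added above documents why math/rand is acceptable in this spot: the random component is only jitter that spreads retries out in time, so clients that were all told to retry do not hit the server again in the same instant. A small sketch of the idea, with the hypothetical helper waitBeforeRetry standing in for the surrounding retry loop:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// waitBeforeRetry sleeps for the server-suggested interval plus up to ten
// seconds of jitter so that clients do not all retry at the same moment.
func waitBeforeRetry(retryAfter int) {
	retryAfter += rand.Intn(10) //nolint[gosec] jitter does not need strong randomness
	time.Sleep(time.Duration(retryAfter) * time.Second)
}

func main() {
	fmt.Println("retrying shortly...")
	waitBeforeRetry(2) // sleeps between 2 and 11 seconds
}

Cryptographically strong randomness would buy nothing here, which is exactly what the nolint[gosec] comment records.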

View File

@@ -60,7 +60,7 @@ type ContactEmail struct {
var errVerificationFailed = errors.New("signature verification failed")
//================= Public utility functions ======================
// ================= Public utility functions ======================
func (c *client) EncryptAndSignCards(cards []Card) ([]Card, error) {
var err error
@@ -105,7 +105,7 @@ func (c *client) DecryptAndVerifyCards(cards []Card) ([]Card, error) {
return cards, nil
}
//====================== READ ===========================
// ====================== READ ===========================
type ContactsListRes struct {
Res
@@ -235,7 +235,7 @@ func (c *client) GetContactEmailByEmail(email string, page int, pageSize int) (c
return
}
//============================ CREATE ====================================
// ============================ CREATE ====================================
type CardsList struct {
Cards []Card
@@ -419,7 +419,7 @@ func (c *client) DeleteAllContacts() (err error) {
return
}
//===================== Private utility methods =======================
// ===================== Private utility methods =======================
func isSignedCardType(cardType int) bool {
return (cardType & CardSigned) == CardSigned

View File

@@ -137,7 +137,7 @@ func newMessagePackage(
}
type sendData struct {
decryptedBodyKey *crypto.SessionKey //body session key
decryptedBodyKey *crypto.SessionKey // body session key
addressMap map[string]*MessageAddress
sharedScheme PackageFlag
ciphertext []byte

View File

@@ -70,9 +70,9 @@ func HashPassword(authVersion int, password, userName string, salt, modulus []by
// CleanUserName returns the input string in lower-case without characters `_`,
// `.` and `-`.
func CleanUserName(userName string) string {
userName = strings.Replace(userName, "-", "", -1)
userName = strings.Replace(userName, ".", "", -1)
userName = strings.Replace(userName, "_", "", -1)
userName = strings.ReplaceAll(userName, "-", "")
userName = strings.ReplaceAll(userName, ".", "")
userName = strings.ReplaceAll(userName, "_", "")
return strings.ToLower(userName)
}
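strings.ReplaceAll(s, old, new) is defined as strings.Replace(s, old, new, -1), so the change above is purely mechanical and does not alter what CleanUserName returns. A small usage sketch, with cleanUserName as a local stand-in for the exported function:

package main

import (
	"fmt"
	"strings"
)

// cleanUserName is a local copy of the CleanUserName shown above: it strips
// "-", "." and "_" and lower-cases the rest.
func cleanUserName(userName string) string {
	userName = strings.ReplaceAll(userName, "-", "")
	userName = strings.ReplaceAll(userName, ".", "")
	userName = strings.ReplaceAll(userName, "_", "")
	return strings.ToLower(userName)
}

func main() {
	fmt.Println(cleanUserName("Jane.Doe_2021"))                        // janedoe2021
	fmt.Println(cleanUserName("jane-doe") == cleanUserName("JaneDoe")) // true
}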

View File

@@ -19,6 +19,7 @@ package tar
import (
"archive/tar"
"errors"
"io"
"os"
"path/filepath"
@@ -27,6 +28,31 @@ import (
"github.com/sirupsen/logrus"
)
// maxFileSize caps the size of a single file after decompression at 1 GB
const maxFileSize = int64(1 * 1024 * 1024 * 1024) // 1 GB
// ErrFileTooLarge is returned when a decompressed file is too large
var ErrFileTooLarge = errors.New("trying to decompress file larger than 1GB")
type limitReader struct {
r io.Reader
n int64
}
// Read returns an error once the limit is exceeded. Inspired by io.LimitReader.Read
func (lr *limitReader) Read(p []byte) (n int, err error) {
if lr.n <= 0 {
return 0, ErrFileTooLarge
}
if int64(len(p)) > lr.n {
p = p[0:lr.n]
}
n, err = lr.r.Read(p)
lr.n -= int64(n)
return
}
// UntarToDir decompresses and unarchives the files into the given directory
func UntarToDir(r io.Reader, dir string) error {
tr := tar.NewReader(r)
@@ -42,7 +68,7 @@ func UntarToDir(r io.Reader, dir string) error {
continue
}
target := filepath.Join(dir, header.Name)
target := filepath.Join(dir, filepath.Clean(header.Name)) // gosec G305
switch {
case header.Typeflag == tar.TypeSymlink:
@@ -60,7 +86,8 @@ func UntarToDir(r io.Reader, dir string) error {
if err != nil {
return err
}
if _, err := io.Copy(f, tr); err != nil { // nolint[gosec]
lr := &limitReader{r: tr, n: maxFileSize} // gosec G110
if _, err := io.Copy(f, lr); err != nil {
return err
}
if runtime.GOOS != "windows" {
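The limitReader added above is what resolves gosec G110 (potential decompression bomb): instead of an unbounded io.Copy from the tar stream, each file is read through a reader that fails once maxFileSize bytes have been consumed. The sketch below shows the same pattern with a 16-byte cap standing in for the commit's 1 GB limit so the failure is easy to see; the names cappedReader and errTooLarge are illustrative, not from the commit.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
)

var errTooLarge = errors.New("decompressed data larger than the limit")

// cappedReader follows the limitReader added above: it hands out at most n
// bytes and then fails with an explicit error instead of a silent io.EOF.
type cappedReader struct {
	r io.Reader
	n int64
}

func (cr *cappedReader) Read(p []byte) (n int, err error) {
	if cr.n <= 0 {
		return 0, errTooLarge
	}
	if int64(len(p)) > cr.n {
		p = p[0:cr.n]
	}
	n, err = cr.r.Read(p)
	cr.n -= int64(n)
	return
}

func main() {
	// A 16-byte cap stands in for the commit's 1 GB maxFileSize.
	src := strings.NewReader(strings.Repeat("A", 64))
	cr := &cappedReader{r: src, n: 16}

	n, err := io.Copy(io.Discard, cr)
	fmt.Println(n, err) // 16 decompressed data larger than the limit
}

Unlike io.LimitReader, which quietly reports io.EOF once its budget is spent, this variant surfaces an explicit error, so UntarToDir can abort rather than write a silently truncated file.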