Fix linting errors

This commit is contained in:
Patrick Pacher
2024-03-27 16:17:58 +01:00
parent 653a365bce
commit 61176af14e
48 changed files with 167 additions and 153 deletions

View File

@@ -3,6 +3,7 @@ package core
import (
"context"
"encoding/hex"
"errors"
"fmt"
"net/http"
"net/url"
@@ -23,6 +24,8 @@ import (
"github.com/safing/portmaster/spn/captain"
)
var errInvalidReadPermission = errors.New("invalid read permission")
func registerAPIEndpoints() error {
if err := api.RegisterEndpoint(api.Endpoint{
Path: "core/shutdown",
@@ -207,10 +210,10 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
// convert the requested read and write permissions to their api.Permission
// value. This ensures only "user" or "admin" permissions can be requested.
if getSavePermission(readPermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission")
return nil, errInvalidReadPermission
}
if getSavePermission(writePermStr) <= api.NotSupported {
return nil, fmt.Errorf("invalid read permission")
return nil, errInvalidReadPermission
}
proc, err := process.GetProcessByRequestOrigin(ar)
@@ -281,7 +284,7 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
select {
case key := <-ch:
if len(key) == 0 {
return nil, fmt.Errorf("access denied")
return nil, errors.New("access denied")
}
return map[string]interface{}{
@@ -289,6 +292,6 @@ func authorizeApp(ar *api.Request) (interface{}, error) {
"validUntil": validUntil,
}, nil
case <-ar.Context().Done():
return nil, fmt.Errorf("timeout")
return nil, errors.New("timeout")
}
}

View File

@@ -4,6 +4,7 @@ package nfq
import (
"encoding/binary"
"errors"
"fmt"
ct "github.com/florianl/go-conntrack"
@@ -35,7 +36,7 @@ func TeardownNFCT() {
// DeleteAllMarkedConnection deletes all marked entries from the conntrack table.
func DeleteAllMarkedConnection() error {
if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized")
return errors.New("nfq: nfct not initialized")
}
// Delete all ipv4 marked connections
@@ -87,7 +88,7 @@ func deleteMarkedConnections(nfct *ct.Nfct, f ct.Family) (deleted int) {
// DeleteMarkedConnection removes a specific connection from the conntrack table.
func DeleteMarkedConnection(conn *network.Connection) error {
if nfct == nil {
return fmt.Errorf("nfq: nfct not initialized")
return errors.New("nfq: nfct not initialized")
}
con := ct.Con{

View File

@@ -612,18 +612,6 @@ func issueVerdict(conn *network.Connection, pkt packet.Packet, verdict network.V
}
}
// verdictRating rates the privacy and security aspect of verdicts from worst to best.
var verdictRating = []network.Verdict{
network.VerdictAccept, // Connection allowed in the open.
network.VerdictRerouteToTunnel, // Connection allowed, but protected.
network.VerdictRerouteToNameserver, // Connection allowed, but resolved via Portmaster.
network.VerdictBlock, // Connection blocked, with feedback.
network.VerdictDrop, // Connection blocked, without feedback.
network.VerdictFailed,
network.VerdictUndeterminable,
network.VerdictUndecided,
}
// func tunnelHandler(pkt packet.Packet) {
// tunnelInfo := GetTunnelInfo(pkt.Info().Dst)
// if tunnelInfo == nil {

View File

@@ -2,9 +2,9 @@ package intel
import (
"context"
"fmt"
"net"
"sort"
"strconv"
"strings"
"sync"
@@ -433,7 +433,7 @@ func (e *Entity) getASNLists(ctx context.Context) {
}
e.loadAsnListOnce.Do(func() {
asnStr := fmt.Sprintf("%d", asn)
asnStr := strconv.FormatUint(uint64(asn), 10)
list, err := filterlists.LookupASNString(asnStr)
if err != nil {
log.Tracer(ctx).Errorf("intel: failed to get ASN blocklist for %d: %s", asn, err)

View File

@@ -103,18 +103,19 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
if _, err = r.Read(listHeader[:]); err != nil {
// if we have an error here we can safely abort because
// the file must be broken
return
return compressed, format, err
}
if listHeader[0] != dsd.LIST {
err = fmt.Errorf("unexpected file type: %d (%c), expected dsd list", listHeader[0], listHeader[0])
return
return compressed, format, err
}
var compression [1]byte
if _, err = r.Read(compression[:]); err != nil {
// same here, a DSDL file must have at least 2 bytes header
return
return compressed, format, err
}
if compression[0] == dsd.GZIP {
@@ -122,15 +123,16 @@ func parseHeader(r io.Reader) (compressed bool, format byte, err error) {
var formatSlice [1]byte
if _, err = r.Read(formatSlice[:]); err != nil {
return
return compressed, format, err
}
format = formatSlice[0]
return
return compressed, format, err
}
format = compression[0]
return // nolint:nakedret
return compressed, format, err
}
// byteReader extends an io.Reader to implement the ByteReader interface.

View File

@@ -1,7 +1,7 @@
package geoip
import (
"fmt"
"errors"
"net"
"github.com/oschwald/maxminddb-golang"
@@ -16,7 +16,7 @@ func getReader(ip net.IP) *maxminddb.Reader {
func GetLocation(ip net.IP) (*Location, error) {
db := getReader(ip)
if db == nil {
return nil, fmt.Errorf("geoip database not available")
return nil, errors.New("geoip database not available")
}
record := &Location{}
if err := db.Lookup(ip, record); err != nil {

View File

@@ -191,10 +191,8 @@ func handleListenError(err error, ip net.IP, port uint16, primaryListener bool)
EventID: eventIDConflictingService + secondaryEventIDSuffix,
Type: notifications.Error,
Title: "Conflicting DNS Software",
Message: fmt.Sprintf(
"Restart Portmaster after you have deactivated or properly configured the conflicting software: %s",
Message: "Restart Portmaster after you have deactivated or properly configured the conflicting software: " +
cfDescription,
),
ShowOnSystem: true,
AvailableActions: []*notifications.Action{
{

View File

@@ -21,6 +21,8 @@ import (
var hostname string
const internalError = "internal error: "
func handleRequestAsWorker(w dns.ResponseWriter, query *dns.Msg) {
err := module.RunWorker("handle dns request", func(ctx context.Context) error {
return handleRequest(ctx, w, query)
@@ -130,7 +132,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup until end of fail duration for %s", remainingFailingDuration.Round(time.Millisecond))
time.Sleep(remainingFailingDuration)
return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(),
internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding",
))
}
@@ -138,7 +140,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Tracef("nameserver: delaying failing lookup for %s", failingDelay.Round(time.Millisecond))
time.Sleep(failingDelay)
return reply(nsutil.ServerFailure(
"internal error: "+failingErr.Error(),
internalError+failingErr.Error(),
"delayed failing query to mitigate request flooding",
fmt.Sprintf("error is cached for another %s", remainingFailingDuration.Round(time.Millisecond)),
))
@@ -148,7 +150,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
local, err := netenv.IsMyIP(remoteAddr.IP)
if err != nil {
tracer.Warningf("nameserver: failed to check if request for %s is local: %s", q.ID(), err)
return reply(nsutil.ServerFailure("internal error: failed to check if request is local"))
return reply(nsutil.ServerFailure(internalError + "failed to check if request is local"))
}
// Create connection ID for dns request.
@@ -170,7 +172,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
conn, err = network.NewConnectionFromExternalDNSRequest(ctx, q.FQDN, nil, connID, remoteAddr.IP)
if err != nil {
tracer.Warningf("nameserver: failed to get host/profile for request for %s%s: %s", q.FQDN, q.QType, err)
return reply(nsutil.ServerFailure("internal error: failed to get profile"))
return reply(nsutil.ServerFailure(internalError + "failed to get profile"))
}
default:
@@ -210,7 +212,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case network.VerdictUndecided, network.VerdictAccept:
// Check if we have a response.
if rrCache == nil {
conn.Failed("internal error: no reply", "")
conn.Failed(internalError+"no reply", "")
return
}
@@ -293,7 +295,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
tracer.Warningf("nameserver: failed to resolve %s: %s", q.ID(), err)
conn.Failed(fmt.Sprintf("query failed: %s", err), "")
addFailingQuery(q, err)
return reply(nsutil.ServerFailure("internal error: " + err.Error()))
return reply(nsutil.ServerFailure(internalError + err.Error()))
}
}
// Handle special cases.
@@ -301,7 +303,7 @@ func handleRequest(ctx context.Context, w dns.ResponseWriter, request *dns.Msg)
case rrCache == nil:
tracer.Warning("nameserver: received successful, but empty reply from resolver")
addFailingQuery(q, errors.New("emptry reply from resolver"))
return reply(nsutil.ServerFailure("internal error: empty reply"))
return reply(nsutil.ServerFailure(internalError + "empty reply"))
case rrCache.RCode == dns.RcodeNameError:
// Try alternatives domain names for unofficial domain spaces.
altRRCache := checkAlternativeCaches(ctx, q)

View File

@@ -42,7 +42,7 @@ func (ch *ActiveChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.Requ
orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@@ -77,7 +77,7 @@ func (ch *ActiveChartHandler) parseRequest(req *http.Request) (*QueryActiveConne
var requestPayload QueryActiveConnectionChartPayload
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View File

@@ -49,7 +49,7 @@ func (ch *BandwidthChartHandler) ServeHTTP(resp http.ResponseWriter, req *http.R
orm.WithResult(&result),
orm.WithSchema(*ch.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@@ -84,7 +84,7 @@ func (ch *BandwidthChartHandler) parseRequest(req *http.Request) (*BandwidthChar
var requestPayload BandwidthChartRequest
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View File

@@ -23,18 +23,18 @@ type (
// insert or an update.
// The ID of Conn is unique and can be trusted to never collide with other
// connections of the save device.
Save(context.Context, Conn, bool) error
Save(ctx context.Context, conn Conn, history bool) error
// MarkAllHistoryConnectionsEnded marks all active connections in the history
// database as ended NOW.
MarkAllHistoryConnectionsEnded(context.Context) error
MarkAllHistoryConnectionsEnded(ctx context.Context) error
// RemoveAllHistoryData removes all connections from the history database.
RemoveAllHistoryData(context.Context) error
RemoveAllHistoryData(ctx context.Context) error
// RemoveHistoryForProfile removes all connections from the history database.
// for a given profile ID (source/id)
RemoveHistoryForProfile(context.Context, string) error
RemoveHistoryForProfile(ctx context.Context, profile string) error
// UpdateBandwidth updates bandwidth data for the connection and optionally also writes
// the bandwidth data to the history database.

View File

@@ -41,13 +41,13 @@ type (
// by *sqlite.Stmt.
Stmt interface {
ColumnCount() int
ColumnName(int) string
ColumnType(int) sqlite.ColumnType
ColumnText(int) string
ColumnBool(int) bool
ColumnFloat(int) float64
ColumnInt(int) int
ColumnReader(int) *bytes.Reader
ColumnName(col int) string
ColumnType(col int) sqlite.ColumnType
ColumnText(col int) string
ColumnBool(col int) bool
ColumnFloat(col int) float64
ColumnInt(col int) int
ColumnReader(col int) *bytes.Reader
}
// DecodeFunc is called for each non-basic type during decoding.
@@ -230,7 +230,7 @@ func DatetimeDecoder(loc *time.Location) DecodeFunc {
case sqlite.TypeFloat:
// stored as Julian day numbers
return nil, false, fmt.Errorf("REAL storage type not support for time.Time")
return nil, false, errors.New("REAL storage type not support for time.Time")
case sqlite.TypeNull:
return nil, true, nil
@@ -359,7 +359,7 @@ func decodeBasic() DecodeFunc {
case reflect.Slice:
if outval.Type().Elem().Kind() != reflect.Uint8 {
return nil, false, fmt.Errorf("slices other than []byte for BLOB are not supported")
return nil, false, errors.New("slices other than []byte for BLOB are not supported")
}
if colType != sqlite.TypeBlob {

View File

@@ -2,6 +2,7 @@ package orm
import (
"context"
"errors"
"fmt"
"reflect"
"time"
@@ -171,7 +172,7 @@ func DatetimeEncoder(loc *time.Location) EncodeFunc {
valInterface := val.Interface()
t, ok = valInterface.(time.Time)
if !ok {
return nil, false, fmt.Errorf("cannot convert reflect value to time.Time")
return nil, false, errors.New("cannot convert reflect value to time.Time")
}
case valType.Kind() == reflect.String && colDef.IsTime:

View File

@@ -6,6 +6,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"zombiezen.com/go/sqlite"
)
@@ -120,7 +121,7 @@ func TestEncodeAsMap(t *testing.T) { //nolint:tparallel
c := cases[idx]
t.Run(c.Desc, func(t *testing.T) {
res, err := ToParamMap(ctx, c.Input, "", DefaultEncodeConfig, nil)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Expected, res)
})
}
@@ -253,7 +254,7 @@ func TestEncodeValue(t *testing.T) { //nolint:tparallel
c := cases[idx]
t.Run(c.Desc, func(t *testing.T) {
res, err := EncodeValue(ctx, &c.Column, c.Input, DefaultEncodeConfig)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Output, res)
})
}

View File

@@ -274,7 +274,7 @@ func applyStructFieldTag(fieldType reflect.StructField, def *ColumnDef) error {
case sqlite.TypeText:
def.Default = defaultValue
case sqlite.TypeBlob:
return fmt.Errorf("default values for TypeBlob not yet supported")
return errors.New("default values for TypeBlob not yet supported")
default:
return fmt.Errorf("failed to apply default value for unknown sqlite column type %s", def.Type)
}

View File

@@ -4,6 +4,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSchemaBuilder(t *testing.T) {
@@ -37,7 +38,7 @@ func TestSchemaBuilder(t *testing.T) {
c := cases[idx]
res, err := GenerateTableSchema(c.Name, c.Model)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.ExpectedSQL, res.CreateStatement("main", false))
}
}

View File

@@ -19,6 +19,8 @@ import (
var charOnlyRegexp = regexp.MustCompile("[a-zA-Z]+")
const failedQuery = "Failed to execute query: "
type (
// QueryHandler implements http.Handler and allows to perform SQL
@@ -78,7 +80,7 @@ func (qh *QueryHandler) ServeHTTP(resp http.ResponseWriter, req *http.Request) {
orm.WithResult(&result),
orm.WithSchema(*qh.Database.Schema),
); err != nil {
http.Error(resp, "Failed to execute query: "+err.Error(), http.StatusInternalServerError)
http.Error(resp, failedQuery+err.Error(), http.StatusInternalServerError)
return
}
@@ -230,7 +232,7 @@ func parseQueryRequestPayload[T any](req *http.Request) (*T, error) { //nolint:d
blob, err := io.ReadAll(body)
if err != nil {
return nil, fmt.Errorf("failed to read body" + err.Error())
return nil, fmt.Errorf("failed to read body: %w", err)
}
body = bytes.NewReader(blob)

View File

@@ -102,7 +102,7 @@ func TestUnmarshalQuery(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.Error.Error(), err.Error())
}
} else {
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, c.Expected, q)
}
})
@@ -241,7 +241,7 @@ func TestQueryBuilder(t *testing.T) { //nolint:tparallel
assert.Equal(t, c.E.Error(), err.Error(), "test case %d", cID)
}
} else {
assert.NoError(t, err, "test case %d", cID)
require.NoError(t, err, "test case %d", cID)
assert.Equal(t, c.P, params, "test case %d", cID)
assert.Equal(t, c.R, str, "test case %d", cID)
}

View File

@@ -136,11 +136,11 @@ func AddNetworkDebugData(di *debug.Info, profile, where string) {
// Collect matching connections.
var ( //nolint:prealloc // We don't know the size.
debugConns []*Connection
accepted int
total int
debugConns []*Connection
accepted int
total int
)
for maybeConn := range it.Next {
// Switch to correct type.
conn, ok := maybeConn.(*Connection)

View File

@@ -751,12 +751,14 @@ func (conn *Connection) SaveWhenFinished() {
func (conn *Connection) Save() {
conn.UpdateMeta()
// nolint:exhaustive
switch conn.Verdict {
case VerdictAccept, VerdictRerouteToNameserver:
conn.ConnectionEstablished = true
case VerdictRerouteToTunnel:
// this is already handled when the connection tunnel has been
// established.
default:
}
// Do not save/update until data is complete.

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net"
"strconv"
"github.com/google/gopacket"
)
@@ -207,9 +208,9 @@ func (pkt *Base) FmtRemoteIP() string {
func (pkt *Base) FmtRemotePort() string {
if pkt.info.SrcPort != 0 {
if pkt.info.Inbound {
return fmt.Sprintf("%d", pkt.info.SrcPort)
return strconv.FormatUint(uint64(pkt.info.SrcPort), 10)
}
return fmt.Sprintf("%d", pkt.info.DstPort)
return strconv.FormatUint(uint64(pkt.info.DstPort), 10)
}
return "-"
}
@@ -235,10 +236,10 @@ type Packet interface {
ExpectInfo() bool
// Info.
SetCtx(context.Context)
SetCtx(ctx context.Context)
Ctx() context.Context
Info() *Info
SetPacketInfo(Info)
SetPacketInfo(info Info)
IsInbound() bool
IsOutbound() bool
SetInbound()
@@ -253,8 +254,8 @@ type Packet interface {
Payload() []byte
// Matching.
MatchesAddress(bool, IPProtocol, *net.IPNet, uint16) bool
MatchesIP(bool, *net.IPNet) bool
MatchesAddress(remote bool, protocol IPProtocol, network *net.IPNet, port uint16) bool
MatchesIP(endpoint bool, network *net.IPNet) bool
// Formatting.
String() string

View File

@@ -44,7 +44,7 @@ type Address struct {
// Info is a generic interface to both ConnectionInfo and BindInfo.
type Info interface {
GetPID() int
SetPID(int)
SetPID(pid int)
GetUID() int
GetUIDandInode() (int, int)
}

View File

@@ -2,7 +2,6 @@ package process
import (
"errors"
"fmt"
"net/http"
"strconv"
@@ -70,7 +69,7 @@ func handleGetProcessesByProfile(ar *api.Request) (any, error) {
source := ar.URLVars["source"]
id := ar.URLVars["id"]
if id == "" || source == "" {
return nil, api.ErrorWithStatus(fmt.Errorf("missing profile source/id"), http.StatusBadRequest)
return nil, api.ErrorWithStatus(errors.New("missing profile source/id"), http.StatusBadRequest)
}
result := GetProcessesWithProfile(ar.Context(), profile.ProfileSource(source), id, true)

View File

@@ -72,7 +72,8 @@ func GetProcessesWithProfile(ctx context.Context, profileSource profile.ProfileS
slices.SortFunc[[]*Process, *Process](procs, func(a, b *Process) int {
return strings.Compare(a.processKey, b.processKey)
})
slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
procs = slices.CompactFunc[[]*Process, *Process](procs, func(a, b *Process) bool {
return a.processKey == b.processKey
})

View File

@@ -40,6 +40,6 @@ func AddToDebugInfo(di *debug.Info) {
fmt.Sprintf("Status: %s", netenv.GetOnlineStatus()),
debug.UseCodeSection|debug.AddContentLineBreaks,
fmt.Sprintf("OnlineStatus: %s", netenv.GetOnlineStatus()),
fmt.Sprintf("CaptivePortal: %s", netenv.GetCaptivePortal().URL),
"CaptivePortal: "+netenv.GetCaptivePortal().URL,
)
}

View File

@@ -25,6 +25,8 @@ const (
ReleaseChannelSupport = "support"
)
const jsonSuffix = ".json"
// SetIndexes sets the update registry indexes and also configures the registry
// to use pre-releases based on the channel.
func SetIndexes(
@@ -51,12 +53,12 @@ func SetIndexes(
// Always add the stable index as a base.
registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + ".json",
Path: ReleaseChannelStable + jsonSuffix,
AutoDownload: autoDownload,
})
// Add beta index if in beta or staging channel.
indexPath := ReleaseChannelBeta + ".json"
indexPath := ReleaseChannelBeta + jsonSuffix
if releaseChannel == ReleaseChannelBeta ||
releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
@@ -74,7 +76,7 @@ func SetIndexes(
}
// Add staging index if in staging channel.
indexPath = ReleaseChannelStaging + ".json"
indexPath = ReleaseChannelStaging + jsonSuffix
if releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
@@ -91,7 +93,7 @@ func SetIndexes(
}
// Add support index if in support channel.
indexPath = ReleaseChannelSupport + ".json"
indexPath = ReleaseChannelSupport + jsonSuffix
if releaseChannel == ReleaseChannelSupport ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{

View File

@@ -226,7 +226,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
updateASAP = true
case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
return fmt.Errorf("automatic updating is disabled")
return errors.New("automatic updating is disabled")
default:
if forceIndexCheck {
@@ -254,7 +254,7 @@ func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
func DisableUpdateSchedule() error {
switch module.Status() {
case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping:
return fmt.Errorf("module already online")
return errors.New("module already online")
}
disableTaskSchedule = true