Implemented peer review comments

This commit is contained in:
Patrick Pacher
2020-04-14 11:14:04 +02:00
parent f96f8d8d6e
commit f630df0b1f
8 changed files with 90 additions and 35 deletions

View File

@@ -10,6 +10,7 @@ import (
"github.com/safing/portbase/log"
"github.com/safing/portmaster/intel/filterlist"
"github.com/safing/portmaster/intel/geoip"
"github.com/safing/portmaster/network/netutils"
"github.com/safing/portmaster/status"
)
@@ -303,15 +304,12 @@ func (e *Entity) getIPLists() {
if ip == nil {
return
}
// abort if it's not a global unicast (not that IPv6 link local unicasts are treated
// as global)
if !ip.IsGlobalUnicast() {
return
}
// ingore linc local unicasts as well (not done by IsGlobalUnicast above).
if ip.IsLinkLocalUnicast() {
// only load lists for IP addresses that are classified as global.
if netutils.ClassifyIP(ip) != netutils.Global {
return
}
log.Debugf("intel: loading IP list for %s", ip)
e.loadIPListOnce.Do(func() {
list, err := filterlist.LookupIP(ip)

View File

@@ -2,6 +2,7 @@ package filterlist
import (
"context"
"fmt"
"os"
"sort"
"strings"
@@ -90,12 +91,33 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil
g, ctx := errgroup.WithContext(ctx)
g.Go(func() error {
// startSafe runs fn inside the error group, wrapped in a
// function that recovers from panics and converts them to errors.
startSafe := func(fn func() error) {
g.Go(func() (err error) {
defer func() {
if x := recover(); x != nil {
if e, ok := x.(error); ok {
err = e
} else {
err = fmt.Errorf("%v", x)
}
}
}()
err = fn()
return err
})
}
startSafe(func() (err error) {
defer close(values)
return decodeFile(ctx, f, values)
err = decodeFile(ctx, f, values)
return
})
g.Go(func() error {
startSafe(func() error {
defer close(records)
for entry := range values {
if err := processEntry(ctx, filter, entry, records); err != nil {
@@ -139,7 +161,7 @@ func processListFile(ctx context.Context, filter *scopedBloom, file *updater.Fil
return batchPut(nil)
}
startBatch = func() {
g.Go(processBatch)
startSafe(processBatch)
}
startBatch()

View File

@@ -191,6 +191,8 @@ func updateListIndex() error {
return nil
}
// ResolveListIDs resolves a slice of source or category IDs into
// a slice of distinct source IDs.
func ResolveListIDs(ids []string) ([]string, error) {
index, err := getListIndexFromCache()

View File

@@ -1,17 +1,25 @@
package filterlist
import "strings"
// LookupMap is a helper type for matching a list of endpoint sources
// against a map.
type LookupMap map[string]struct{}
// Match returns Denied if a source in `list` is part of lm.
// Match checks if a source in `list` is part of lm.
// All matches are joined into a single comma-separated string and returned.
// If nothing is found, an empty string is returned.
func (lm LookupMap) Match(list []string) string {
matches := make([]string, 0, len(list))
for _, l := range list {
if _, ok := lm[l]; ok {
return l
matches = append(matches, l)
}
}
return ""
if len(matches) == 0 {
return ""
}
return strings.Join(matches, ", ")
}

View File

@@ -101,7 +101,7 @@ func performUpdate(ctx context.Context) error {
// been updated now. Once we are done, start a worker
// for that purpose.
if cleanupRequired {
defer module.StartWorker("filterlist:cleanup", removeObsoleteFilterEntries)
defer module.StartWorker("filterlist:cleanup", removeAllObsoleteFilterEntries)
}
// try to save the highest version of our files.
@@ -113,7 +113,20 @@ func performUpdate(ctx context.Context) error {
return nil
}
func removeObsoleteFilterEntries(_ context.Context) error {
func removeAllObsoleteFilterEntries(_ context.Context) error {
for {
done, err := removeObsoleteFilterEntries(1000)
if err != nil {
return err
}
if done {
return nil
}
}
}
func removeObsoleteFilterEntries(batchSize int) (bool, error) {
log.Infof("intel/filterlists: cleanup task started, removing obsolete filter list entries ...")
iter, err := cache.Query(
@@ -124,20 +137,33 @@ func removeObsoleteFilterEntries(_ context.Context) error {
),
)
if err != nil {
return err
return false, err
}
keys := make([]string, 0, batchSize)
var cnt int
for r := range iter.Next {
cnt++
r.Meta().Delete()
if err := cache.Put(r); err != nil {
log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", r.Key(), err)
keys = append(keys, r.Key())
if cnt == batchSize {
break
}
}
iter.Cancel()
for _, key := range keys {
if err := cache.Delete(key); err != nil {
log.Errorf("intel/filterlists: failed to remove stale cache entry %q: %s", key, err)
}
}
log.Debugf("intel/filterlists: successfully removed %d obsolete entries", cnt)
return nil
// if we removed fewer entries than the batch size, we
// are done and no more entries exist.
return cnt < batchSize, nil
}
// getUpgradableFiles returns a slice of filterlist files