wip: migrate to mono-repo. SPN has already been moved to spn/

This commit is contained in:
Patrick Pacher
2024-03-15 11:55:13 +01:00
parent b30fd00ccf
commit 8579430db9
577 changed files with 35981 additions and 818 deletions

77
service/profile/active.go Normal file
View File

@@ -0,0 +1,77 @@
package profile
import (
"context"
"sync"
"time"
)
const (
activeProfileCleanerTickDuration = 5 * time.Minute
activeProfileCleanerThreshold = 1 * time.Hour
)
var (
activeProfiles = make(map[string]*Profile)
activeProfilesLock sync.RWMutex
)
// getActiveProfile returns the active profile cached under the given scoped
// ID, or nil if no profile is cached under that ID.
func getActiveProfile(scopedID string) *Profile {
	activeProfilesLock.RLock()
	profile := activeProfiles[scopedID]
	activeProfilesLock.RUnlock()

	return profile
}
// getAllActiveProfiles returns a new slice holding all currently active
// profiles.
func getAllActiveProfiles() []*Profile {
	activeProfilesLock.RLock()
	defer activeProfilesLock.RUnlock()

	all := make([]*Profile, 0, len(activeProfiles))
	for _, profile := range activeProfiles {
		all = append(all, profile)
	}
	return all
}
// addActiveProfile registers an active profile, replacing any previously
// cached profile under the same scoped ID. The replaced profile is marked
// as outdated so existing holders know to refresh.
func addActiveProfile(profile *Profile) {
	activeProfilesLock.Lock()
	defer activeProfilesLock.Unlock()

	// Mark any previous profile as outdated.
	if previous, ok := activeProfiles[profile.ScopedID()]; ok {
		previous.outdated.Set()
	}

	// Mark new profile active and add to active profiles.
	profile.MarkStillActive()
	activeProfiles[profile.ScopedID()] = profile
}
// cleanActiveProfiles periodically removes profiles from the active profile
// cache that have not been marked active within activeProfileCleanerThreshold.
// Removed profiles are marked as outdated. It runs until ctx is canceled and
// always returns nil.
func cleanActiveProfiles(ctx context.Context) error {
	// Use a ticker instead of time.After, which would allocate a new timer
	// on every loop iteration.
	ticker := time.NewTicker(activeProfileCleanerTickDuration)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			threshold := time.Now().Add(-activeProfileCleanerThreshold).Unix()

			activeProfilesLock.Lock()
			for id, profile := range activeProfiles {
				// Remove profile if it hasn't been used for a while.
				if profile.LastActive() < threshold {
					profile.outdated.Set()
					delete(activeProfiles, id)
				}
			}
			activeProfilesLock.Unlock()

		case <-ctx.Done():
			return nil
		}
	}
}

164
service/profile/api.go Normal file
View File

@@ -0,0 +1,164 @@
package profile
import (
"errors"
"fmt"
"net/http"
"path/filepath"
"strings"
"github.com/safing/portbase/api"
"github.com/safing/portbase/formats/dsd"
"github.com/safing/portbase/utils"
"github.com/safing/portmaster/service/profile/binmeta"
)
// registerAPIEndpoints registers the profile API endpoints: merging profiles
// and getting/updating profile icons.
func registerAPIEndpoints() error {
	profileEndpoints := []api.Endpoint{
		{
			Name:        "Merge profiles",
			Description: "Merge multiple profiles into a new one.",
			Path:        "profile/merge",
			Write:       api.PermitUser,
			BelongsTo:   module,
			StructFunc:  handleMergeProfiles,
		},
		{
			Name:        "Get Profile Icon",
			Description: "Returns the requested profile icon.",
			Path:        "profile/icon/{id:[a-f0-9]*\\.[a-z]{3,4}}",
			Read:        api.PermitUser,
			BelongsTo:   module,
			DataFunc:    handleGetProfileIcon,
		},
		{
			Name:        "Update Profile Icon",
			Description: "Updates a profile icon.",
			Path:        "profile/icon",
			Write:       api.PermitUser,
			BelongsTo:   module,
			StructFunc:  handleUpdateProfileIcon,
		},
	}

	// Register in order, aborting on the first failure.
	for _, endpoint := range profileEndpoints {
		if err := api.RegisterEndpoint(endpoint); err != nil {
			return err
		}
	}
	return nil
}
// mergeProfilesRequest is the request payload of the "profile/merge" API
// endpoint.
type mergeProfilesRequest struct {
	Name string   `json:"name"` // Name of the new merged profile.
	To   string   `json:"to"`   // Profile scoped ID.
	From []string `json:"from"` // Profile scoped IDs.
}

// mergeprofilesResponse is the response payload of the "profile/merge" API
// endpoint.
// NOTE(review): the name breaks MixedCaps convention (should be
// mergeProfilesResponse); renaming touches callers and is left for a
// follow-up.
type mergeprofilesResponse struct {
	New string `json:"new"` // Profile scoped ID.
}
// handleMergeProfiles handles the "profile/merge" API endpoint.
// It parses the request, loads the primary ("to") profile and all secondary
// ("from") profiles, merges them into a new profile and returns the new
// profile's scoped ID.
func handleMergeProfiles(ar *api.Request) (i interface{}, err error) {
	request := &mergeProfilesRequest{}
	_, err = dsd.MimeLoad(ar.InputData, ar.Header.Get("Content-Type"), request)
	if err != nil {
		return nil, fmt.Errorf("failed to parse request: %w", err)
	}

	// Get all profiles.
	var (
		primary     *Profile
		secondaries = make([]*Profile, 0, len(request.From))
	)
	if primary, err = getProfile(request.To); err != nil {
		return nil, fmt.Errorf("failed to get profile %s: %w", request.To, err)
	}
	for _, from := range request.From {
		sp, err := getProfile(from)
		if err != nil {
			// Fix: report the profile that actually failed to load; this
			// previously reported request.To for every secondary profile.
			return nil, fmt.Errorf("failed to get profile %s: %w", from, err)
		}
		secondaries = append(secondaries, sp)
	}

	newProfile, err := MergeProfiles(request.Name, primary, secondaries...)
	if err != nil {
		return nil, fmt.Errorf("failed to merge profiles: %w", err)
	}

	return &mergeprofilesResponse{
		New: newProfile.ScopedID(),
	}, nil
}
// handleGetProfileIcon serves a stored profile icon for the
// "profile/icon/{id}" API endpoint. The URL variable "id" includes the file
// extension, which is used to derive the response content type.
func handleGetProfileIcon(ar *api.Request) (data []byte, err error) {
	name := ar.URLVars["id"]
	ext := filepath.Ext(name)

	// Get profile icon.
	data, err = binmeta.GetProfileIcon(name)
	if err != nil {
		return nil, err
	}

	// Set content type for icon, if one is known for the extension.
	contentType, ok := utils.MimeTypeByExtension(ext)
	if ok {
		ar.ResponseHeader.Set("Content-Type", contentType)
	}

	return data, nil
}
// updateProfileIconResponse is the response payload of the "profile/icon"
// update endpoint, carrying the storage filename of the new icon.
type updateProfileIconResponse struct {
	Filename string `json:"filename"`
}

// handleUpdateProfileIcon handles the "profile/icon" update API endpoint.
// It validates the uploaded image data, derives the image format from the
// Content-Type header, stores the icon and returns its storage filename.
//
//nolint:goconst
func handleUpdateProfileIcon(ar *api.Request) (any, error) {
	// Check input.
	if len(ar.InputData) == 0 {
		return nil, api.ErrorWithStatus(errors.New("no content"), http.StatusBadRequest)
	}
	mimeType := ar.Header.Get("Content-Type")
	if mimeType == "" {
		return nil, api.ErrorWithStatus(errors.New("no content type"), http.StatusBadRequest)
	}

	// Derive image format from the media type, ignoring any parameters.
	mimeType = strings.TrimSpace(mimeType)
	mimeType = strings.ToLower(mimeType)
	mimeType, _, _ = strings.Cut(mimeType, ";")
	var ext string
	switch mimeType {
	case "image/gif":
		ext = "gif"
	case "image/jpeg", "image/jpg": // Merged previously duplicated cases.
		ext = "jpg"
	case "image/png":
		ext = "png"
	case "image/svg+xml":
		ext = "svg"
	case "image/tiff":
		ext = "tiff"
	case "image/webp":
		ext = "webp"
	default:
		// Fix: return nil (not "") so the error path is consistent with the
		// other returns of this function.
		return nil, api.ErrorWithStatus(errors.New("unsupported image format"), http.StatusBadRequest)
	}

	// Update profile icon.
	filename, err := binmeta.UpdateProfileIcon(ar.InputData, ext)
	if err != nil {
		return nil, err
	}

	return &updateProfileIconResponse{
		Filename: filename,
	}, nil
}

View File

@@ -0,0 +1,32 @@
package binmeta
import (
"bytes"
"fmt"
"image"
_ "image/png" // Register png support for image package
"github.com/fogleman/gg"
_ "github.com/mat/besticon/ico" // Register ico support for image package
)
// ConvertICOtoPNG converts an .ico image to a .png image.
// ICO decoding support is registered by the besticon/ico import; the decoded
// image is rendered onto a drawing context and re-encoded as PNG.
func ConvertICOtoPNG(ico []byte) (png []byte, err error) {
	// Decode the ICO.
	icon, _, err := image.Decode(bytes.NewReader(ico))
	if err != nil {
		return nil, fmt.Errorf("failed to decode ICO: %w", err)
	}

	// Convert to raw image.
	img := gg.NewContextForImage(icon)

	// Convert to PNG.
	imgBuf := &bytes.Buffer{}
	err = img.EncodePNG(imgBuf)
	if err != nil {
		return nil, fmt.Errorf("failed to encode PNG: %w", err)
	}

	return imgBuf.Bytes(), nil
}

View File

@@ -0,0 +1,10 @@
//go:build !linux && !windows
package binmeta
import "context"
// GetIconAndName returns zero values for unsupported platforms.
// See the linux and windows implementations for actual behavior.
func GetIconAndName(ctx context.Context, binPath string, homeDir string) (icon *Icon, name string, err error) {
	return nil, "", nil
}

View File

@@ -0,0 +1,111 @@
package binmeta
import (
"context"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
)
// GetIconAndName returns an icon and name of the given binary path.
// Providing the home directory of the user running the process of that binary can improve results.
// Even if an error is returned, the other return values are valid, if set.
func GetIconAndName(ctx context.Context, binPath string, homeDir string) (icon *Icon, name string, err error) {
	// Derive a fallback-quality name from the binary path.
	name = GenerateBinaryNameFromPath(binPath)

	// Search the known on-disk icon locations.
	iconPath, err := searchForIcon(binPath, homeDir)
	if iconPath == "" {
		// No icon found; report a search error, if any, alongside the name.
		if err != nil {
			return nil, name, fmt.Errorf("failed to find icon for %s: %w", binPath, err)
		}
		return nil, name, nil
	}

	// Save icon to internal storage.
	icon, err = LoadAndSaveIcon(ctx, iconPath)
	if err != nil {
		return nil, name, fmt.Errorf("failed to store icon for %s: %w", binPath, err)
	}

	return icon, name, nil
}
// searchForIcon looks for an icon matching the (lower-cased) binary path in
// all known icon locations, returning the first hit. An empty path with a
// nil error means nothing was found.
func searchForIcon(binPath string, homeDir string) (iconPath string, err error) {
	binPath = strings.ToLower(binPath)

	// Check each configured location until a match turns up.
	for _, location := range iconLocations {
		base := location.GetPath(binPath, homeDir)
		if base == "" {
			continue
		}

		var found string
		switch location.Type {
		case FlatDir:
			found, err = searchDirectory(base, binPath)
		case XDGIcons:
			found, err = searchXDGIconStructure(base, binPath)
		}
		if found != "" {
			return found, err
		}
	}
	return "", err
}
// searchXDGIconStructure checks every known XDG icon size directory below
// baseDirectory for an icon matching binPath.
func searchXDGIconStructure(baseDirectory string, binPath string) (string, error) {
	var lastErr error
	for _, sizeDir := range xdgIconPaths {
		candidate, err := searchDirectory(filepath.Join(baseDirectory, sizeDir), binPath)
		if candidate != "" {
			return candidate, err
		}
		lastErr = err
	}
	return "", lastErr
}
func searchDirectory(directory string, binPath string) (iconPath string, err error) {
entries, err := os.ReadDir(directory)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return "", nil
}
return "", fmt.Errorf("failed to read directory %s: %w", directory, err)
}
// DEBUG:
// fmt.Println(directory)
var (
bestMatch string
bestMatchExcessChars int
)
for _, entry := range entries {
// Skip dirs.
if entry.IsDir() {
continue
}
iconName := strings.ToLower(entry.Name())
iconName = strings.TrimSuffix(iconName, filepath.Ext(iconName))
switch {
case len(iconName) < len(binPath):
// Continue to next.
case iconName == binPath:
// Exact match, return immediately.
return filepath.Join(directory, entry.Name()), nil
case strings.HasPrefix(iconName, binPath):
excessChars := len(iconName) - len(binPath)
if bestMatch == "" || excessChars < bestMatchExcessChars {
bestMatch = entry.Name()
bestMatchExcessChars = excessChars
}
}
}
return bestMatch, nil
}

View File

@@ -0,0 +1,32 @@
package binmeta
import (
"os"
"testing"
)
// TestFindIcon checks icon lookup for two commonly installed applications.
// It depends on a Linux desktop environment and is skipped in short mode.
func TestFindIcon(t *testing.T) {
	if testing.Short() {
		t.Skip("test depends on linux desktop environment")
	}
	t.Parallel()

	home := os.Getenv("HOME")
	testFindIcon(t, "evolution", home)
	testFindIcon(t, "nextcloud", home)
}
// testFindIcon asserts that an icon can be located for the given binary name
// and logs the found path.
func testFindIcon(t *testing.T, binName string, homeDir string) {
	t.Helper()

	iconPath, err := searchForIcon(binName, homeDir)
	switch {
	case err != nil:
		t.Error(err)
	case iconPath == "":
		t.Errorf("no icon found for %s", binName)
	default:
		t.Logf("icon for %s found: %s", binName, iconPath)
	}
}

View File

@@ -0,0 +1,119 @@
package binmeta
import (
"bytes"
"context"
"errors"
"fmt"
"os"
"github.com/tc-hib/winres"
"github.com/tc-hib/winres/version"
)
// GetIconAndName returns an icon and name of the given binary path.
// Providing the home directory of the user running the process of that binary can improve results.
// Even if an error is returned, the other return values are valid, if set.
func GetIconAndName(ctx context.Context, binPath string, homeDir string) (icon *Icon, name string, err error) {
	// Get name and png from the exe's resource section.
	png, name, err := getIconAndNamefromRSS(ctx, binPath)
	// Fall back to name generation if name is not set.
	if name == "" {
		name = GenerateBinaryNameFromPath(binPath)
	}
	// Handle previous error.
	if err != nil {
		return nil, name, err
	}

	// Update profile icon and return icon object.
	// NOTE(review): when the exe does not exist, getIconAndNamefromRSS
	// returns nil png without error, so an empty icon would be stored here -
	// confirm this is intended.
	filename, err := UpdateProfileIcon(png, "png")
	if err != nil {
		return nil, name, fmt.Errorf("failed to store icon: %w", err)
	}

	return &Icon{
		Type:   IconTypeAPI,
		Value:  filename,
		Source: IconSourceCore,
	}, name, nil
}
// getIconAndNamefromRSS extracts a PNG icon and a product name from the
// resource section (RSS) of the Windows executable at binPath.
// A missing file is not an error: all zero values are returned then.
func getIconAndNamefromRSS(ctx context.Context, binPath string) (png []byte, name string, err error) {
	// Open .exe file.
	exeFile, err := os.Open(binPath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil, "", nil
		}
		return nil, "", fmt.Errorf("failed to open exe %s to get icon: %w", binPath, err)
	}
	defer exeFile.Close() //nolint:errcheck

	// Load .exe resources.
	rss, err := winres.LoadFromEXE(exeFile)
	if err != nil {
		return nil, "", fmt.Errorf("failed to get rss: %w", err)
	}

	// DEBUG: Print all available resources:
	// rss.Walk(func(typeID, resID winres.Identifier, langID uint16, data []byte) bool {
	// 	fmt.Printf("typeID=%d resID=%d langID=%d\n", typeID, resID, langID)
	// 	return true
	// })

	// Get name from the first version record that carries a product name.
	var (
		versionInfo    *version.Info
		versionInfoErr error
	)
	rss.WalkType(winres.RT_VERSION, func(resID winres.Identifier, langID uint16, data []byte) bool {
		versionInfo, versionInfoErr = version.FromBytes(data)
		switch {
		case versionInfoErr != nil:
			return true
		case versionInfo == nil:
			return true
		}

		// Get metadata table and main language.
		table := versionInfo.Table().GetMainTranslation()
		if table == nil {
			return true
		}

		name = table[version.ProductName]
		// Keep walking until a non-empty name was found.
		return name == ""
	})
	name = cleanFileDescription(name)

	// Get first icon.
	var (
		icon    *winres.Icon
		iconErr error
	)
	rss.WalkType(winres.RT_GROUP_ICON, func(resID winres.Identifier, langID uint16, _ []byte) bool {
		icon, iconErr = rss.GetIconTranslation(resID, langID)
		return iconErr != nil
	})
	if iconErr != nil {
		// Fix: wrap iconErr - previously this wrapped err, which is always
		// nil at this point, losing the actual failure.
		return nil, name, fmt.Errorf("failed to get icon: %w", iconErr)
	}
	if icon == nil {
		return nil, name, errors.New("no icon in resources")
	}

	// Convert icon to ICO, then to PNG.
	icoBuf := &bytes.Buffer{}
	err = icon.SaveICO(icoBuf)
	if err != nil {
		return nil, name, fmt.Errorf("failed to save ico: %w", err)
	}
	png, err = ConvertICOtoPNG(icoBuf.Bytes())
	if err != nil {
		return nil, name, fmt.Errorf("failed to convert ico to png: %w", err)
	}

	return png, name, nil
}

View File

@@ -0,0 +1,27 @@
package binmeta
import (
"context"
"os"
"testing"
)
// TestFindIcon extracts the icon and name from the binary given as the last
// command line argument and writes the icon to icon.png.
// It is meant for compiling and manually running on a desktop system and is
// skipped in short mode.
func TestFindIcon(t *testing.T) {
	if testing.Short() {
		t.Skip("test meant for compiling and running on desktop")
	}
	t.Parallel()

	binName := os.Args[len(os.Args)-1]
	t.Logf("getting name and icon for %s", binName)

	png, name, err := getIconAndNamefromRSS(context.Background(), binName)
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("name: %s", name)

	err = os.WriteFile("icon.png", png, 0o0600)
	if err != nil {
		t.Fatal(err)
	}
}

View File

@@ -0,0 +1,165 @@
package binmeta
import (
"errors"
"fmt"
"strings"
"sync"
"github.com/vincent-petithory/dataurl"
"golang.org/x/exp/slices"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/record"
)
// Icon describes an icon.
type Icon struct {
	Type   IconType   // How Value is to be interpreted (file path, db key, or API filename).
	Value  string     // Icon locator, interpreted according to Type.
	Source IconSource // Origin of the icon; influences sort priority.
}
// IconType describes the type of an Icon.
type IconType string

// Supported icon types.
const (
	IconTypeFile     IconType = "path"
	IconTypeDatabase IconType = "database"
	IconTypeAPI      IconType = "api"
)

// sortOrder ranks icon types for sorting; lower values sort first,
// unknown types sort last.
func (t IconType) sortOrder() int {
	orders := map[IconType]int{
		IconTypeAPI:      1,
		IconTypeDatabase: 2,
		IconTypeFile:     3,
	}
	if order, ok := orders[t]; ok {
		return order
	}
	return 9
}
// IconSource describes the source of an Icon.
type IconSource string

// Supported icon sources.
const (
	IconSourceUser   IconSource = "user"
	IconSourceImport IconSource = "import"
	IconSourceUI     IconSource = "ui"
	IconSourceCore   IconSource = "core"
)

// sortOrder ranks icon sources for sorting; lower values sort first,
// unknown sources sort last.
func (s IconSource) sortOrder() int {
	orders := map[IconSource]int{
		IconSourceUser:   10,
		IconSourceImport: 20,
		IconSourceUI:     30,
		IconSourceCore:   40,
	}
	if order, ok := orders[s]; ok {
		return order
	}
	return 90
}
// sortOrder combines the source and type ranks into a single sort key;
// lower values sort first.
func (icon Icon) sortOrder() int {
	return icon.Source.sortOrder() + icon.Type.sortOrder()
}
// SortAndCompactIcons sorts and compacts a list of icons.
// Icons are ordered by their source/type rank (see sortOrder) with the value
// as tie-breaker; afterwards icons with identical type and value are
// de-duplicated. The slice is modified in place and returned.
func SortAndCompactIcons(icons []Icon) []Icon {
	// Sort by rank, then value. Explicit type instantiation was removed;
	// type inference handles this.
	slices.SortFunc(icons, func(a, b Icon) int {
		aOrder := a.sortOrder()
		bOrder := b.sortOrder()
		switch {
		case aOrder != bOrder:
			return aOrder - bOrder
		case a.Value != b.Value:
			return strings.Compare(a.Value, b.Value)
		default:
			return 0
		}
	})

	// De-duplicate icons pointing at the same resource.
	icons = slices.CompactFunc(icons, func(a, b Icon) bool {
		return a.Type == b.Type && a.Value == b.Value
	})

	return icons
}
// GetIconAsDataURL returns the icon data as a data URL.
// Only database-stored and API-stored icons can be resolved; file-based
// icons are not supported.
func (icon Icon) GetIconAsDataURL() (bloburl string, err error) {
	switch icon.Type {
	case IconTypeFile:
		return "", errors.New("getting icon from file is not supported")
	case IconTypeDatabase:
		// Only allow access to the dedicated icon cache keys.
		if !strings.HasPrefix(icon.Value, "cache:icons/") {
			return "", errors.New("invalid icon db key")
		}
		r, err := iconDB.Get(icon.Value)
		if err != nil {
			return "", err
		}
		dbIcon, err := EnsureIconInDatabase(r)
		if err != nil {
			return "", err
		}
		// The stored IconData already is a data URL.
		return dbIcon.IconData, nil
	case IconTypeAPI:
		data, err := GetProfileIcon(icon.Value)
		if err != nil {
			return "", err
		}
		return dataurl.EncodeBytes(data), nil
	default:
		return "", errors.New("unknown icon type")
	}
}
// iconDB is the database interface used to look up icons stored under
// "cache:icons/" keys (see GetIconAsDataURL).
var iconDB = database.NewInterface(&database.Options{
	Local:    true,
	Internal: true,
})
// IconInDatabase represents an icon saved to the database.
type IconInDatabase struct {
	sync.Mutex
	record.Base

	IconData string `json:"iconData,omitempty"` // DataURL
}
// EnsureIconInDatabase ensures that the given record is a *IconInDatabase,
// and returns it. Wrapped records are unwrapped into a new struct; unwrapped
// records must already have the correct type.
func EnsureIconInDatabase(r record.Record) (*IconInDatabase, error) {
	// Unwrap wrapped records into a freshly allocated struct.
	if r.IsWrapped() {
		icon := &IconInDatabase{}
		if err := record.Unwrap(r, icon); err != nil {
			return nil, err
		}
		return icon, nil
	}

	// Otherwise, the record must already be of the expected type.
	icon, ok := r.(*IconInDatabase)
	if !ok {
		return nil, fmt.Errorf("record not of type *IconInDatabase, but %T", r)
	}
	return icon, nil
}

View File

@@ -0,0 +1,101 @@
package binmeta
import (
"context"
"crypto"
"encoding/hex"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"strings"
"github.com/safing/portbase/api"
)
// ProfileIconStoragePath defines the location where profile icons are stored.
// Must be set before anything else from this package is called.
// Must not be changed once set.
var ProfileIconStoragePath = ""

// GetProfileIcon returns the profile icon with the given ID and extension.
// The name is treated as a file name within ProfileIconStoragePath and is
// rejected if it would resolve outside that directory.
func GetProfileIcon(name string) (data []byte, err error) {
	// Check if enabled.
	if ProfileIconStoragePath == "" {
		return nil, errors.New("api icon storage not configured")
	}

	// Build storage path.
	iconPath := filepath.Clean(
		filepath.Join(ProfileIconStoragePath, name),
	)
	iconPath, err = filepath.Abs(iconPath)
	if err != nil {
		return nil, fmt.Errorf("failed to check icon path: %w", err)
	}

	// Do a quick check if we are still within the right directory.
	// This check is not entirely correct, but is sufficient for this use case.
	// NOTE(review): this compares against ProfileIconStoragePath verbatim and
	// thus assumes that path is absolute and cleaned - confirm where it is set.
	if filepath.Dir(iconPath) != ProfileIconStoragePath {
		return nil, api.ErrorWithStatus(errors.New("invalid icon"), http.StatusBadRequest)
	}

	return os.ReadFile(iconPath)
}
// UpdateProfileIcon creates or updates the given icon.
// The icon is stored in ProfileIconStoragePath under the hex-encoded SHA1 sum
// of its content plus the normalized, lower-cased extension; the resulting
// filename is returned.
func UpdateProfileIcon(data []byte, ext string) (filename string, err error) {
	// Check icon size.
	if len(data) > 1_000_000 {
		return "", errors.New("icon too big")
	}

	// Calculate sha1 sum of icon to use as a content-addressed filename.
	// NOTE(review): crypto.SHA1.New() panics unless crypto/sha1 is linked
	// into the binary (it registers itself on import) - confirm it is
	// imported somewhere in the program.
	h := crypto.SHA1.New()
	if _, err := h.Write(data); err != nil {
		return "", err
	}
	sum := hex.EncodeToString(h.Sum(nil))

	// Check ext. (Merged the previously empty switch cases for clarity.)
	ext = strings.ToLower(ext)
	switch ext {
	case "jpeg":
		// Normalize to the canonical extension.
		ext = "jpg"
	case "gif", "jpg", "png", "svg", "tiff", "webp":
		// Supported as-is.
	default:
		return "", errors.New("unsupported icon format")
	}

	// Save to disk.
	filename = sum + "." + ext
	return filename, os.WriteFile(filepath.Join(ProfileIconStoragePath, filename), data, 0o0644) //nolint:gosec
}
// LoadAndSaveIcon loads an icon from disk, updates it in the icon database
// and returns the icon object.
func LoadAndSaveIcon(ctx context.Context, iconPath string) (*Icon, error) {
	// Load icon and save it.
	data, err := os.ReadFile(iconPath)
	if err != nil {
		return nil, fmt.Errorf("failed to read icon %s: %w", iconPath, err)
	}
	// Fix: filepath.Ext includes the leading dot (".png"), which
	// UpdateProfileIcon rejects as an unsupported format - strip it.
	ext := strings.TrimPrefix(filepath.Ext(iconPath), ".")
	filename, err := UpdateProfileIcon(data, ext)
	if err != nil {
		return nil, fmt.Errorf("failed to import icon %s: %w", iconPath, err)
	}

	return &Icon{
		Type:   IconTypeAPI,
		Value:  filename,
		Source: IconSourceCore,
	}, nil
}
// TODO: Clean up icons regularly.

View File

@@ -0,0 +1,68 @@
package binmeta
import (
"fmt"
)
// IconLocation describes a location where application icons may be found on
// disk.
type IconLocation struct {
	Directory string           // Directory, possibly holding a %s placeholder (see PathArg).
	Type      IconLocationType // Structure of the directory.
	PathArg   PathArg          // What to substitute into Directory.
}

// IconLocationType describes an icon location type.
type IconLocationType uint8

// Icon Location Types.
const (
	FlatDir IconLocationType = iota
	XDGIcons
)

// PathArg describes an icon location path argument.
type PathArg uint8

// Path Args.
const (
	NoPathArg PathArg = iota
	Home
	BinName
)

var (
	iconLocations = []IconLocation{
		{Directory: "/usr/share/pixmaps", Type: FlatDir},
		{Directory: "/usr/share", Type: XDGIcons},
		{Directory: "%s/.local/share", Type: XDGIcons, PathArg: Home},
		{Directory: "%s/.local/share/flatpak/exports/share", Type: XDGIcons, PathArg: Home},
		{Directory: "/usr/share/%s", Type: XDGIcons, PathArg: BinName},
	}

	xdgIconPaths = []string{
		// UI currently uses 48x48, so 256x256 should suffice for the future, even at 2x. (12.2023)
		"icons/hicolor/256x256/apps",
		"icons/hicolor/192x192/apps",
		"icons/hicolor/128x128/apps",
		"icons/hicolor/96x96/apps",
		"icons/hicolor/72x72/apps",
		"icons/hicolor/64x64/apps",
		"icons/hicolor/48x48/apps",
		"icons/hicolor/512x512/apps",
	}
)

// GetPath returns the concrete directory path of this icon location, filling
// in the home directory or binary name where the location requires one.
// It returns an empty string when a required home directory is missing.
func (il IconLocation) GetPath(binName string, homeDir string) string {
	switch il.PathArg {
	case NoPathArg:
		return il.Directory
	case Home:
		if homeDir == "" {
			return ""
		}
		return fmt.Sprintf(il.Directory, homeDir)
	case BinName:
		return fmt.Sprintf(il.Directory, binName)
	}
	return ""
}

View File

@@ -0,0 +1,121 @@
package binmeta
import (
"path/filepath"
"regexp"
"strings"
)
var (
	// segmentsSplitter splits a file name into word-like segments: optional
	// delimiters, an optional upper-case letter, then lower-case letters and
	// digits.
	segmentsSplitter = regexp.MustCompile("[^A-Za-z0-9]*[A-Z]?[a-z0-9]*")
	// nameOnly matches strings consisting of letters and digits only.
	nameOnly = regexp.MustCompile("^[A-Za-z0-9]+$")
	// delimitersAtStart matches leading non-alphanumeric characters.
	delimitersAtStart = regexp.MustCompile("^[^A-Za-z0-9]+")
	// delimitersOnly matches strings made up entirely of non-alphanumeric
	// characters.
	delimitersOnly = regexp.MustCompile("^[^A-Za-z0-9]+$")
	// removeQuotes strips double and single quotes.
	removeQuotes = strings.NewReplacer(`"`, ``, `'`, ``)
)
// GenerateBinaryNameFromPath generates a more human readable binary name from
// the given path. This function is used as fallback in the GetBinaryName
// functions.
//
// The file name is split into word-like segments, a well-known executable
// extension is dropped, very short segments are grouped together, and the
// resulting parts are title-cased and joined with spaces.
func GenerateBinaryNameFromPath(path string) string {
	// Get file name from path.
	_, fileName := filepath.Split(path)

	// Split up into segments.
	segments := segmentsSplitter.FindAllString(fileName, -1)

	// Remove last segment if it's a well-known executable extension.
	if len(segments) >= 2 {
		switch strings.ToLower(segments[len(segments)-1]) {
		case
			".exe",      // Windows Executable
			".msi",      // Windows Installer
			".bat",      // Windows Batch File
			".cmd",      // Windows Command Script
			".ps1",      // Windows Powershell Cmdlet
			".run",      // Linux Executable
			".appimage", // Linux AppImage
			".app",      // MacOS Executable
			".action",   // MacOS Automator Action
			".out":      // Generic Compiled Executable
			segments = segments[:len(segments)-1]
		}
	}

	// Debugging snippet:
	// fmt.Printf("segments: %s\n", segments)

	// Go through segments and collect name parts.
	nameParts := make([]string, 0, len(segments))
	var fragments string
	for _, segment := range segments {
		// Group very short segments (two or fewer alphanumeric characters)
		// into a single name part.
		if len(delimitersAtStart.ReplaceAllString(segment, "")) <= 2 {
			fragments += segment
			continue
		} else if fragments != "" {
			// Flush collected fragments before adding a regular segment.
			nameParts = append(nameParts, fragments)
			fragments = ""
		}
		// Add segment to name.
		nameParts = append(nameParts, segment)
	}
	// Add last fragment.
	if fragments != "" {
		nameParts = append(nameParts, fragments)
	}

	// Debugging snippet:
	// fmt.Printf("parts: %s\n", nameParts)

	// Post-process name parts
	for i := range nameParts {
		// Remove any leading delimiters.
		nameParts[i] = delimitersAtStart.ReplaceAllString(nameParts[i], "")

		// Title-case name-only parts.
		if nameOnly.MatchString(nameParts[i]) {
			nameParts[i] = strings.Title(nameParts[i]) //nolint:staticcheck
		}
	}

	// Debugging snippet:
	// fmt.Printf("final: %s\n", nameParts)

	return strings.Join(nameParts, " ")
}
// cleanFileDescription normalizes a file-description string into a usable
// product name: quotes are stripped, a short delimiter field cuts off any
// trailing description, and only the first sentence is kept. An empty string
// is returned when nothing name-like remains.
func cleanFileDescription(fileDescr string) string {
	fields := strings.Fields(fileDescr)

	// Strip double and single quotes from every field.
	for i, field := range fields {
		fields[i] = removeQuotes.Replace(field)
	}

	// If there is a 1-2 character delimiter-only field (after the first
	// field), only use the fields before it.
	endIndex := len(fields)
	for i, field := range fields {
		if i >= 1 && len(field) <= 2 && !nameOnly.MatchString(field) {
			endIndex = i
			break
		}
	}

	// Concatenate the selected fields into the name.
	binName := strings.Join(fields[:endIndex], " ")

	// If there are multiple sentences, only use the first.
	if before, _, found := strings.Cut(binName, ". "); found {
		binName = before
	}

	// Reject results without any letters or digits.
	if delimitersOnly.MatchString(binName) {
		return ""
	}
	return strings.TrimSpace(binName)
}

View File

@@ -0,0 +1,48 @@
package binmeta
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestGenerateBinaryNameFromPath checks name generation against a set of
// real-world binary file names.
func TestGenerateBinaryNameFromPath(t *testing.T) {
	t.Parallel()

	assert.Equal(t, "Nslookup", GenerateBinaryNameFromPath("nslookup.exe"))
	assert.Equal(t, "System Settings", GenerateBinaryNameFromPath("SystemSettings.exe"))
	assert.Equal(t, "One Drive Setup", GenerateBinaryNameFromPath("OneDriveSetup.exe"))
	assert.Equal(t, "Msedge", GenerateBinaryNameFromPath("msedge.exe"))
	assert.Equal(t, "SIH Client", GenerateBinaryNameFromPath("SIHClient.exe"))
	assert.Equal(t, "Openvpn Gui", GenerateBinaryNameFromPath("openvpn-gui.exe"))
	assert.Equal(t, "Portmaster Core v0-1-2", GenerateBinaryNameFromPath("portmaster-core_v0-1-2.exe"))
	assert.Equal(t, "Win Store App", GenerateBinaryNameFromPath("WinStore.App.exe"))
	assert.Equal(t, "Test Script", GenerateBinaryNameFromPath(".test-script"))
	assert.Equal(t, "Browser Broker", GenerateBinaryNameFromPath("browser_broker.exe"))
	assert.Equal(t, "Virtual Box VM", GenerateBinaryNameFromPath("VirtualBoxVM"))
	assert.Equal(t, "Io Elementary Appcenter", GenerateBinaryNameFromPath("io.elementary.appcenter"))
	assert.Equal(t, "Microsoft Windows Store", GenerateBinaryNameFromPath("Microsoft.WindowsStore"))
}
// TestCleanFileDescription checks description cleaning for delimiters,
// quotes, multiple sentences, and delimiter-only input.
func TestCleanFileDescription(t *testing.T) {
	t.Parallel()

	assert.Equal(t, "Product Name", cleanFileDescription("Product Name"))
	assert.Equal(t, "Product Name", cleanFileDescription("Product Name. Does this and that."))
	assert.Equal(t, "Product Name", cleanFileDescription("Product Name - Does this and that."))
	assert.Equal(t, "Product Name", cleanFileDescription("Product Name / Does this and that."))
	assert.Equal(t, "Product Name", cleanFileDescription("Product Name :: Does this and that."))
	assert.Equal(t, "/ Product Name", cleanFileDescription("/ Product Name"))
	assert.Equal(t, "Product", cleanFileDescription("Product / Name"))
	assert.Equal(t, "Software 2", cleanFileDescription("Software 2"))
	assert.Equal(t, "Launcher for Software 2", cleanFileDescription("Launcher for 'Software 2'"))
	assert.Equal(t, "", cleanFileDescription(". / Name"))
	assert.Equal(t, "", cleanFileDescription(". "))
	assert.Equal(t, "", cleanFileDescription("."))
	assert.Equal(t, "N/A", cleanFileDescription("N/A"))
	assert.Equal(t,
		"Product Name a Does this and that.",
		cleanFileDescription("Product Name a Does this and that."),
	)
}

View File

@@ -0,0 +1,157 @@
package profile
import (
"context"
"fmt"
"sync"
"time"
"github.com/safing/portbase/modules"
"github.com/safing/portmaster/service/intel/filterlists"
"github.com/safing/portmaster/service/profile/endpoints"
)
var (
cfgLock sync.RWMutex
cfgDefaultAction uint8
cfgEndpoints endpoints.Endpoints
cfgServiceEndpoints endpoints.Endpoints
cfgSPNUsagePolicy endpoints.Endpoints
cfgSPNTransitHubPolicy endpoints.Endpoints
cfgSPNExitHubPolicy endpoints.Endpoints
cfgFilterLists []string
)
// registerConfigUpdater registers an event hook that rebuilds the global
// config profile whenever the configuration changes.
func registerConfigUpdater() error {
	return module.RegisterEventHook(
		"config",
		"config change",
		"update global config profile",
		func(ctx context.Context, _ interface{}) error {
			return updateGlobalConfigProfile(ctx, nil)
		},
	)
}
const globalConfigProfileErrorID = "profile:global-profile-error"
// updateGlobalConfigProfile rebuilds the special global configuration profile
// from the current global config values and saves it. On any failure it keeps
// going, remembers the last error, schedules a retry task and raises a module
// warning; the warning is resolved once an update fully succeeds.
func updateGlobalConfigProfile(ctx context.Context, task *modules.Task) error {
	cfgLock.Lock()
	defer cfgLock.Unlock()

	var err error
	var lastErr error

	action := cfgOptionDefaultAction()
	switch action {
	case DefaultActionPermitValue:
		cfgDefaultAction = DefaultActionPermit
	case DefaultActionAskValue:
		cfgDefaultAction = DefaultActionAsk
	case DefaultActionBlockValue:
		cfgDefaultAction = DefaultActionBlock
	default:
		// TODO: module error?
		lastErr = fmt.Errorf(`default action "%s" invalid`, action)
		cfgDefaultAction = DefaultActionBlock // default to block in worst case
	}

	list := cfgOptionEndpoints()
	cfgEndpoints, err = endpoints.ParseEndpoints(list)
	if err != nil {
		// TODO: module error?
		lastErr = err
	}

	list = cfgOptionServiceEndpoints()
	cfgServiceEndpoints, err = endpoints.ParseEndpoints(list)
	if err != nil {
		// TODO: module error?
		lastErr = err
	}

	list = cfgOptionFilterLists()
	cfgFilterLists, err = filterlists.ResolveListIDs(list)
	if err != nil {
		lastErr = err
	}

	list = cfgOptionSPNUsagePolicy()
	cfgSPNUsagePolicy, err = endpoints.ParseEndpoints(list)
	if err != nil {
		// TODO: module error?
		lastErr = err
	}

	list = cfgOptionTransitHubPolicy()
	cfgSPNTransitHubPolicy, err = endpoints.ParseEndpoints(list)
	if err != nil {
		// TODO: module error?
		lastErr = err
	}

	list = cfgOptionExitHubPolicy()
	cfgSPNExitHubPolicy, err = endpoints.ParseEndpoints(list)
	if err != nil {
		// TODO: module error?
		lastErr = err
	}

	// Build config.
	newConfig := make(map[string]interface{})
	// fill profile config options
	for key, value := range cfgStringOptions {
		newConfig[key] = value()
	}
	for key, value := range cfgStringArrayOptions {
		newConfig[key] = value()
	}
	for key, value := range cfgIntOptions {
		newConfig[key] = value()
	}
	for key, value := range cfgBoolOptions {
		newConfig[key] = value()
	}

	// Build global profile for reference.
	profile := New(&Profile{
		ID:       "global-config",
		Source:   SourceSpecial,
		Name:     "Global Configuration",
		Config:   newConfig,
		Internal: true,
	})

	// save profile
	err = profile.Save()
	if err != nil && lastErr == nil {
		// other errors are more important
		lastErr = err
	}

	// If there was any error, try again later until it succeeds.
	if lastErr == nil {
		module.Resolve(globalConfigProfileErrorID)
	} else {
		// Create task after first failure.
		if task == nil {
			task = module.NewTask(
				"retry updating global config profile",
				updateGlobalConfigProfile,
			)
		}

		// Schedule task.
		task.Schedule(time.Now().Add(15 * time.Second))

		// Add module warning to inform user.
		// Fix: report lastErr - err may be nil here (e.g. when only an
		// endpoint list failed to parse but saving the profile succeeded).
		module.Warning(
			globalConfigProfileErrorID,
			"Internal Settings Failure",
			fmt.Sprintf("Some global settings might not be applied correctly. You can try restarting the Portmaster to resolve this problem. Error: %s", lastErr),
		)
	}

	return lastErr
}

823
service/profile/config.go Normal file
View File

@@ -0,0 +1,823 @@
package profile
import (
"strings"
"github.com/safing/portbase/config"
"github.com/safing/portmaster/service/profile/endpoints"
"github.com/safing/portmaster/service/status"
"github.com/safing/portmaster/spn/access/account"
)
// Configuration Keys.
//
// Naming convention per option: the exported Cfg…Key variable holds the
// config key, the unexported cfgOption… variable caches the typed getter
// (assigned during registerConfiguration), and cfgOption…Order defines the
// display position in the UI (used via DisplayOrderAnnotation).
var (
	// Lookup maps from config key to the cached typed getter.
	// Filled in registerConfiguration.
	cfgStringOptions      = make(map[string]config.StringOption)
	cfgStringArrayOptions = make(map[string]config.StringArrayOption)
	cfgIntOptions         = make(map[string]config.IntOption)
	cfgBoolOptions        = make(map[string]config.BoolOption)

	// General.

	// Setting "Enable Filter" at order 0.

	CfgOptionDefaultActionKey   = "filter/defaultAction"
	cfgOptionDefaultAction      config.StringOption
	cfgOptionDefaultActionOrder = 1
	// Possible values for the default action option.
	DefaultActionPermitValue = "permit"
	DefaultActionBlockValue  = "block"
	DefaultActionAskValue    = "ask"

	// Setting "Prompt Desktop Notifications" at order 2.

	// Setting "Prompt Timeout" at order 3.

	// Network Scopes.

	CfgOptionBlockScopeInternetKey   = "filter/blockInternet"
	cfgOptionBlockScopeInternet      config.BoolOption
	cfgOptionBlockScopeInternetOrder = 16

	CfgOptionBlockScopeLANKey   = "filter/blockLAN"
	cfgOptionBlockScopeLAN      config.BoolOption
	cfgOptionBlockScopeLANOrder = 17

	CfgOptionBlockScopeLocalKey   = "filter/blockLocal"
	cfgOptionBlockScopeLocal      config.BoolOption
	cfgOptionBlockScopeLocalOrder = 18

	// Connection Types.

	CfgOptionBlockP2PKey   = "filter/blockP2P"
	cfgOptionBlockP2P      config.BoolOption
	cfgOptionBlockP2POrder = 19

	CfgOptionBlockInboundKey   = "filter/blockInbound"
	cfgOptionBlockInbound      config.BoolOption
	cfgOptionBlockInboundOrder = 20

	// Rules.

	CfgOptionEndpointsKey   = "filter/endpoints"
	cfgOptionEndpoints      config.StringArrayOption
	cfgOptionEndpointsOrder = 32

	CfgOptionServiceEndpointsKey   = "filter/serviceEndpoints"
	cfgOptionServiceEndpoints      config.StringArrayOption
	cfgOptionServiceEndpointsOrder = 33

	CfgOptionFilterListsKey   = "filter/lists"
	cfgOptionFilterLists      config.StringArrayOption
	cfgOptionFilterListsOrder = 34

	// Setting "Custom Filter List" at order 35.

	CfgOptionFilterSubDomainsKey   = "filter/includeSubdomains"
	cfgOptionFilterSubDomains      config.BoolOption
	cfgOptionFilterSubDomainsOrder = 36

	// DNS Filtering.

	CfgOptionFilterCNAMEKey   = "filter/includeCNAMEs"
	cfgOptionFilterCNAME      config.BoolOption
	cfgOptionFilterCNAMEOrder = 48

	CfgOptionRemoveOutOfScopeDNSKey   = "filter/removeOutOfScopeDNS"
	cfgOptionRemoveOutOfScopeDNS      config.BoolOption
	cfgOptionRemoveOutOfScopeDNSOrder = 49

	CfgOptionRemoveBlockedDNSKey   = "filter/removeBlockedDNS"
	cfgOptionRemoveBlockedDNS      config.BoolOption
	cfgOptionRemoveBlockedDNSOrder = 50

	CfgOptionDomainHeuristicsKey   = "filter/domainHeuristics"
	cfgOptionDomainHeuristics      config.BoolOption
	cfgOptionDomainHeuristicsOrder = 51

	// Advanced.

	CfgOptionPreventBypassingKey   = "filter/preventBypassing"
	cfgOptionPreventBypassing      config.BoolOption
	cfgOptionPreventBypassingOrder = 64

	CfgOptionDisableAutoPermitKey   = "filter/disableAutoPermit"
	cfgOptionDisableAutoPermit      config.BoolOption
	cfgOptionDisableAutoPermitOrder = 65

	// Setting "Permanent Verdicts" at order 80.

	// Network History.

	CfgOptionEnableHistoryKey   = "history/enable"
	cfgOptionEnableHistory      config.BoolOption
	cfgOptionEnableHistoryOrder = 96

	CfgOptionKeepHistoryKey   = "history/keep"
	cfgOptionKeepHistory      config.IntOption
	cfgOptionKeepHistoryOrder = 97

	// Setting "Enable SPN" at order 128.

	CfgOptionUseSPNKey   = "spn/use"
	cfgOptionUseSPN      config.BoolOption
	cfgOptionUseSPNOrder = 129

	CfgOptionSPNUsagePolicyKey   = "spn/usagePolicy"
	cfgOptionSPNUsagePolicy      config.StringArrayOption
	cfgOptionSPNUsagePolicyOrder = 130

	CfgOptionRoutingAlgorithmKey   = "spn/routingAlgorithm"
	cfgOptionRoutingAlgorithm      config.StringOption
	cfgOptionRoutingAlgorithmOrder = 144
	DefaultRoutingProfileID        = "double-hop" // Copied due to import loop.

	// Setting "Home Node Rules" at order 145.

	CfgOptionTransitHubPolicyKey   = "spn/transitHubPolicy"
	cfgOptionTransitHubPolicy      config.StringArrayOption
	cfgOptionTransitHubPolicyOrder = 146

	CfgOptionExitHubPolicyKey   = "spn/exitHubPolicy"
	cfgOptionExitHubPolicy      config.StringArrayOption
	cfgOptionExitHubPolicyOrder = 147

	// Setting "DNS Exit Node Rules" at order 148.
)
var (
	// SPNRulesQuickSettings are now generated automatically shortly after start.
	// This is only the placeholder shown until the generated settings arrive.
	SPNRulesQuickSettings = []config.QuickSetting{
		{Name: "Loading...", Action: config.QuickMergeTop, Value: []string{""}},
	}

	// SPNRulesVerdictNames defines the verdicts names to be used for SPN Rules.
	SPNRulesVerdictNames = map[string]string{
		"-": "Exclude", // Default.
		"+": "Allow",
	}

	// SPNRulesHelp defines the help text for SPN related Hub selection rules.
	// Double quotes in the template are replaced with backticks so the text
	// renders as inline code in the UI.
	SPNRulesHelp = strings.ReplaceAll(`Rules are checked from top to bottom, stopping after the first match. They can match the following attributes of SPN Nodes:
- Country (based on IPs): "US" (two-letter country codes according to ISO 3166-1 alpha-2)
- AS number: "AS123456"
- Address: "192.168.0.1"
- Network: "192.168.0.1/24"
- Anything: "*"
`, `"`, "`")
)
// registerConfiguration registers all profile-related configuration options
// with the global config system and caches their typed getters in the
// package-level cfg*Options maps. It returns the first registration error.
//
// Fix: the fallback value passed to each config getter now always matches the
// registered DefaultValue (the Block Incoming Connections getter previously
// used a fallback of false while the option defaults to true).
func registerConfiguration() error { //nolint:maintidx
	// Default Filter Action
	// permit - blocklist mode: everything is allowed unless blocked
	// ask - ask mode: if no verdict is found, the user is consulted
	// block - allowlist mode: everything is blocked unless explicitly allowed
	err := config.Register(&config.Option{
		Name:         "Default Network Action",
		Key:          CfgOptionDefaultActionKey,
		Description:  `The default network action is applied when nothing else allows or blocks a connection. This affects both outgoing and incoming connections. This setting is the weakest of all and is commonly overruled by Force Block settings or Rules.`,
		OptType:      config.OptTypeString,
		DefaultValue: DefaultActionPermitValue,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    config.DisplayHintOneOf,
			config.DisplayOrderAnnotation:   cfgOptionDefaultActionOrder,
			config.CategoryAnnotation:       "General",
		},
		PossibleValues: []config.PossibleValue{
			{
				Name:        "Allow",
				Value:       DefaultActionPermitValue,
				Description: "Allow all connections",
			},
			{
				Name:        "Block",
				Value:       DefaultActionBlockValue,
				Description: "Block all connections",
			},
			{
				Name:        "Prompt",
				Value:       DefaultActionAskValue,
				Description: "Prompt for decisions",
			},
		},
	})
	if err != nil {
		return err
	}
	cfgOptionDefaultAction = config.Concurrent.GetAsString(CfgOptionDefaultActionKey, DefaultActionPermitValue)
	cfgStringOptions[CfgOptionDefaultActionKey] = cfgOptionDefaultAction

	// Disable Auto Permit
	err = config.Register(&config.Option{
		// TODO: Check how to best handle negation here.
		Name:         "Disable Auto Allow",
		Key:          CfgOptionDisableAutoPermitKey,
		Description:  `Auto Allow searches for a relation between an app and the destination of a connection - if there is a correlation, the connection will be allowed.`,
		OptType:      config.OptTypeBool,
		ReleaseLevel: config.ReleaseLevelBeta,
		DefaultValue: true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayOrderAnnotation:   cfgOptionDisableAutoPermitOrder,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.CategoryAnnotation:       "Advanced",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionDisableAutoPermit = config.Concurrent.GetAsBool(CfgOptionDisableAutoPermitKey, true)
	cfgBoolOptions[CfgOptionDisableAutoPermitKey] = cfgOptionDisableAutoPermit

	// Enable History
	err = config.Register(&config.Option{
		Name: "Enable Network History",
		Key:  CfgOptionEnableHistoryKey,
		Description: `Save connections in a database (on disk) in order to view and search them later. Changes might take a couple minutes to apply to all connections.
In order to reduce noise optimize performance, internal and device-only (localhost) connections are not saved to history.`,
		OptType:        config.OptTypeBool,
		ReleaseLevel:   config.ReleaseLevelStable,
		ExpertiseLevel: config.ExpertiseLevelUser,
		DefaultValue:   false,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:    true,
			config.DisplayOrderAnnotation:      cfgOptionEnableHistoryOrder,
			config.CategoryAnnotation:          "General",
			config.RequiresFeatureIDAnnotation: account.FeatureHistory,
		},
	})
	if err != nil {
		return err
	}
	cfgOptionEnableHistory = config.Concurrent.GetAsBool(CfgOptionEnableHistoryKey, false)
	cfgBoolOptions[CfgOptionEnableHistoryKey] = cfgOptionEnableHistory

	// Keep Network History
	err = config.Register(&config.Option{
		Name: "Keep Network History",
		Key:  CfgOptionKeepHistoryKey,
		Description: `Specify how many days the network history data should be kept. Please keep in mind that more available history data makes reports (coming soon) a lot more useful.
Older data is deleted in intervals and cleared from the database continually. If in a hurry, shutdown or restart Portmaster to clear deleted entries immediately.
Set to 0 days to keep network history forever. Depending on your device, this might affect performance.`,
		OptType:        config.OptTypeInt,
		ReleaseLevel:   config.ReleaseLevelStable,
		ExpertiseLevel: config.ExpertiseLevelUser,
		DefaultValue:   30,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:    true,
			config.UnitAnnotation:              "Days",
			config.DisplayOrderAnnotation:      cfgOptionKeepHistoryOrder,
			config.CategoryAnnotation:          "General",
			config.RequiresFeatureIDAnnotation: account.FeatureHistory,
		},
	})
	if err != nil {
		return err
	}
	cfgOptionKeepHistory = config.Concurrent.GetAsInt(CfgOptionKeepHistoryKey, 30)
	cfgIntOptions[CfgOptionKeepHistoryKey] = cfgOptionKeepHistory

	// Shared help text for all endpoint rule lists. Double quotes are
	// replaced with backticks so examples render as inline code in the UI.
	rulesHelp := strings.ReplaceAll(`Rules are checked from top to bottom, stopping after the first match. They can match:
- By address: "192.168.0.1"
- By network: "192.168.0.0/24"
- By network scope: "Localhost", "LAN" or "Internet"
- By domain:
- Matching a distinct domain: "example.com"
- Matching a domain with subdomains: ".example.com"
- Matching with a wildcard prefix: "*xample.com"
- Matching with a wildcard suffix: "example.*"
- Matching domains containing text: "*example*"
- By country (based on IP): "US" ([two-letter country codes according to ISO 3166-1 alpha-2](https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2))
- By continent (based on IP): "C:US" (prefix "AF", "AN", "AS", "EU", "NA", "OC", or "SA" with "C:")
- By AS number: "AS123456"
- By filter list - use the filterlist ID prefixed with "L:": "L:MAL"
- Match anything: "*"
Additionally, you may supply a protocol and port using this format: "<host> <IP protocol>/<port>".
Protocols and ports may be specified using numbers ("6/80") or names ("TCP/HTTP").
Port ranges are defined by using a hyphen ("TCP/1-1024"). Omit the port to match any.
Use a "*" for matching any protocol. If matching ports with any protocol, protocols without ports will not match.
Rules with protocol and port definitions only match if the protocol and port also match.
Ports are always compared to the destination port, thus, the local listening port for incoming connections.
Examples:
- "192.168.0.1 TCP/HTTP"
- "LAN UDP/50000-55000"
- "example.com */HTTPS"
- "1.1.1.1 ICMP"
Important: DNS Requests are only matched against domain and filter list rules, all others require an IP address and are checked only with the following IP connection.
Pro Tip: You can use "#" to add a comment to a rule.
`, `"`, "`")

	// rulesVerdictNames defines the verdicts names to be used for filter rules.
	rulesVerdictNames := map[string]string{
		"-": "Block", // Default.
		"+": "Allow",
	}

	// Endpoint Filter List
	err = config.Register(&config.Option{
		Name:         "Outgoing Rules",
		Key:          CfgOptionEndpointsKey,
		Description:  "Rules that apply to outgoing network connections. Cannot overrule Network Scopes and Connection Types (see above).",
		Help:         rulesHelp,
		Sensitive:    true,
		OptType:      config.OptTypeStringArray,
		DefaultValue: []string{},
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:              true,
			config.StackableAnnotation:                   true,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.DisplayOrderAnnotation:                cfgOptionEndpointsOrder,
			config.CategoryAnnotation:                    "Rules",
			endpoints.EndpointListVerdictNamesAnnotation: rulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionEndpoints = config.Concurrent.GetAsStringArray(CfgOptionEndpointsKey, []string{})
	cfgStringArrayOptions[CfgOptionEndpointsKey] = cfgOptionEndpoints

	// Service Endpoint Filter List
	err = config.Register(&config.Option{
		Name:           "Incoming Rules",
		Key:            CfgOptionServiceEndpointsKey,
		Description:    "Rules that apply to incoming network connections. Cannot overrule Network Scopes and Connection Types (see above).",
		Help:           rulesHelp,
		Sensitive:      true,
		OptType:        config.OptTypeStringArray,
		DefaultValue:   []string{},
		ExpertiseLevel: config.ExpertiseLevelExpert,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:              true,
			config.StackableAnnotation:                   true,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.DisplayOrderAnnotation:                cfgOptionServiceEndpointsOrder,
			config.CategoryAnnotation:                    "Rules",
			endpoints.EndpointListVerdictNamesAnnotation: rulesVerdictNames,
			config.QuickSettingsAnnotation: []config.QuickSetting{
				{
					Name:   "Allow SSH",
					Action: config.QuickMergeTop,
					Value:  []string{"+ * tcp/22"},
				},
				{
					Name:   "Allow HTTP/s",
					Action: config.QuickMergeTop,
					Value:  []string{"+ * tcp/80", "+ * tcp/443"},
				},
				{
					Name:   "Allow RDP",
					Action: config.QuickMergeTop,
					Value:  []string{"+ * */3389"},
				},
				{
					Name:   "Allow all from LAN",
					Action: config.QuickMergeTop,
					Value:  []string{"+ LAN"},
				},
				{
					Name:   "Allow all from Internet",
					Action: config.QuickMergeTop,
					Value:  []string{"+ Internet"},
				},
				{
					Name:   "Block everything else",
					Action: config.QuickMergeBottom,
					Value:  []string{"- *"},
				},
			},
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionServiceEndpoints = config.Concurrent.GetAsStringArray(CfgOptionServiceEndpointsKey, []string{})
	cfgStringArrayOptions[CfgOptionServiceEndpointsKey] = cfgOptionServiceEndpoints

	// Filter list IDs
	defaultFilterListsValue := []string{"TRAC", "MAL", "BAD", "UNBREAK"}
	err = config.Register(&config.Option{
		Name:         "Filter Lists",
		Key:          CfgOptionFilterListsKey,
		Description:  "Block connections that match enabled filter lists.",
		OptType:      config.OptTypeStringArray,
		DefaultValue: defaultFilterListsValue,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    "filter list",
			config.DisplayOrderAnnotation:   cfgOptionFilterListsOrder,
			config.CategoryAnnotation:       "Filter Lists",
		},
		ValidationRegex: `^[a-zA-Z0-9\-]+$`,
	})
	if err != nil {
		return err
	}
	cfgOptionFilterLists = config.Concurrent.GetAsStringArray(CfgOptionFilterListsKey, defaultFilterListsValue)
	cfgStringArrayOptions[CfgOptionFilterListsKey] = cfgOptionFilterLists

	// Include CNAMEs
	err = config.Register(&config.Option{
		Name:           "Block Domain Aliases",
		Key:            CfgOptionFilterCNAMEKey,
		Description:    "Block a domain if a resolved CNAME (alias) is blocked by a rule or filter list.",
		OptType:        config.OptTypeBool,
		DefaultValue:   true,
		ExpertiseLevel: config.ExpertiseLevelExpert,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionFilterCNAMEOrder,
			config.CategoryAnnotation:       "DNS Filtering",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionFilterCNAME = config.Concurrent.GetAsBool(CfgOptionFilterCNAMEKey, true)
	cfgBoolOptions[CfgOptionFilterCNAMEKey] = cfgOptionFilterCNAME

	// Include subdomains
	err = config.Register(&config.Option{
		Name:         "Block Subdomains of Filter List Entries",
		Key:          CfgOptionFilterSubDomainsKey,
		Description:  "Additionally block all subdomains of entries in selected filter lists.",
		OptType:      config.OptTypeBool,
		DefaultValue: true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionFilterSubDomainsOrder,
			config.CategoryAnnotation:       "Filter Lists",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionFilterSubDomains = config.Concurrent.GetAsBool(CfgOptionFilterSubDomainsKey, true)
	cfgBoolOptions[CfgOptionFilterSubDomainsKey] = cfgOptionFilterSubDomains

	// Block Scope Local
	err = config.Register(&config.Option{
		Name:           "Force Block Device-Local Connections",
		Key:            CfgOptionBlockScopeLocalKey,
		Description:    "Force Block all internal connections on your own device, ie. localhost. Is stronger than Rules (see below).",
		OptType:        config.OptTypeBool,
		ExpertiseLevel: config.ExpertiseLevelExpert,
		DefaultValue:   false,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionBlockScopeLocalOrder,
			config.CategoryAnnotation:       "Network Scope",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionBlockScopeLocal = config.Concurrent.GetAsBool(CfgOptionBlockScopeLocalKey, false)
	cfgBoolOptions[CfgOptionBlockScopeLocalKey] = cfgOptionBlockScopeLocal

	// Block Scope LAN
	err = config.Register(&config.Option{
		Name:         "Force Block LAN",
		Key:          CfgOptionBlockScopeLANKey,
		Description:  "Force Block all connections from and to the Local Area Network. Is stronger than Rules (see below).",
		OptType:      config.OptTypeBool,
		DefaultValue: false,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionBlockScopeLANOrder,
			config.CategoryAnnotation:       "Network Scope",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionBlockScopeLAN = config.Concurrent.GetAsBool(CfgOptionBlockScopeLANKey, false)
	cfgBoolOptions[CfgOptionBlockScopeLANKey] = cfgOptionBlockScopeLAN

	// Block Scope Internet
	err = config.Register(&config.Option{
		Name:         "Force Block Internet Access",
		Key:          CfgOptionBlockScopeInternetKey,
		Description:  "Force Block connections from and to the Internet. Is stronger than Rules (see below).",
		OptType:      config.OptTypeBool,
		DefaultValue: false,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionBlockScopeInternetOrder,
			config.CategoryAnnotation:       "Network Scope",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionBlockScopeInternet = config.Concurrent.GetAsBool(CfgOptionBlockScopeInternetKey, false)
	cfgBoolOptions[CfgOptionBlockScopeInternetKey] = cfgOptionBlockScopeInternet

	// Block Peer to Peer Connections
	err = config.Register(&config.Option{
		Name:         "Force Block P2P/Direct Connections",
		Key:          CfgOptionBlockP2PKey,
		Description:  "These are connections that are established directly to an IP address or peer on the Internet without resolving a domain name via DNS first. Is stronger than Rules (see below).",
		OptType:      config.OptTypeBool,
		DefaultValue: false,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionBlockP2POrder,
			config.CategoryAnnotation:       "Connection Types",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionBlockP2P = config.Concurrent.GetAsBool(CfgOptionBlockP2PKey, false)
	cfgBoolOptions[CfgOptionBlockP2PKey] = cfgOptionBlockP2P

	// Block Inbound Connections
	err = config.Register(&config.Option{
		Name:         "Force Block Incoming Connections",
		Key:          CfgOptionBlockInboundKey,
		Description:  "Connections initiated towards your device from the LAN or Internet. This will usually only be the case if you are running a network service or are using peer to peer software. Is stronger than Rules (see below).",
		OptType:      config.OptTypeBool,
		DefaultValue: true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionBlockInboundOrder,
			config.CategoryAnnotation:       "Connection Types",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	// Fallback must match the registered DefaultValue (true) above; it was
	// previously false, diverging from the registered default.
	cfgOptionBlockInbound = config.Concurrent.GetAsBool(CfgOptionBlockInboundKey, true)
	cfgBoolOptions[CfgOptionBlockInboundKey] = cfgOptionBlockInbound

	// Filter Out-of-Scope DNS Records
	err = config.Register(&config.Option{
		Name:           "Enforce Global/Private Split-View",
		Key:            CfgOptionRemoveOutOfScopeDNSKey,
		Description:    "Reject private IP addresses (RFC1918 et al.) from public DNS responses. If the system resolver is in use, the resulting connection will be blocked instead of the DNS request.",
		OptType:        config.OptTypeBool,
		ExpertiseLevel: config.ExpertiseLevelDeveloper,
		DefaultValue:   true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionRemoveOutOfScopeDNSOrder,
			config.CategoryAnnotation:       "DNS Filtering",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionRemoveOutOfScopeDNS = config.Concurrent.GetAsBool(CfgOptionRemoveOutOfScopeDNSKey, true)
	cfgBoolOptions[CfgOptionRemoveOutOfScopeDNSKey] = cfgOptionRemoveOutOfScopeDNS

	// Filter DNS Records that would be blocked
	err = config.Register(&config.Option{
		Name:           "Reject Blocked IPs",
		Key:            CfgOptionRemoveBlockedDNSKey,
		Description:    "Reject blocked IP addresses directly from the DNS response instead of handing them over to the app and blocking a resulting connection. This settings does not affect privacy and only takes effect when the system resolver is not in use.",
		OptType:        config.OptTypeBool,
		ExpertiseLevel: config.ExpertiseLevelDeveloper,
		DefaultValue:   true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionRemoveBlockedDNSOrder,
			config.CategoryAnnotation:       "DNS Filtering",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionRemoveBlockedDNS = config.Concurrent.GetAsBool(CfgOptionRemoveBlockedDNSKey, true)
	cfgBoolOptions[CfgOptionRemoveBlockedDNSKey] = cfgOptionRemoveBlockedDNS

	// Domain heuristics
	err = config.Register(&config.Option{
		Name:           "Enable Domain Heuristics",
		Key:            CfgOptionDomainHeuristicsKey,
		Description:    "Checks for suspicious domain names and blocks them. This option currently targets domain names generated by malware and DNS data exfiltration channels.",
		OptType:        config.OptTypeBool,
		ExpertiseLevel: config.ExpertiseLevelExpert,
		DefaultValue:   true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionDomainHeuristicsOrder,
			config.CategoryAnnotation:       "DNS Filtering",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionDomainHeuristics = config.Concurrent.GetAsBool(CfgOptionDomainHeuristicsKey, true)
	cfgBoolOptions[CfgOptionDomainHeuristicsKey] = cfgOptionDomainHeuristics

	// Bypass prevention
	err = config.Register(&config.Option{
		Name: "Block Secure DNS Bypassing",
		Key:  CfgOptionPreventBypassingKey,
		Description: `Prevent apps from bypassing Portmaster's Secure DNS resolver.
If disabled, Portmaster might have reduced information to correctly enforce rules and filter lists.
Important: Portmaster's firewall itself cannot be bypassed.
Current Features:
- Disable Firefox' internal DNS-over-HTTPs resolver
- Block direct access to public DNS resolvers
Please note that DNS bypass attempts might be additionally blocked in the System DNS Client App.`,
		OptType:        config.OptTypeBool,
		ExpertiseLevel: config.ExpertiseLevelUser,
		ReleaseLevel:   config.ReleaseLevelStable,
		DefaultValue:   true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    status.DisplayHintSecurityLevel,
			config.DisplayOrderAnnotation:   cfgOptionPreventBypassingOrder,
			config.CategoryAnnotation:       "Advanced",
		},
		Migrations: []config.MigrationFunc{status.MigrateSecurityLevelToBoolean},
	})
	if err != nil {
		return err
	}
	cfgOptionPreventBypassing = config.Concurrent.GetAsBool(CfgOptionPreventBypassingKey, true)
	cfgBoolOptions[CfgOptionPreventBypassingKey] = cfgOptionPreventBypassing

	// Use SPN
	err = config.Register(&config.Option{
		Name:         "Use SPN",
		Key:          CfgOptionUseSPNKey,
		Description:  "Protect network traffic with the Safing Privacy Network. If the SPN is not available or the connection is interrupted, network traffic will be blocked.",
		OptType:      config.OptTypeBool,
		DefaultValue: true,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayOrderAnnotation:   cfgOptionUseSPNOrder,
			config.CategoryAnnotation:       "General",
		},
	})
	if err != nil {
		return err
	}
	cfgOptionUseSPN = config.Concurrent.GetAsBool(CfgOptionUseSPNKey, true)
	cfgBoolOptions[CfgOptionUseSPNKey] = cfgOptionUseSPN

	// SPN Rules
	err = config.Register(&config.Option{
		Name:         "SPN Rules",
		Key:          CfgOptionSPNUsagePolicyKey,
		Description:  `Customize which websites should or should not be routed through the SPN. Only active if "Use SPN" is enabled.`,
		Help:         rulesHelp,
		Sensitive:    true,
		OptType:      config.OptTypeStringArray,
		DefaultValue: []string{},
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:              true,
			config.StackableAnnotation:                   true,
			config.CategoryAnnotation:                    "General",
			config.DisplayOrderAnnotation:                cfgOptionSPNUsagePolicyOrder,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			endpoints.EndpointListVerdictNamesAnnotation: SPNRulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionSPNUsagePolicy = config.Concurrent.GetAsStringArray(CfgOptionSPNUsagePolicyKey, []string{})
	cfgStringArrayOptions[CfgOptionSPNUsagePolicyKey] = cfgOptionSPNUsagePolicy

	// Transit Node Rules
	err = config.Register(&config.Option{
		Name:           "Transit Node Rules",
		Key:            CfgOptionTransitHubPolicyKey,
		Description:    `Customize which countries should or should not be used as Transit Nodes. Transit Nodes are used to transit the SPN from your Home to your Exit Node.`,
		Help:           SPNRulesHelp,
		Sensitive:      true,
		OptType:        config.OptTypeStringArray,
		ExpertiseLevel: config.ExpertiseLevelExpert,
		DefaultValue:   []string{},
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:              true,
			config.StackableAnnotation:                   true,
			config.CategoryAnnotation:                    "Routing",
			config.DisplayOrderAnnotation:                cfgOptionTransitHubPolicyOrder,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.QuickSettingsAnnotation:               SPNRulesQuickSettings,
			endpoints.EndpointListVerdictNamesAnnotation: SPNRulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionTransitHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionTransitHubPolicyKey, []string{})
	cfgStringArrayOptions[CfgOptionTransitHubPolicyKey] = cfgOptionTransitHubPolicy

	// Exit Node Rules
	err = config.Register(&config.Option{
		Name: "Exit Node Rules",
		Key:  CfgOptionExitHubPolicyKey,
		Description: `Customize which countries should or should not be used for your Exit Nodes. Exit Nodes are used to exit the SPN and establish a connection to your destination.
By default, the Portmaster tries to choose the node closest to the destination as the Exit Node. This reduces your exposure to the open Internet. Exit Nodes are chosen for every destination separately.`,
		Help:         SPNRulesHelp,
		Sensitive:    true,
		OptType:      config.OptTypeStringArray,
		DefaultValue: []string{},
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation:              true,
			config.StackableAnnotation:                   true,
			config.CategoryAnnotation:                    "Routing",
			config.DisplayOrderAnnotation:                cfgOptionExitHubPolicyOrder,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.QuickSettingsAnnotation:               SPNRulesQuickSettings,
			endpoints.EndpointListVerdictNamesAnnotation: SPNRulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionExitHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionExitHubPolicyKey, []string{})
	cfgStringArrayOptions[CfgOptionExitHubPolicyKey] = cfgOptionExitHubPolicy

	// Select SPN Routing Algorithm
	err = config.Register(&config.Option{
		Name:         "Select SPN Routing Algorithm",
		Key:          CfgOptionRoutingAlgorithmKey,
		Description:  "Select the routing algorithm for your connections through the SPN. Configure your preferred balance between speed and privacy. Portmaster may automatically upgrade the routing algorithm if necessary to protect your privacy.",
		OptType:      config.OptTypeString,
		DefaultValue: DefaultRoutingProfileID,
		Annotations: config.Annotations{
			config.SettablePerAppAnnotation: true,
			config.DisplayHintAnnotation:    config.DisplayHintOneOf,
			config.DisplayOrderAnnotation:   cfgOptionRoutingAlgorithmOrder,
			config.CategoryAnnotation:       "Routing",
		},
		PossibleValues: []config.PossibleValue{
			{
				Name:        "Plain VPN Mode",
				Value:       "home",
				Description: "Always connect to the destination directly from the Home Hub. Only provides very basic privacy, as the Home Hub both knows where you are coming from and where you are connecting to.",
			},
			{
				Name:        "Speed Focused",
				Value:       "single-hop",
				Description: "Optimize routes with a minimum of one hop. Provides good speeds. This will often use the Home Hub to connect to destinations near you, but will use more hops to far away destinations for better privacy over long distances.",
			},
			{
				Name:        "Balanced",
				Value:       "double-hop",
				Description: "Optimize routes with a minimum of two hops. Provides good privacy as well as good speeds. No single node knows where you are coming from *and* where you are connecting to.",
			},
			{
				Name:        "Privacy Focused",
				Value:       "triple-hop",
				Description: "Optimize routes with a minimum of three hops. Provides very good privacy. No single node knows where you are coming from *and* where you are connecting to - with an additional hop just to be sure.",
			},
		},
	})
	if err != nil {
		return err
	}
	cfgOptionRoutingAlgorithm = config.Concurrent.GetAsString(CfgOptionRoutingAlgorithmKey, DefaultRoutingProfileID)
	cfgStringOptions[CfgOptionRoutingAlgorithmKey] = cfgOptionRoutingAlgorithm

	return nil
}

144
service/profile/database.go Normal file
View File

@@ -0,0 +1,144 @@
package profile
import (
"context"
"errors"
"strings"
"github.com/safing/portbase/config"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
)
// Database paths:
// core:profiles/<scope>/<id>
// cache:profiles/index/<identifier>/<value>

// ProfilesDBPath is the base database path for profiles.
const ProfilesDBPath = "core:profiles/"

// profileDB is the package-wide interface to the profile database,
// configured for local-only, internal access.
var profileDB = database.NewInterface(&database.Options{
	Local:    true,
	Internal: true,
})
// MakeScopedID returns the scoped profile ID for the given source and ID,
// in the form "<source>/<id>".
func MakeScopedID(source ProfileSource, id string) string {
	return strings.Join([]string{string(source), id}, "/")
}
// MakeProfileKey returns the full database key for the given profile source
// and ID, ie. the scoped ID prefixed with the profiles database path.
func MakeProfileKey(source ProfileSource, id string) string {
	return ProfilesDBPath + string(source) + "/" + id
}
// registerValidationDBHook installs the profile validation hook on the
// profiles database path, so profiles are checked before being written.
func registerValidationDBHook() error {
	_, err := database.RegisterHook(query.New(ProfilesDBPath), &databaseHook{})
	return err
}
// startProfileUpdateChecker starts a service worker that subscribes to all
// profile database records and keeps active profiles in sync: it bumps the
// layered profile's revision counter on every change, marks externally saved
// or deleted profiles as outdated, and emits the corresponding module events.
func startProfileUpdateChecker() error {
	module.StartServiceWorker("update active profiles", 0, func(ctx context.Context) (err error) {
		// Subscribe to changes of any record below the profiles DB path.
		profilesSub, err := profileDB.Subscribe(query.New(ProfilesDBPath))
		if err != nil {
			return err
		}
		defer func() {
			// Best effort cleanup: only log if cancellation fails.
			err := profilesSub.Cancel()
			if err != nil {
				log.Warningf("profile: failed to cancel subscription for updating active profiles: %s", err)
			}
		}()

	profileFeed:
		for {
			select {
			case r := <-profilesSub.Feed:
				// Check if subscription was canceled.
				// A nil record signals that the feed was closed.
				if r == nil {
					return errors.New("subscription canceled")
				}

				// Get active profile.
				scopedID := strings.TrimPrefix(r.Key(), ProfilesDBPath)
				activeProfile := getActiveProfile(scopedID)
				if activeProfile == nil {
					// Check if profile is being deleted.
					if r.Meta().IsDeleted() {
						// NOTE(review): meta appears to be the package-level
						// profiles metadata tracker (defined elsewhere in this
						// package) - confirm semantics of MarkDeleted.
						meta.MarkDeleted(scopedID)
					}
					// Don't do any additional actions if the profile is not active.
					continue profileFeed
				}

				// Always increase the revision counter of the layer profile.
				// This marks previous connections in the UI as decided with outdated settings.
				if activeProfile.layeredProfile != nil {
					activeProfile.layeredProfile.increaseRevisionCounter(true)
				}

				// Always mark as outdated if the record is being deleted.
				if r.Meta().IsDeleted() {
					activeProfile.outdated.Set()
					meta.MarkDeleted(scopedID)
					module.TriggerEvent(DeletedEvent, scopedID)
					continue
				}

				// If the profile is saved externally (eg. via the API), have the
				// next one to use it reload the profile from the database.
				receivedProfile, err := EnsureProfile(r)
				if err != nil || !receivedProfile.savedInternally {
					activeProfile.outdated.Set()
					module.TriggerEvent(ConfigChangeEvent, scopedID)
				}

			case <-ctx.Done():
				return nil
			}
		}
	})
	return nil
}
// databaseHook validates and normalizes profiles before they are written to
// the profile database (see PrePut).
type databaseHook struct {
	database.HookBase
}
// UsesPrePut implements the Hook interface and returns true, as this hook
// processes profiles in PrePut before they are written.
func (h *databaseHook) UsesPrePut() bool {
	return true
}
// PrePut implements the Hook interface. It converts the record to a Profile,
// cleans its hierarchical config, and prepares and parses the profile before
// it is written to the database. The profiles metadata record is passed
// through untouched.
func (h *databaseHook) PrePut(r record.Record) (record.Record, error) {
	// Do not intervene with the metadata key.
	if r.Key() == profilesMetadataKey {
		return r, nil
	}

	// Ensure we are working with a *Profile.
	p, err := EnsureProfile(r)
	if err != nil {
		return nil, err
	}

	// Remove superfluous entries from the hierarchical config.
	config.CleanHierarchicalConfig(p.Config)

	// Prepare the profile, then parse its config.
	p.prepProfile()
	if err := p.parseConfig(); err != nil {
		// Fail the put here; loading only warns on parse errors.
		return nil, err
	}

	return p, nil
}

View File

@@ -0,0 +1,13 @@
package endpoints
// DisplayHintEndpointList marks an option as an endpoint
// list option. It's meant to be used with DisplayHintAnnotation.
const DisplayHintEndpointList = "endpoint list"

// EndpointListVerdictNamesAnnotation is the annotation identifier used in
// configuration options to hint the UI on names to be used for endpoint list
// verdicts.
// If configured, it must be of type map[string]string, mapping the verdict
// symbol to a name to be displayed in the UI.
// May only be used when config.DisplayHintAnnotation is set to DisplayHintEndpointList.
const EndpointListVerdictNamesAnnotation = "safing/portmaster:ui:endpoint-list:verdict-names"

View File

@@ -0,0 +1,29 @@
package endpoints
import (
"context"
"github.com/safing/portmaster/service/intel"
)
// EndpointAny matches anything ("*").
type EndpointAny struct {
	EndpointBase
}
// Matches checks whether the given entity matches this endpoint definition.
// EndpointAny matches every entity; only the protocol/port constraints of
// the base definition can still exclude a match.
func (ep *EndpointAny) Matches(_ context.Context, entity *intel.Entity) (EPResult, Reason) {
	return ep.match(ep, entity, "*", "matches")
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointAny) String() string {
	return ep.renderPPP("*")
}
// parseTypeAny parses the wildcard definition "*", which matches anything.
// It returns nil (without error) if the definition is something else, so
// that other parsers may handle the value.
func parseTypeAny(fields []string) (Endpoint, error) {
	if fields[1] != "*" {
		return nil, nil
	}
	ep := &EndpointAny{}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,65 @@
package endpoints
import (
"context"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/safing/portmaster/service/intel"
)
var asnRegex = regexp.MustCompile("^AS[0-9]+$")
// EndpointASN matches autonomous system numbers (ASNs).
type EndpointASN struct {
	EndpointBase

	// ASN is the autonomous system number to match.
	ASN uint
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointASN) Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason) {
	// ASN data only exists for global IP addresses.
	if entity.IP == nil || !entity.IPScope.IsGlobal() {
		return NoMatch, nil
	}

	asnStr := strconv.Itoa(int(ep.ASN))
	asn, ok := entity.GetASN(ctx)
	switch {
	case !ok:
		// Without ASN data we cannot decide either way.
		return MatchError, ep.makeReason(ep, asnStr, "ASN data not available to match")
	case asn == ep.ASN:
		return ep.match(ep, entity, asnStr, "IP is part of AS")
	default:
		return NoMatch, nil
	}
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointASN) String() string {
	// ASN is unsigned, so format it as such instead of round-tripping
	// through int64.
	return ep.renderPPP("AS" + strconv.FormatUint(uint64(ep.ASN), 10))
}
// parseTypeASN parses an autonomous system definition, eg. "AS15169".
// It returns nil (without error) if the value does not look like an ASN,
// so that other parsers may handle it.
func parseTypeASN(fields []string) (Endpoint, error) {
	if !asnRegex.MatchString(fields[1]) {
		return nil, nil
	}

	asnString := strings.TrimPrefix(fields[1], "AS")
	// AS numbers are 32 bit (RFC 6793). Parsing with bitSize 32 rejects
	// larger values instead of silently truncating them in the uint cast
	// on 32-bit platforms.
	asn, err := strconv.ParseUint(asnString, 10, 32)
	if err != nil {
		return nil, fmt.Errorf("failed to parse AS number %s: %w", asnString, err)
	}

	ep := &EndpointASN{
		ASN: uint(asn),
	}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,63 @@
package endpoints
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/safing/portmaster/service/intel"
)
var (
	// continentCodePrefix marks a value as a continent definition.
	continentCodePrefix = "C:"
	// continentRegex matches continent definitions, eg. "C:EU".
	continentRegex = regexp.MustCompile(`^C:[A-Z]{2}$`)
)
// EndpointContinent matches continents.
type EndpointContinent struct {
	EndpointBase

	// ContinentCode is the two-letter continent code to match, eg. "EU".
	ContinentCode string
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointContinent) Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason) {
	// Continent data only exists for global IP addresses.
	if entity.IP == nil {
		return NoMatch, nil
	}
	if !entity.IPScope.IsGlobal() {
		return NoMatch, nil
	}

	countryInfo := entity.GetCountryInfo(ctx)
	if countryInfo == nil {
		// Without geo data we cannot decide either way.
		return MatchError, ep.makeReason(ep, "", "country data not available to match")
	}
	if ep.ContinentCode == countryInfo.Continent.Code {
		return ep.match(
			ep, entity,
			fmt.Sprintf("%s (%s)", countryInfo.Continent.Name, countryInfo.Continent.Code),
			"IP is located in",
		)
	}
	return NoMatch, nil
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointContinent) String() string {
	return ep.renderPPP(continentCodePrefix + ep.ContinentCode)
}
// parseTypeContinent parses a continent definition, eg. "C:EU".
// It returns nil (without error) if the value is not a continent
// definition, so that other parsers may handle it.
func parseTypeContinent(fields []string) (Endpoint, error) {
	if !continentRegex.MatchString(fields[1]) {
		return nil, nil
	}
	code := strings.TrimPrefix(strings.ToUpper(fields[1]), continentCodePrefix)
	ep := &EndpointContinent{ContinentCode: code}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,59 @@
package endpoints
import (
"context"
"fmt"
"regexp"
"strings"
"github.com/safing/portmaster/service/intel"
)
var countryRegex = regexp.MustCompile(`^[A-Z]{2}$`)
// EndpointCountry matches countries.
type EndpointCountry struct {
	EndpointBase

	// CountryCode is the two-letter ISO country code to match, eg. "AT".
	CountryCode string
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointCountry) Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason) {
	// Country data only exists for global IP addresses.
	if entity.IP == nil || !entity.IPScope.IsGlobal() {
		return NoMatch, nil
	}

	countryInfo := entity.GetCountryInfo(ctx)
	switch {
	case countryInfo == nil:
		// Without geo data we cannot decide either way.
		return MatchError, ep.makeReason(ep, "", "country data not available to match")
	case countryInfo.Code == ep.CountryCode:
		matched := fmt.Sprintf("%s (%s)", countryInfo.Name, countryInfo.Code)
		return ep.match(ep, entity, matched, "IP is located in")
	default:
		return NoMatch, nil
	}
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointCountry) String() string {
	return ep.renderPPP(ep.CountryCode)
}
// parseTypeCountry parses a country definition, eg. "DE".
// It returns nil (without error) if the value is not a two-letter country
// code, so that other parsers may handle it.
func parseTypeCountry(fields []string) (Endpoint, error) {
	if !countryRegex.MatchString(fields[1]) {
		return nil, nil
	}
	ep := &EndpointCountry{CountryCode: strings.ToUpper(fields[1])}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,170 @@
package endpoints
import (
"context"
"errors"
"regexp"
"strings"
"github.com/safing/portmaster/service/intel"
"github.com/safing/portmaster/service/network/netutils"
)
// Domain matching types.
const (
	domainMatchTypeExact    uint8 = iota // eg. "example.com"
	domainMatchTypeZone                  // eg. ".example.com" (domain and all subdomains)
	domainMatchTypeSuffix                // eg. "*example.com"
	domainMatchTypePrefix                // eg. "example.*"
	domainMatchTypeContains              // eg. "*example*"
)
var (
	// allowedDomainChars matches characters allowed in (wildcard) domain
	// patterns after lower-casing.
	allowedDomainChars = regexp.MustCompile(`^[a-z0-9\.-]+$`)

	// looksLikeAnIP matches domains that look like an IP address.
	looksLikeAnIP = regexp.MustCompile(`^[0-9\.:]+$`)
)
// EndpointDomain matches domains.
type EndpointDomain struct {
	EndpointBase

	// OriginalValue is the domain pattern as written by the user.
	OriginalValue string
	// Domain is the normalized pattern used for matching.
	Domain string
	// DomainZone is the zone suffix ("." + Domain); only set for zone matching.
	DomainZone string
	// MatchType selects how Domain is compared, see domainMatchType*.
	MatchType uint8
}
// check compares a single domain (or CNAME) against the endpoint
// definition and returns the match result and reason, or NoMatch.
func (ep *EndpointDomain) check(entity *intel.Entity, domain string) (EPResult, Reason) {
	// Pre-compute result and reason for the base (protocol/port) match;
	// only returned if the domain itself also matches below.
	result, reason := ep.match(ep, entity, ep.OriginalValue, "domain matches")

	switch ep.MatchType {
	case domainMatchTypeExact:
		if domain == ep.Domain {
			return result, reason
		}
	case domainMatchTypeZone:
		// Zone matching covers the domain itself and all subdomains.
		if domain == ep.Domain {
			return result, reason
		}
		if strings.HasSuffix(domain, ep.DomainZone) {
			return result, reason
		}
	case domainMatchTypeSuffix:
		if strings.HasSuffix(domain, ep.Domain) {
			return result, reason
		}
	case domainMatchTypePrefix:
		if strings.HasPrefix(domain, ep.Domain) {
			return result, reason
		}
	case domainMatchTypeContains:
		if strings.Contains(domain, ep.Domain) {
			return result, reason
		}
	}

	return NoMatch, nil
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointDomain) Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason) {
	domain, ok := entity.GetDomain(ctx, true /* mayUseReverseDomain */)
	if !ok {
		return NoMatch, nil
	}

	// Check the main domain first.
	result, reason := ep.check(entity, domain)
	if result != NoMatch {
		return result, reason
	}

	// Also check CNAMEs, but only let a CNAME deny a connection, never
	// permit one.
	if entity.CNAMECheckEnabled() {
		for _, cname := range entity.CNAME {
			result, reason = ep.check(entity, cname)
			if result == Denied {
				return result, reason
			}
		}
	}

	return NoMatch, nil
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointDomain) String() string {
	return ep.renderPPP(ep.OriginalValue)
}
// parseTypeDomain parses a (wildcard) domain definition.
// It returns nil (without error) when the value cannot be a domain
// pattern, so that other parsers may handle it.
func parseTypeDomain(fields []string) (Endpoint, error) {
	domain := fields[1]
	ep := &EndpointDomain{
		OriginalValue: domain,
	}

	// Fix domain ending: ensure a trailing dot, unless the pattern ends
	// with a wildcard.
	switch domain[len(domain)-1] {
	case '.', '*':
	default:
		domain += "."
	}

	// Check if this looks like an IP address.
	// A real domain's TLD contains letters.
	if looksLikeAnIP.MatchString(domain) {
		return nil, nil
	}

	// Fix domain case.
	domain = strings.ToLower(domain)

	needValidFQDN := true
	switch {
	case strings.HasPrefix(domain, "*") && strings.HasSuffix(domain, "*"):
		ep.MatchType = domainMatchTypeContains
		ep.Domain = strings.TrimPrefix(domain, "*")
		ep.Domain = strings.TrimSuffix(ep.Domain, "*")
		needValidFQDN = false

	case strings.HasSuffix(domain, "*"):
		ep.MatchType = domainMatchTypePrefix
		ep.Domain = strings.TrimSuffix(domain, "*")
		needValidFQDN = false

		// Prefix matching cannot be combined with zone matching.
		if strings.HasPrefix(ep.Domain, ".") {
			return nil, nil
		}

		// Do not accept domains that look like an IP address and have a suffix wildcard.
		// This is confusing, because it looks like an IP netmask matching rule.
		if looksLikeAnIP.MatchString(ep.Domain) {
			return nil, errors.New("use CIDR notation (eg. 10.0.0.0/24) for matching ip address ranges")
		}

	case strings.HasPrefix(domain, "*"):
		ep.MatchType = domainMatchTypeSuffix
		ep.Domain = strings.TrimPrefix(domain, "*")
		needValidFQDN = false

	case strings.HasPrefix(domain, "."):
		ep.MatchType = domainMatchTypeZone
		ep.Domain = strings.TrimPrefix(domain, ".")
		ep.DomainZone = "." + ep.Domain

	default:
		ep.MatchType = domainMatchTypeExact
		ep.Domain = domain
	}

	// Validate domain "content".
	switch {
	case needValidFQDN && !netutils.IsValidFqdn(ep.Domain):
		return nil, nil
	case !needValidFQDN && !allowedDomainChars.MatchString(ep.Domain):
		return nil, nil
	case strings.Contains(ep.Domain, ".."):
		// The above regex does not catch double dots.
		return nil, nil
	}

	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,42 @@
package endpoints
import (
"context"
"net"
"github.com/safing/portmaster/service/intel"
)
// EndpointIP matches IPs.
type EndpointIP struct {
	EndpointBase

	// IP is the IP address to match.
	IP net.IP
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointIP) Matches(_ context.Context, entity *intel.Entity) (EPResult, Reason) {
	switch {
	case entity.IP == nil:
		// Nothing to compare without an IP address.
		return NoMatch, nil
	case entity.IP.Equal(ep.IP):
		return ep.match(ep, entity, ep.IP.String(), "IP matches")
	default:
		return NoMatch, nil
	}
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointIP) String() string {
	return ep.renderPPP(ep.IP.String())
}
// parseTypeIP parses a single IP address definition (IPv4 or IPv6).
// It returns nil (without error) when the value is not an IP address, so
// that other parsers may handle it.
func parseTypeIP(fields []string) (Endpoint, error) {
	ip := net.ParseIP(fields[1])
	if ip == nil {
		return nil, nil
	}
	ep := &EndpointIP{IP: ip}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,42 @@
package endpoints
import (
"context"
"net"
"github.com/safing/portmaster/service/intel"
)
// EndpointIPRange matches IP ranges.
type EndpointIPRange struct {
	EndpointBase

	// Net is the IP network (CIDR) to match against.
	Net *net.IPNet
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointIPRange) Matches(_ context.Context, entity *intel.Entity) (EPResult, Reason) {
	// Nothing to compare without an IP address.
	if entity.IP == nil {
		return NoMatch, nil
	}
	if !ep.Net.Contains(entity.IP) {
		return NoMatch, nil
	}
	return ep.match(ep, entity, ep.Net.String(), "IP is in")
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointIPRange) String() string {
	return ep.renderPPP(ep.Net.String())
}
// parseTypeIPRange parses an IP range in CIDR notation, eg. "10.0.0.0/24".
// It returns nil (without error) when the value is not valid CIDR
// notation, so that other parsers may handle it.
func parseTypeIPRange(fields []string) (Endpoint, error) {
	// Name the result ipNet so it does not shadow the net package.
	_, ipNet, err := net.ParseCIDR(fields[1])
	if err != nil {
		// Not CIDR notation; defer to other parsers.
		return nil, nil
	}
	ep := &EndpointIPRange{
		Net: ipNet,
	}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,41 @@
package endpoints
import (
"context"
"strings"
"github.com/safing/portmaster/service/intel"
)
// EndpointLists matches endpoint lists.
type EndpointLists struct {
	EndpointBase

	// ListSet holds the individual filter list IDs to match.
	ListSet []string
	// Lists is the normalized "L:"-prefixed definition value, used for
	// rendering and as the reason value.
	Lists string
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointLists) Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason) {
	if entity.MatchLists(ep.ListSet) {
		// Attach the entity's list block reason as extra reason context.
		return ep.match(ep, entity, ep.Lists, "filterlist contains", "filterlist", entity.ListBlockReason())
	}
	return NoMatch, nil
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointLists) String() string {
	return ep.renderPPP(ep.Lists)
}
// parseTypeList parses a filter list definition, eg. "L:MAL,TRAC".
// It returns nil (without error) when the value has no "L:" prefix, so
// that other parsers may handle it.
func parseTypeList(fields []string) (Endpoint, error) {
	if !strings.HasPrefix(fields[1], "L:") {
		return nil, nil
	}
	listIDs := strings.Split(strings.TrimPrefix(fields[1], "L:"), ",")
	ep := &EndpointLists{
		ListSet: listIDs,
		Lists:   "L:" + strings.Join(listIDs, ","),
	}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,107 @@
package endpoints
import (
"context"
"strings"
"github.com/safing/portmaster/service/intel"
"github.com/safing/portmaster/service/network/netutils"
)
// Network scopes are represented as a bitmask so a single endpoint can
// match multiple scopes at once.
const (
	scopeLocalhost        = 1
	scopeLocalhostName    = "Localhost"
	scopeLocalhostMatcher = "localhost"

	scopeLAN        = 2
	scopeLANName    = "LAN"
	scopeLANMatcher = "lan"

	scopeInternet        = 4
	scopeInternetName    = "Internet"
	scopeInternetMatcher = "internet"
)
// EndpointScope matches network scopes.
type EndpointScope struct {
	EndpointBase

	// scopes is a bitmask of the scope* constants.
	scopes uint8
}
// Matches checks whether the given entity matches this endpoint definition.
func (ep *EndpointScope) Matches(_ context.Context, entity *intel.Entity) (EPResult, Reason) {
	if entity.IP == nil {
		return NoMatch, nil
	}

	// Map the entity's IP scope to the local scope bitmask.
	var scope uint8
	switch entity.IPScope {
	case netutils.HostLocal:
		scope = scopeLocalhost
	case netutils.LinkLocal:
		scope = scopeLAN
	case netutils.SiteLocal:
		scope = scopeLAN
	case netutils.Global:
		scope = scopeInternet
	case netutils.LocalMulticast:
		scope = scopeLAN
	case netutils.GlobalMulticast:
		scope = scopeInternet
	case netutils.Undefined, netutils.Invalid:
		return NoMatch, nil
	}

	// Match if the entity's scope is in the configured bitmask.
	if ep.scopes&scope > 0 {
		return ep.match(ep, entity, ep.Scopes(), "scope matches")
	}
	return NoMatch, nil
}
// Scopes returns the string representation of all configured scopes,
// comma-separated when multiple scopes are set.
func (ep *EndpointScope) Scopes() string {
	// single scope
	switch ep.scopes {
	case scopeLocalhost:
		return scopeLocalhostName
	case scopeLAN:
		return scopeLANName
	case scopeInternet:
		return scopeInternetName
	}

	// multiple scopes
	var s []string
	if ep.scopes&scopeLocalhost > 0 {
		s = append(s, scopeLocalhostName)
	}
	if ep.scopes&scopeLAN > 0 {
		s = append(s, scopeLANName)
	}
	if ep.scopes&scopeInternet > 0 {
		s = append(s, scopeInternetName)
	}
	return strings.Join(s, ",")
}
// String returns the string representation of the endpoint definition.
func (ep *EndpointScope) String() string {
	return ep.renderPPP(ep.Scopes())
}
// parseTypeScope parses a network scope definition, eg. "Localhost,LAN"
// or "Internet" (case-insensitive). It returns nil (without error) when
// the value contains an unknown scope, so that other parsers may handle it.
func parseTypeScope(fields []string) (Endpoint, error) {
	ep := &EndpointScope{}
	for _, val := range strings.Split(strings.ToLower(fields[1]), ",") {
		switch val {
		case scopeLocalhostMatcher:
			// Use OR instead of XOR so a duplicated scope value
			// (eg. "LAN,LAN") does not toggle the scope off again.
			ep.scopes |= scopeLocalhost
		case scopeLANMatcher:
			ep.scopes |= scopeLAN
		case scopeInternetMatcher:
			ep.scopes |= scopeInternet
		default:
			return nil, nil
		}
	}
	return ep.parsePPP(ep, fields)
}

View File

@@ -0,0 +1,258 @@
package endpoints
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/safing/portmaster/service/intel"
"github.com/safing/portmaster/service/network/reference"
)
// Endpoint describes an Endpoint Matcher.
type Endpoint interface {
	// Matches checks whether the given entity matches the endpoint.
	Matches(ctx context.Context, entity *intel.Entity) (EPResult, Reason)
	// String returns the string representation of the endpoint definition.
	String() string
}
// EndpointBase provides general functions for implementing an Endpoint to reduce boilerplate.
type EndpointBase struct { //nolint:maligned // TODO
	// Protocol is the IP protocol number to match; 0 matches any protocol.
	Protocol uint8
	// StartPort and EndPort define the destination port range to match;
	// a StartPort of 0 matches any port.
	StartPort uint16
	EndPort   uint16

	// Permitted is the verdict of this endpoint rule.
	Permitted bool
}
// match checks the protocol/port constraints and, on a match, builds a
// Reason from the given value, description and optional key/value context.
func (ep *EndpointBase) match(s fmt.Stringer, entity *intel.Entity, value, desc string, keyval ...interface{}) (EPResult, Reason) {
	result := ep.matchesPPP(entity)
	if result == NoMatch {
		return result, nil
	}

	return result, ep.makeReason(s, value, desc, keyval...)
}
// makeReason creates a Reason describing why this endpoint matched.
// The variadic keyval arguments are interpreted as alternating key/value
// pairs and attached as extra context; a trailing key without a value is
// ignored, as are non-string keys.
func (ep *EndpointBase) makeReason(s fmt.Stringer, value, desc string, keyval ...interface{}) Reason {
	r := &reason{
		description: desc,
		Filter:      s.String(),
		Permitted:   ep.Permitted,
		Value:       value,
	}
	r.Extra = make(map[string]interface{})
	// Iterate over complete key/value pairs. The previous loop bound of
	// len(keyval)/2 silently dropped every pair after the first one.
	for idx := 0; idx+1 < len(keyval); idx += 2 {
		if keyName, ok := keyval[idx].(string); ok {
			r.Extra[keyName] = keyval[idx+1]
		}
	}
	return r
}
// matchesPPP checks the entity against the configured protocol and port
// constraints and returns the rule's verdict (Permitted/Denied) on a match.
func (ep *EndpointBase) matchesPPP(entity *intel.Entity) (result EPResult) {
	// only check if protocol is defined
	if ep.Protocol > 0 {
		// if protocol does not match, return NoMatch
		if entity.Protocol != ep.Protocol {
			return NoMatch
		}
	}

	// only check if port is defined
	if ep.StartPort > 0 {
		// if port does not match, return NoMatch
		if entity.DstPort() < ep.StartPort || entity.DstPort() > ep.EndPort {
			return NoMatch
		}
	}

	// protocol and port matched or were defined as any
	if ep.Permitted {
		return Permitted
	}
	return Denied
}
// renderPPP renders the given entity representation together with the
// permission prefix ("+"/"-") and any protocol/port constraints.
func (ep *EndpointBase) renderPPP(s string) string {
	var rendered string
	if ep.Permitted {
		rendered = "+ " + s
	} else {
		rendered = "- " + s
	}

	if ep.Protocol > 0 || ep.StartPort > 0 {
		if ep.Protocol > 0 {
			rendered += " " + reference.GetProtocolName(ep.Protocol)
		} else {
			rendered += " *"
		}
		if ep.StartPort > 0 {
			if ep.StartPort == ep.EndPort {
				// Single port: render by name where one is known.
				rendered += "/" + reference.GetPortName(ep.StartPort)
			} else {
				// Port range.
				rendered += "/" + strconv.Itoa(int(ep.StartPort)) + "-" + strconv.Itoa(int(ep.EndPort))
			}
		}
	}
	return rendered
}
// parsePPP parses the shared permission, protocol and port segments of an
// endpoint definition into the base and returns the given typed endpoint.
// Expected formats: "+|- <entity>" or "+|- <entity> <proto>[/<port>[-<port>]]".
func (ep *EndpointBase) parsePPP(typedEp Endpoint, fields []string) (Endpoint, error) { //nolint:gocognit // TODO
	switch len(fields) {
	case 2:
		// nothing else to do here
	case 3:
		// parse protocol and port(s)
		var ok bool
		splitted := strings.Split(fields[2], "/")
		if len(splitted) > 2 {
			return nil, invalidDefinitionError(fields, "protocol and port must be in format <protocol>/<port>")
		}

		// protocol
		switch splitted[0] {
		case "":
			return nil, invalidDefinitionError(fields, "protocol can't be empty")
		case "*":
			// any protocol that supports ports
		default:
			n, err := strconv.ParseUint(splitted[0], 10, 8)
			n8 := uint8(n)
			if err != nil {
				// maybe it's a name?
				n8, ok = reference.GetProtocolNumber(splitted[0])
				if !ok {
					return nil, invalidDefinitionError(fields, "protocol number parsing error")
				}
			}
			ep.Protocol = n8
		}

		// port(s)
		if len(splitted) > 1 {
			switch splitted[1] {
			case "", "*":
				return nil, invalidDefinitionError(fields, "omit port if should match any")
			default:
				portSplitted := strings.Split(splitted[1], "-")
				if len(portSplitted) > 2 {
					return nil, invalidDefinitionError(fields, "ports must be in format from-to")
				}

				// parse start port
				n, err := strconv.ParseUint(portSplitted[0], 10, 16)
				n16 := uint16(n)
				if err != nil {
					// maybe it's a name?
					n16, ok = reference.GetPortNumber(portSplitted[0])
					if !ok {
						return nil, invalidDefinitionError(fields, "port number parsing error")
					}
				}
				if n16 == 0 {
					return nil, invalidDefinitionError(fields, "port number cannot be 0")
				}
				ep.StartPort = n16

				// parse end port; without one, EndPort equals StartPort
				if len(portSplitted) > 1 {
					n, err = strconv.ParseUint(portSplitted[1], 10, 16)
					n16 = uint16(n)
					if err != nil {
						// maybe it's a name?
						n16, ok = reference.GetPortNumber(portSplitted[1])
						if !ok {
							return nil, invalidDefinitionError(fields, "port number parsing error")
						}
					}
				}
				// NOTE(review): a reversed range (StartPort > EndPort) is not
				// rejected here and can never match — confirm whether this
				// should be validated.
				if n16 == 0 {
					return nil, invalidDefinitionError(fields, "port number cannot be 0")
				}
				ep.EndPort = n16
			}
		}

		// check if anything was parsed
		if ep.Protocol == 0 && ep.StartPort == 0 {
			return nil, invalidDefinitionError(fields, "omit protocol/port if should match any")
		}
	default:
		return nil, invalidDefinitionError(fields, "there should be only 2 or 3 segments")
	}

	// permission prefix
	switch fields[0] {
	case "+":
		ep.Permitted = true
	case "-":
		ep.Permitted = false
	default:
		return nil, invalidDefinitionError(fields, "invalid permission prefix")
	}

	return typedEp, nil
}
// invalidDefinitionError returns a uniform error for an invalid endpoint definition.
func invalidDefinitionError(fields []string, msg string) error {
	return fmt.Errorf(`invalid endpoint definition: "%s" - %s`, strings.Join(fields, " "), msg)
}
// parseEndpoint parses a single endpoint definition and returns the
// matching Endpoint implementation. Each parser below is tried in order;
// the first one to recognize the definition (or to fail) decides.
func parseEndpoint(value string) (Endpoint, error) {
	fields := strings.Fields(value)
	if len(fields) < 2 {
		return nil, fmt.Errorf(`invalid endpoint definition: "%s"`, value)
	}

	// Strip a trailing comment.
	for i, field := range fields {
		if strings.HasPrefix(field, "#") {
			fields = fields[:i]
			break
		}
	}

	// Try all endpoint parsers in order of specificity: any, ip, ip range,
	// country, continent, asn, scopes, lists, domain.
	parsers := []func([]string) (Endpoint, error){
		parseTypeAny,
		parseTypeIP,
		parseTypeIPRange,
		parseTypeCountry,
		parseTypeContinent,
		parseTypeASN,
		parseTypeScope,
		parseTypeList,
		parseTypeDomain,
	}
	for _, parse := range parsers {
		ep, err := parse(fields)
		if ep != nil || err != nil {
			return ep, err
		}
	}

	return nil, fmt.Errorf(`unknown endpoint definition: "%s"`, value)
}

View File

@@ -0,0 +1,99 @@
package endpoints
import (
"strings"
"testing"
)
// TestEndpointParsing checks that endpoint definitions of every type parse
// and that their string representation round-trips to the original value.
func TestEndpointParsing(t *testing.T) {
	t.Parallel()

	// any (basics)
	testParsing(t, "- *")
	testParsing(t, "+ *")

	// domain
	testDomainParsing(t, "- *bad*", domainMatchTypeContains, "bad")
	testDomainParsing(t, "- bad*", domainMatchTypePrefix, "bad")
	testDomainParsing(t, "- *bad.com", domainMatchTypeSuffix, "bad.com.")
	testDomainParsing(t, "- .bad.com", domainMatchTypeZone, "bad.com.")
	testDomainParsing(t, "- bad.com", domainMatchTypeExact, "bad.com.")
	testDomainParsing(t, "- www.bad.com.", domainMatchTypeExact, "www.bad.com.")
	testDomainParsing(t, "- www.bad.com", domainMatchTypeExact, "www.bad.com.")

	// ip
	testParsing(t, "+ 127.0.0.1")
	testParsing(t, "+ 192.168.0.1")
	testParsing(t, "+ ::1")
	testParsing(t, "+ 2606:4700:4700::1111")

	// ip range
	testParsing(t, "+ 127.0.0.0/8")
	testParsing(t, "+ 192.168.0.0/24")
	testParsing(t, "+ 2606:4700:4700::/48")

	// country
	testParsing(t, "+ DE")
	testParsing(t, "+ AT")
	testParsing(t, "+ CH")
	testParsing(t, "+ AS")

	// asn
	testParsing(t, "+ AS1")
	testParsing(t, "+ AS12")
	testParsing(t, "+ AS123")
	testParsing(t, "+ AS1234")
	testParsing(t, "+ AS12345")

	// network scope
	testParsing(t, "+ Localhost")
	testParsing(t, "+ LAN")
	testParsing(t, "+ Internet")
	testParsing(t, "+ Localhost,LAN,Internet")

	// protocol and ports
	testParsing(t, "+ * TCP/1-1024")
	testParsing(t, "+ * */DNS")
	testParsing(t, "+ * ICMP")
	testParsing(t, "+ * 127")
	testParsing(t, "+ * UDP/1234")
	testParsing(t, "+ * TCP/HTTP")
	testParsing(t, "+ * TCP/80-443")

	// TODO: Test fails:
	// testParsing(t, "+ 1234")
}
// testParsing parses the given endpoint definition and checks that its
// string representation round-trips to the original value.
func testParsing(t *testing.T, value string) {
	t.Helper()

	ep, err := parseEndpoint(value)
	if err != nil {
		t.Error(err)
		return
	}
	// t.Logf("%T: %+v", ep, ep)
	if value != ep.String() {
		t.Errorf(`stringified endpoint mismatch: original was "%s", parsed is "%s"`, value, ep.String())
	}
}
// testDomainParsing parses a domain endpoint definition and checks the
// resulting match type and normalized match value.
func testDomainParsing(t *testing.T, value string, matchType uint8, matchValue string) {
	t.Helper()

	testParsing(t, value)
	epGeneric, err := parseTypeDomain(strings.Fields(value))
	if err != nil {
		t.Error(err)
		return
	}

	ep := epGeneric.(*EndpointDomain) //nolint:forcetypeassert
	if ep.MatchType != matchType {
		t.Errorf(`error parsing domain endpoint "%s": match type should be %d, was %d`, value, matchType, ep.MatchType)
	}
	if ep.Domain != matchValue {
		t.Errorf(`error parsing domain endpoint "%s": match domain value should be %s, was %s`, value, matchValue, ep.Domain)
	}
}

View File

@@ -0,0 +1,149 @@
package endpoints
import (
"context"
"errors"
"fmt"
"strings"
"github.com/safing/portmaster/service/intel"
)
// Endpoints is a list of permitted or denied endpoints.
type Endpoints []Endpoint
// EPResult represents the result of a check against an EndpointPermission.
type EPResult uint8

// Endpoint matching return values.
const (
	NoMatch EPResult = iota
	MatchError
	Denied
	Permitted
)

// IsDecision reports whether result is an actual decision (Permitted,
// Denied or MatchError) rather than NoMatch.
func IsDecision(result EPResult) bool {
	switch result {
	case Permitted, Denied, MatchError:
		return true
	default:
		return false
	}
}
// ParseEndpoints parses a list of endpoint definitions and returns a list
// of Endpoints for matching. Entries that fail to parse are skipped; if
// any entry failed, the (partial) list is returned together with an error
// summarizing the failure count and the first error encountered.
func ParseEndpoints(entries []string) (Endpoints, error) {
	var firstErr error
	var errCnt int

	endpoints := make(Endpoints, 0, len(entries))
	for _, entry := range entries {
		ep, err := parseEndpoint(entry)
		if err != nil {
			errCnt++
			if firstErr == nil {
				firstErr = err
			}
			continue
		}
		endpoints = append(endpoints, ep)
	}

	// errCnt > 0 exactly when firstErr != nil, so the former extra branch
	// returning the bare firstErr was unreachable and has been removed.
	if firstErr != nil {
		return endpoints, fmt.Errorf("encountered %d errors, first was: %w", errCnt, firstErr)
	}
	return endpoints, nil
}
// ListEntryValidationRegex is a regex to bullshit check endpoint list entries.
var ListEntryValidationRegex = strings.Join([]string{
	`^(\+|\-) `, // Rule verdict.
	`(! +)?`,    // Invert matching.
	// Note: character classes use "A-Za-z" — the previous "A-z" also
	// matched the punctuation between "Z" and "a" ([\]^_` and backslash).
	`[A-Za-z0-9\.:\-*/]+`,               // Entity matching.
	`( `,                                // Start of optional matching.
	`[A-Za-z0-9*]+`,                     // Protocol matching.
	`(/[A-Za-z0-9]+(\-[A-Za-z0-9]+)?)?`, // Port and port range matching.
	`)?`,                                // End of optional matching.
	`( +#.*)?`,                          // Optional comment.
}, "")
// ValidateEndpointListConfigOption validates the given value.
// It expects a []string of endpoint definitions and returns an error if
// the type is wrong or any entry fails to parse.
func ValidateEndpointListConfigOption(value interface{}) error {
	list, ok := value.([]string)
	if !ok {
		return errors.New("invalid type")
	}

	_, err := ParseEndpoints(list)
	return err
}
// IsSet returns whether the Endpoints object is "set", ie. holds at least
// one endpoint definition.
func (e Endpoints) IsSet() bool {
	return len(e) > 0
}
// Match checks whether the given entity matches any of the endpoint
// definitions in the list. The first entry that yields a result other
// than NoMatch decides, so list order matters.
func (e Endpoints) Match(ctx context.Context, entity *intel.Entity) (result EPResult, reason Reason) {
	for _, entry := range e {
		if entry == nil {
			continue
		}
		if result, reason = entry.Matches(ctx, entity); result != NoMatch {
			return
		}
	}

	return NoMatch, nil
}
// MatchMulti checks whether the given entities match any of the endpoint
// definitions in the list. Every rule is evaluated against all given entities
// and only if no match was registered, the next rule is evaluated.
func (e Endpoints) MatchMulti(ctx context.Context, entities ...*intel.Entity) (result EPResult, reason Reason) {
	for _, entry := range e {
		if entry == nil {
			continue
		}
		for _, entity := range entities {
			if entity == nil {
				continue
			}
			if result, reason = entry.Matches(ctx, entity); result != NoMatch {
				return
			}
		}
	}

	return NoMatch, nil
}
// String returns the string representation of the endpoint list.
func (e Endpoints) String() string {
	s := make([]string, 0, len(e))
	for _, entry := range e {
		s = append(s, entry.String())
	}
	return fmt.Sprintf("[%s]", strings.Join(s, ", "))
}
// String returns the string representation of the endpoint matching result.
func (epr EPResult) String() string {
	switch epr {
	case NoMatch:
		return "No Match"
	case MatchError:
		return "Match Error"
	case Denied:
		return "Denied"
	case Permitted:
		return "Permitted"
	default:
		return "Unknown"
	}
}

View File

@@ -0,0 +1,432 @@
package endpoints
import (
"context"
"net"
"runtime"
"testing"
"github.com/stretchr/testify/assert"
"github.com/safing/portmaster/service/core/pmtesting"
"github.com/safing/portmaster/service/intel"
)
// TestMain bootstraps the required modules for the tests in this package.
func TestMain(m *testing.M) {
	pmtesting.TestMain(m, intel.Module)
}
// testEndpointMatch checks a single endpoint against an entity and reports
// a test error (including the caller's line number) on an unexpected result.
func testEndpointMatch(t *testing.T, ep Endpoint, entity *intel.Entity, expectedResult EPResult) {
	t.Helper()

	result, _ := ep.Matches(context.TODO(), entity)
	if result != expectedResult {
		t.Errorf(
			"line %d: unexpected result for endpoint %s and entity %+v: result=%s, expected=%s",
			getLineNumberOfCaller(1),
			ep,
			entity,
			result,
			expectedResult,
		)
	}
}
// testFormat parses the given endpoint definition and asserts whether
// parsing should succeed or fail.
func testFormat(t *testing.T, endpoint string, shouldSucceed bool) {
	t.Helper()

	_, err := parseEndpoint(endpoint)
	if shouldSucceed {
		assert.NoError(t, err)
	} else {
		assert.Error(t, err)
	}
}
// TestEndpointFormat checks which (wildcard) domain formats are accepted.
func TestEndpointFormat(t *testing.T) {
	t.Parallel()

	testFormat(t, "+ .", false)
	testFormat(t, "+ .at", true)
	testFormat(t, "+ .at.", true)
	testFormat(t, "+ 1.at", true)
	testFormat(t, "+ 1.at.", true)
	testFormat(t, "+ 1.f.ix.de.", true)
	testFormat(t, "+ *contains*", true)
	testFormat(t, "+ *has.suffix", true)
	testFormat(t, "+ *.has.suffix", true)
	testFormat(t, "+ *has.prefix*", true)
	testFormat(t, "+ *has.prefix.*", true)
	testFormat(t, "+ .sub.and.prefix.*", false)
	testFormat(t, "+ *.sub..and.prefix.*", false)
}
func TestEndpointMatching(t *testing.T) { //nolint:maintidx // TODO
t.Parallel()
// ANY
ep, err := parseEndpoint("+ *")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
// DOMAIN
// wildcard domains
ep, err = parseEndpoint("+ *example.com")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
ep, err = parseEndpoint("+ *.example.com")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
ep, err = parseEndpoint("+ .example.com")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc-example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
ep, err = parseEndpoint("+ example.*")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
ep, err = parseEndpoint("+ *.exampl*")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "abc.example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
ep, err = parseEndpoint("+ *.com.")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.org.",
}).Init(0), NoMatch)
// protocol
ep, err = parseEndpoint("+ example.com UDP")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 17,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), NoMatch)
// ports
ep, err = parseEndpoint("+ example.com 17/442-444")
if err != nil {
t.Fatal(err)
}
entity := (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 17,
Port: 441,
}).Init(0)
testEndpointMatch(t, ep, entity, NoMatch)
entity.Port = 442
entity.Init(0)
testEndpointMatch(t, ep, entity, Permitted)
entity.Port = 443
entity.Init(0)
testEndpointMatch(t, ep, entity, Permitted)
entity.Port = 444
entity.Init(0)
testEndpointMatch(t, ep, entity, Permitted)
entity.Port = 445
entity.Init(0)
testEndpointMatch(t, ep, entity, NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), NoMatch)
// IP
ep, err = parseEndpoint("+ 10.2.3.4")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.4"),
Protocol: 17,
Port: 443,
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "",
IP: net.ParseIP("10.2.3.3"),
Protocol: 6,
Port: 443,
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
IP: net.ParseIP("10.2.3.5"),
Protocol: 17,
Port: 443,
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
Domain: "example.com.",
}).Init(0), NoMatch)
// IP Range
ep, err = parseEndpoint("+ 10.2.3.0/24")
if err != nil {
t.Fatal(err)
}
testEndpointMatch(t, ep, (&intel.Entity{
IP: net.ParseIP("10.2.2.4"),
}).Init(0), NoMatch)
testEndpointMatch(t, ep, (&intel.Entity{
IP: net.ParseIP("10.2.3.4"),
}).Init(0), Permitted)
testEndpointMatch(t, ep, (&intel.Entity{
IP: net.ParseIP("10.2.4.4"),
}).Init(0), NoMatch)
// Skip test that need the geoip database in CI.
if !testing.Short() {
// ASN
ep, err = parseEndpoint("+ AS15169")
if err != nil {
t.Fatal(err)
}
entity = (&intel.Entity{IP: net.IPv4(8, 8, 8, 8)}).Init(0)
testEndpointMatch(t, ep, entity, Permitted)
entity = (&intel.Entity{IP: net.IPv4(1, 1, 1, 1)}).Init(0)
testEndpointMatch(t, ep, entity, NoMatch)
// Country
ep, err = parseEndpoint("+ AT")
if err != nil {
t.Fatal(err)
}
entity = (&intel.Entity{IP: net.IPv4(194, 232, 104, 1)}).Init(0) // orf.at
testEndpointMatch(t, ep, entity, Permitted)
entity = (&intel.Entity{IP: net.IPv4(151, 101, 1, 164)}).Init(0) // nytimes.com
testEndpointMatch(t, ep, entity, NoMatch)
}
// Scope
ep, err = parseEndpoint("+ Localhost,LAN")
if err != nil {
t.Fatal(err)
}
entity = (&intel.Entity{IP: net.IPv4(192, 168, 0, 1)}).Init(0)
testEndpointMatch(t, ep, entity, Permitted)
entity = (&intel.Entity{IP: net.IPv4(151, 101, 1, 164)}).Init(0) // nytimes.com
testEndpointMatch(t, ep, entity, NoMatch)
// Port with protocol wildcard
ep, err = parseEndpoint("+ * */443")
if err != nil {
t.Fatal(err)
}
entity = &intel.Entity{
Domain: "",
IP: net.ParseIP("10.2.3.4"),
Protocol: 6,
Port: 443,
}
entity.Init(0)
testEndpointMatch(t, ep, entity, Permitted)
// Lists
// Skip test that need the filter lists in CI.
if !testing.Short() {
_, err = parseEndpoint("+ L:A,B,C")
if err != nil {
t.Fatal(err)
}
}
// TODO: write test for lists matcher
}
// getLineNumberOfCaller returns the source line number of the function that
// is the given number of stack levels above the caller of this function.
// It returns 0 if the requested stack frame does not exist.
func getLineNumberOfCaller(levels int) int {
	skip := levels + 1 // Also skip the frame of getLineNumberOfCaller itself.
	_, _, lineNo, _ := runtime.Caller(skip) //nolint:dogsled
	return lineNo
}

View File

@@ -0,0 +1,34 @@
package endpoints
// Reason describes the reason why an endpoint has been
// permitted or blocked.
type Reason interface {
	// String should return a human readable string
	// describing the decision reason.
	String() string

	// Context returns the context that was used
	// for the decision, for structured inspection.
	Context() interface{}
}
// reason is the default Reason implementation. It records which filter
// matched, the matched value, whether the match permitted the connection,
// and any extra decision data.
type reason struct {
	description string // Human readable description of the decision.
	Filter      string
	Value       string
	Permitted   bool
	Extra       map[string]interface{}
}

// String returns a human readable description of the decision.
// Note: it strips the first two characters of the Filter field.
func (r *reason) String() string {
	if r.Permitted {
		return "allowed by rule: " + r.description + " " + r.Filter[2:]
	}
	return "denied by rule: " + r.description + " " + r.Filter[2:]
}

// Context returns the reason itself as the decision context.
func (r *reason) Context() interface{} {
	return r
}

View File

@@ -0,0 +1,438 @@
package profile
import (
"fmt"
"regexp"
"strings"
"golang.org/x/exp/slices"
"github.com/safing/jess/lhash"
"github.com/safing/portbase/container"
)
// # Matching and Scores
//
// There are three levels:
//
// 1. Type: What matched?
// 1. Tag: 50.000 points
// 2. Cmdline: 40.000 points
// 3. Env: 30.000 points
// 4. MatchingPath: 20.000 points
// 5. Path: 10.000 points
// 2. Operation: How was it matched?
// 1. Equals: 3.000 points
// 2. Prefix: 2.000 points
// 3. Regex: 1.000 points
// 3. How "strong" was the match?
// 1. Equals: Length of path (irrelevant)
// 2. Prefix: Length of prefix
// 3. Regex: Length of match
// Fingerprint Type IDs.
const (
	// Fingerprint types: which part of the process data is matched.
	FingerprintTypeTagID     = "tag"
	FingerprintTypeCmdlineID = "cmdline"
	FingerprintTypeEnvID     = "env"
	FingerprintTypePathID    = "path" // Matches both MatchingPath and Path.

	// Fingerprint operations: how the value is matched.
	FingerprintOperationEqualsID = "equals"
	FingerprintOperationPrefixID = "prefix"
	FingerprintOperationRegexID  = "regex"

	// Base scores per matched type; see the scoring scheme described above.
	tagMatchBaseScore          = 50_000
	cmdlineMatchBaseScore      = 40_000
	envMatchBaseScore          = 30_000
	matchingPathMatchBaseScore = 20_000
	pathMatchBaseScore         = 10_000

	// Base scores per operation, added on top of the type base score.
	fingerprintEqualsBaseScore = 3_000
	fingerprintPrefixBaseScore = 2_000
	fingerprintRegexBaseScore  = 1_000

	// maxMatchStrength caps the length-based match strength so that it can
	// never bleed into the next operation score bracket (1_000 apart).
	maxMatchStrength = 499
)
type (
	// Fingerprint defines a way of matching a process.
	// The Key is only valid - but required - for some types.
	Fingerprint struct {
		Type      string
		Key       string // Key must always fully match.
		Operation string
		Value     string

		// MergedFrom holds the ID of the profile from which this fingerprint was
		// merged from. The merged profile should create a new profile ID derived
		// from the new fingerprints and add all fingerprints with this field set
		// to the originating profile ID
		MergedFrom string // `json:"mergedFrom,omitempty"`
	}

	// Tag represents a simple key/value kind of tag used in process metadata
	// and fingerprints.
	Tag struct {
		Key   string
		Value string
	}

	// MatchingData is an interface to fetching data in the matching process.
	MatchingData interface {
		Tags() []Tag
		Env() map[string]string
		Path() string
		MatchingPath() string
		Cmdline() string
	}

	// matchingFingerprint is the internal interface of a parsed,
	// ready-to-use fingerprint matcher.
	matchingFingerprint interface {
		// MatchesKey reports whether the matcher applies to the given key.
		MatchesKey(key string) bool
		// Match returns the match score for the given value, or 0 for no match.
		Match(value string) (score int)
	}
)
// MatchesKey returns whether the optional fingerprint key (required for some
// types only) matches the given key. Keys must always match exactly.
func (fp Fingerprint) MatchesKey(key string) bool {
	return fp.Key == key
}
// KeyInTags reports whether the given key is present in any of the tags.
func KeyInTags(tags []Tag, key string) bool {
	for i := range tags {
		if tags[i].Key == key {
			return true
		}
	}
	return false
}
// KeyAndValueInTags reports whether the given key/value pair is present in
// the tags.
func KeyAndValueInTags(tags []Tag, key, value string) bool {
	for i := range tags {
		if tags[i].Key == key && tags[i].Value == value {
			return true
		}
	}
	return false
}
// fingerprintEquals matches a value by exact comparison.
type fingerprintEquals struct {
	Fingerprint
}

// Match returns the match score, or 0 if the value does not equal the
// fingerprint value exactly.
func (fp fingerprintEquals) Match(value string) (score int) {
	if value != fp.Value {
		return 0
	}
	return fingerprintEqualsBaseScore + checkMatchStrength(len(fp.Value))
}
// fingerprintPrefix matches a value by prefix.
type fingerprintPrefix struct {
	Fingerprint
}

// Match returns the match score, or 0 if the value does not start with the
// fingerprint value.
func (fp fingerprintPrefix) Match(value string) (score int) {
	if !strings.HasPrefix(value, fp.Value) {
		return 0
	}
	return fingerprintPrefixBaseScore + checkMatchStrength(len(fp.Value))
}
// fingerprintRegex matches a value using a pre-compiled regular expression.
type fingerprintRegex struct {
	Fingerprint

	regex *regexp.Regexp
}

// Match returns the match score based on the longest regex match within the
// given value, or 0 if nothing matched.
func (fp fingerprintRegex) Match(value string) (score int) {
	// Use the length of the longest match as the match strength.
	// Empty matches are implicitly ignored, as they cannot exceed 0.
	best := 0
	for _, match := range fp.regex.FindAllString(value, -1) {
		if len(match) > best {
			best = len(match)
		}
	}

	if best == 0 {
		return 0
	}
	return fingerprintRegexBaseScore + checkMatchStrength(best)
}
// ParsedFingerprints holds parsed fingerprints for fast usage.
type ParsedFingerprints struct {
	tagPrints     []matchingFingerprint // Matchers for process tags.
	envPrints     []matchingFingerprint // Matchers for environment variables.
	pathPrints    []matchingFingerprint // Matchers for both MatchingPath and Path.
	cmdlinePrints []matchingFingerprint // Matchers for the command line.
}
// ParseFingerprints parses the fingerprints to make them ready for matching.
// It does not abort on the first invalid fingerprint: it parses as many as
// possible and returns the first encountered error alongside the usable set.
func ParseFingerprints(raw []Fingerprint, deprecatedLinkedPath string) (parsed *ParsedFingerprints, firstErr error) {
	parsed = &ParsedFingerprints{}

	// Add deprecated LinkedPath to fingerprints, if they are empty.
	// TODO: Remove in v1.5
	if len(raw) == 0 && deprecatedLinkedPath != "" {
		parsed.pathPrints = append(parsed.pathPrints, &fingerprintEquals{
			Fingerprint: Fingerprint{
				Type:      FingerprintTypePathID,
				Operation: FingerprintOperationEqualsID,
				Value:     deprecatedLinkedPath,
			},
		})
	}

	// Parse all fingerprints.
	// Do not fail when one fails, instead return the first encountered error.
	for _, entry := range raw {
		// Check type and required key.
		switch entry.Type {
		case FingerprintTypeTagID, FingerprintTypeEnvID:
			// Tag and env fingerprints need a key to match against.
			if entry.Key == "" {
				if firstErr == nil {
					firstErr = fmt.Errorf("%s fingerprint is missing key", entry.Type)
				}
				continue
			}
		case FingerprintTypePathID, FingerprintTypeCmdlineID:
			// Don't need a key.
		default:
			// Unknown type.
			if firstErr == nil {
				firstErr = fmt.Errorf("unknown fingerprint type: %q", entry.Type)
			}
			continue
		}

		// Create and/or collect operation match functions.
		switch entry.Operation {
		case FingerprintOperationEqualsID:
			parsed.addMatchingFingerprint(entry, fingerprintEquals{entry})
		case FingerprintOperationPrefixID:
			parsed.addMatchingFingerprint(entry, fingerprintPrefix{entry})
		case FingerprintOperationRegexID:
			regex, err := regexp.Compile(entry.Value)
			if err != nil {
				if firstErr == nil {
					// Wrap the compilation error itself, not just the pattern,
					// so the cause of the failure is not lost.
					firstErr = fmt.Errorf("failed to compile regex fingerprint %q: %w", entry.Value, err)
				}
			} else {
				parsed.addMatchingFingerprint(entry, fingerprintRegex{
					Fingerprint: entry,
					regex:       regex,
				})
			}
		default:
			if firstErr == nil {
				firstErr = fmt.Errorf("unknown fingerprint operation: %q", entry.Operation)
			}
		}
	}

	return parsed, firstErr
}
// addMatchingFingerprint sorts the given matcher into the correct matcher
// list of the parsed fingerprints, based on the fingerprint type.
func (parsed *ParsedFingerprints) addMatchingFingerprint(fp Fingerprint, matchingPrint matchingFingerprint) {
	switch fp.Type {
	case FingerprintTypeTagID:
		parsed.tagPrints = append(parsed.tagPrints, matchingPrint)
	case FingerprintTypeEnvID:
		parsed.envPrints = append(parsed.envPrints, matchingPrint)
	case FingerprintTypePathID:
		parsed.pathPrints = append(parsed.pathPrints, matchingPrint)
	case FingerprintTypeCmdlineID:
		parsed.cmdlinePrints = append(parsed.cmdlinePrints, matchingPrint)
	default:
		// This should never happen, as the types are checked already.
		panic(fmt.Sprintf("unknown fingerprint type: %q", fp.Type))
	}
}
// MatchFingerprints returns the highest matching score of the given
// fingerprints and matching data.
// Match types are checked in order of their base score (tag, cmdline, env,
// matching path, path); the first type with any match determines the result.
func MatchFingerprints(prints *ParsedFingerprints, md MatchingData) (highestScore int) {
	// Check tags.
	tags := md.Tags()
	if len(tags) > 0 {
		for _, tagPrint := range prints.tagPrints {
			for _, tag := range tags {
				// Check if tag key matches.
				if !tagPrint.MatchesKey(tag.Key) {
					continue
				}
				// Try matching the tag value.
				score := tagPrint.Match(tag.Value)
				if score > highestScore {
					highestScore = score
				}
			}
		}
		// If something matched, add base score and return.
		if highestScore > 0 {
			return tagMatchBaseScore + highestScore
		}
	}

	// Check cmdline.
	cmdline := md.Cmdline()
	if cmdline != "" {
		for _, cmdlinePrint := range prints.cmdlinePrints {
			if score := cmdlinePrint.Match(cmdline); score > highestScore {
				highestScore = score
			}
		}
		// If something matched, add base score and return.
		if highestScore > 0 {
			return cmdlineMatchBaseScore + highestScore
		}
	}

	// Check env.
	for _, envPrint := range prints.envPrints {
		for key, value := range md.Env() {
			// Check if env key matches.
			if !envPrint.MatchesKey(key) {
				continue
			}
			// Try matching the env value.
			score := envPrint.Match(value)
			if score > highestScore {
				highestScore = score
			}
		}
	}
	// If something matched, add base score and return.
	if highestScore > 0 {
		return envMatchBaseScore + highestScore
	}

	// Check matching path.
	// Note: pathPrints are used for both MatchingPath and Path; a match on
	// MatchingPath scores higher.
	matchingPath := md.MatchingPath()
	if matchingPath != "" {
		for _, pathPrint := range prints.pathPrints {
			// Try matching the path value.
			score := pathPrint.Match(matchingPath)
			if score > highestScore {
				highestScore = score
			}
		}
		// If something matched, add base score and return.
		if highestScore > 0 {
			return matchingPathMatchBaseScore + highestScore
		}
	}

	// Check path.
	path := md.Path()
	if path != "" {
		for _, pathPrint := range prints.pathPrints {
			// Try matching the path value.
			score := pathPrint.Match(path)
			if score > highestScore {
				highestScore = score
			}
		}
		// If something matched, add base score and return.
		if highestScore > 0 {
			return pathMatchBaseScore + highestScore
		}
	}

	// Nothing matched.
	return 0
}
// checkMatchStrength clamps the given match strength to the
// [-maxMatchStrength, maxMatchStrength] range.
func checkMatchStrength(value int) int {
	switch {
	case value > maxMatchStrength:
		return maxMatchStrength
	case value < -maxMatchStrength:
		return -maxMatchStrength
	default:
		return value
	}
}
// Key IDs used to tag the hashed fields when deriving a profile ID, so that
// the serialization of a fingerprint is unambiguous.
const (
	deriveFPKeyIDForItemStart = iota + 1
	deriveFPKeyIDForType
	deriveFPKeyIDForKey
	deriveFPKeyIDForOperation
	deriveFPKeyIDForValue
)
// DeriveProfileID derives a profile ID from the given fingerprints.
// The fingerprints are sorted and compacted first, so the resulting ID does
// not depend on their order. The MergedFrom field is not part of the hash.
func DeriveProfileID(fps []Fingerprint) string {
	// Sort the fingerprints for a stable hashing order.
	sortAndCompactFingerprints(fps)

	// Compile data for hashing.
	// Every field is prefixed with its key ID, so that empty and set fields
	// cannot produce ambiguous serializations.
	c := container.New(nil)
	c.AppendInt(len(fps))
	for _, fp := range fps {
		c.AppendNumber(deriveFPKeyIDForItemStart)
		if fp.Type != "" {
			c.AppendNumber(deriveFPKeyIDForType)
			c.AppendAsBlock([]byte(fp.Type))
		}
		if fp.Key != "" {
			c.AppendNumber(deriveFPKeyIDForKey)
			c.AppendAsBlock([]byte(fp.Key))
		}
		if fp.Operation != "" {
			c.AppendNumber(deriveFPKeyIDForOperation)
			c.AppendAsBlock([]byte(fp.Operation))
		}
		if fp.Value != "" {
			c.AppendNumber(deriveFPKeyIDForValue)
			c.AppendAsBlock([]byte(fp.Value))
		}
	}

	// Hash and return.
	h := lhash.Digest(lhash.SHA3_256, c.CompileData())
	return h.Base58()
}
// sortAndCompactFingerprints sorts the given fingerprints into a canonical
// order and removes exact duplicates. The slice is modified in place and the
// (possibly shortened) slice is returned.
func sortAndCompactFingerprints(fps []Fingerprint) []Fingerprint {
	// Sort by all fields, in a fixed field order.
	slices.SortFunc(fps, func(a, b Fingerprint) int {
		if c := strings.Compare(a.Type, b.Type); c != 0 {
			return c
		}
		if c := strings.Compare(a.Key, b.Key); c != 0 {
			return c
		}
		if c := strings.Compare(a.Operation, b.Operation); c != 0 {
			return c
		}
		if c := strings.Compare(a.Value, b.Value); c != 0 {
			return c
		}
		return strings.Compare(a.MergedFrom, b.MergedFrom)
	})

	// De-duplicate.
	// Important: Even if the fingerprint is the same, but MergedFrom is
	// different, we need to keep the separate fingerprint, so that new installs
	// will cleanly update to the synced state: Auto-generated profiles need to
	// be automatically replaced by the merged version.
	return slices.CompactFunc(fps, func(a, b Fingerprint) bool {
		return a.Type == b.Type &&
			a.Key == b.Key &&
			a.Operation == b.Operation &&
			a.Value == b.Value &&
			a.MergedFrom == b.MergedFrom
	})
}

View File

@@ -0,0 +1,53 @@
package profile
import (
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestDeriveProfileID checks that DeriveProfileID produces a stable, known ID
// that does not depend on the order of the given fingerprints.
func TestDeriveProfileID(t *testing.T) {
	t.Parallel()

	// Fixed fingerprint set with a known derived ID.
	fps := []Fingerprint{
		{
			Type:      FingerprintTypePathID,
			Operation: FingerprintOperationEqualsID,
			Value:     "/sbin/init",
		},
		{
			Type:      FingerprintTypePathID,
			Operation: FingerprintOperationPrefixID,
			Value:     "/",
		},
		{
			Type:      FingerprintTypeEnvID,
			Key:       "PORTMASTER_PROFILE",
			Operation: FingerprintOperationEqualsID,
			Value:     "TEST-1",
		},
		{
			Type:      FingerprintTypeTagID,
			Key:       "tag-key-1",
			Operation: FingerprintOperationEqualsID,
			Value:     "tag-key-2",
		},
	}

	// Create rand source for shuffling.
	rnd := rand.New(rand.NewSource(time.Now().UnixNano())) //nolint:gosec

	// Test 100 times.
	for i := 0; i < 100; i++ {
		// Shuffle fingerprints to verify order independence.
		rnd.Shuffle(len(fps), func(i, j int) {
			fps[i], fps[j] = fps[j], fps[i]
		})

		// Check if the derived ID matches the expected constant.
		id := DeriveProfileID(fps)
		assert.Equal(t, "PTSRP7rdCnmvdjRoPMTrtjj7qk7PxR1a9YdBWUGwnZXJh2", id)
	}
}

View File

@@ -0,0 +1,76 @@
package profile
// DEACTIVATED
// import (
// "fmt"
// "os"
// "path/filepath"
// "regexp"
// "strings"
//
// "github.com/safing/portbase/log"
// )
//
// type Framework struct {
// // go hierarchy up
// FindParent uint8 `json:",omitempty bson:",omitempty"`
// // get path from parent, amount of levels to go up the tree (1 means parent, 2 means parent of parents, and so on)
// MergeWithParent bool `json:",omitempty bson:",omitempty"`
// // instead of getting the path of the parent, merge with it by presenting connections as if they were from that parent
//
// // go hierarchy down
// Find string `json:",omitempty bson:",omitempty"`
// // Regular expression for finding path elements
// Build string `json:",omitempty bson:",omitempty"`
// // Path definitions for building path
// Virtual bool `json:",omitempty bson:",omitempty"`
// // Treat resulting path as virtual, do not check if valid
// }
//
// func (f *Framework) GetNewPath(command string, cwd string) (string, error) {
// // "/usr/bin/python script"
// // to
// // "/path/to/script"
// regex, err := regexp.Compile(f.Find)
// if err != nil {
// return "", fmt.Errorf("profiles(framework): failed to compile framework regex: %s", err)
// }
// matched := regex.FindAllStringSubmatch(command, -1)
// if len(matched) == 0 || len(matched[0]) < 2 {
// return "", fmt.Errorf("profiles(framework): regex \"%s\" for constructing path did not match command \"%s\"", f.Find, command)
// }
//
// var lastError error
// var buildPath string
// for _, buildPath = range strings.Split(f.Build, "|") {
//
// buildPath = strings.Replace(buildPath, "{CWD}", cwd, -1)
// for i := 1; i < len(matched[0]); i++ {
// buildPath = strings.Replace(buildPath, fmt.Sprintf("{%d}", i), matched[0][i], -1)
// }
//
// buildPath = filepath.Clean(buildPath)
//
// if !f.Virtual {
// if !strings.HasPrefix(buildPath, "~/") && !filepath.IsAbs(buildPath) {
// lastError = fmt.Errorf("constructed path \"%s\" from framework is not absolute", buildPath)
// continue
// }
// if _, err := os.Stat(buildPath); errors.Is(err, fs.ErrNotExist) {
// lastError = fmt.Errorf("constructed path \"%s\" does not exist", buildPath)
// continue
// }
// }
//
// lastError = nil
// break
//
// }
//
// if lastError != nil {
// return "", fmt.Errorf("profiles(framework): failed to construct valid path, last error: %s", lastError)
// }
// log.Tracef("profiles(framework): transformed \"%s\" (%s) to \"%s\"", command, cwd, buildPath)
// return buildPath, nil
// }

View File

@@ -0,0 +1,30 @@
package profile
// DEACTIVATED
// import (
// "testing"
// )
//
// func testGetNewPath(t *testing.T, f *Framework, command, cwd, expect string) {
// newPath, err := f.GetNewPath(command, cwd)
// if err != nil {
// t.Errorf("GetNewPath failed: %s", err)
// }
// if newPath != expect {
// t.Errorf("GetNewPath return unexpected result: got %s, expected %s", newPath, expect)
// }
// }
//
// func TestFramework(t *testing.T) {
// f1 := &Framework{
// Find: "([^ ]+)$",
// Build: "{CWD}/{1}",
// }
// testGetNewPath(t, f1, "/usr/bin/python bash", "/bin", "/bin/bash")
// f2 := &Framework{
// Find: "([^ ]+)$",
// Build: "{1}|{CWD}/{1}",
// }
// testGetNewPath(t, f2, "/usr/bin/python /bin/bash", "/tmp", "/bin/bash")
// }

345
service/profile/get.go Normal file
View File

@@ -0,0 +1,345 @@
package profile
import (
"context"
"errors"
"fmt"
"path"
"strings"
"sync"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/notifications"
)
// getProfileLock guards the entire profile fetching process in
// GetLocalProfile to guarantee the integrity of active profiles.
var getProfileLock sync.Mutex
// GetLocalProfile fetches a profile. This function ensures that the loaded profile
// is shared among all callers. Always provide all available data points.
// Passing an ID without MatchingData is valid, but could lead to inconsistent
// data - use with caution.
func GetLocalProfile(id string, md MatchingData, createProfileCallback func() *Profile) ( //nolint:gocognit
	profile *Profile,
	err error,
) {
	// Globally lock getting a profile.
	// This does not happen too often, and it ensures we really have integrity
	// and no race conditions.
	getProfileLock.Lock()
	defer getProfileLock.Unlock()

	var previousVersion *Profile

	// Get active profile based on the ID, if available.
	if id != "" {
		// Check if there already is an active profile.
		profile = getActiveProfile(MakeScopedID(SourceLocal, id))
		if profile != nil {
			// Mark active and return if not outdated.
			if profile.outdated.IsNotSet() {
				profile.MarkStillActive()
				return profile, nil
			}
			// If outdated, get from database.
			previousVersion = profile
			profile = nil
		}
	}

	// In some cases, we might need to get a profile directly, without matching data.
	// This could lead to inconsistent data - use with caution.
	// Example: Saving prompt results to profile should always be to the same ID!
	if md == nil {
		if id == "" {
			return nil, errors.New("cannot get local profiles without ID and matching data")
		}
		profile, err = getProfile(MakeScopedID(SourceLocal, id))
		if err != nil {
			return nil, fmt.Errorf("failed to load profile %s by ID: %w", MakeScopedID(SourceLocal, id), err)
		}
	}

	// Check if we are requesting a special profile.
	var created, special bool
	if id != "" && isSpecialProfileID(id) {
		special = true

		// Get special profile from DB.
		if profile == nil {
			profile, err = getProfile(MakeScopedID(SourceLocal, id))
			if err != nil && !errors.Is(err, database.ErrNotFound) {
				log.Warningf("profile: failed to get special profile %s: %s", id, err)
			}
		}

		// Create profile if not found or if it needs a reset.
		// NOTE(review): md may be nil here when called by ID only - confirm
		// that special profiles are never fetched without matching data.
		if profile == nil || specialProfileNeedsReset(profile) {
			profile = createSpecialProfile(id, md.Path())
			created = true
		}
	}

	// If we don't have a profile yet, find profile based on matching data.
	if profile == nil {
		profile, err = findProfile(SourceLocal, md)
		if err != nil {
			return nil, fmt.Errorf("failed to search for profile: %w", err)
		}
	}

	// If we still don't have a profile, create a new one.
	if profile == nil {
		created = true

		// Try the profile creation callback, if we have one.
		if createProfileCallback != nil {
			profile = createProfileCallback()
		}

		// If that did not work, create a standard profile that matches the
		// process path exactly.
		if profile == nil {
			fpPath := md.MatchingPath()
			if fpPath == "" {
				fpPath = md.Path()
			}
			profile = New(&Profile{
				ID:                  id,
				Source:              SourceLocal,
				PresentationPath:    md.Path(),
				UsePresentationPath: true,
				Fingerprints: []Fingerprint{
					{
						Type:      FingerprintTypePathID,
						Operation: FingerprintOperationEqualsID,
						Value:     fpPath,
					},
				},
			})
		}
	}

	// Initialize and update profile.

	// Update metadata.
	var changed bool
	if md != nil {
		if special {
			changed = updateSpecialProfileMetadata(profile, md.Path())
		} else {
			changed = profile.updateMetadata(md.Path())
		}
	}

	// Save if created or changed.
	if created || changed {
		// Save profile. A save failure is logged, not returned, so the
		// (in-memory) profile can still be used.
		err := profile.Save()
		if err != nil {
			log.Warningf("profile: failed to save profile %s after creation: %s", profile.ScopedID(), err)
		}
	}

	// Trigger further metadata fetching from system if profile was created.
	if created && profile.UsePresentationPath && !special {
		module.StartWorker("get profile metadata", func(ctx context.Context) error {
			return profile.updateMetadataFromSystem(ctx, md)
		})
	}

	// Prepare profile for first use.

	// Process profiles are coming directly from the database or are new.
	// As we don't use any caching, these will be new objects.

	// Add a layeredProfile.
	// If we are refetching, assign the layered profile from the previous version.
	// The internal references will be updated when the layered profile checks for updates.
	if previousVersion != nil && previousVersion.layeredProfile != nil {
		profile.layeredProfile = previousVersion.layeredProfile
	}

	// Profiles must have a layered profile, create a new one if it
	// does not yet exist.
	if profile.layeredProfile == nil {
		profile.layeredProfile = NewLayeredProfile(profile)
	}

	// Add the profile to the currently active profiles.
	addActiveProfile(profile)

	return profile, nil
}
// getProfile fetches the profile for the given scoped ID directly from the
// database.
func getProfile(scopedID string) (profile *Profile, err error) {
	// Get profile from the database.
	r, err := profileDB.Get(ProfilesDBPath + scopedID)
	if err != nil {
		return nil, err
	}

	// Parse and prepare the profile, return the result.
	return loadProfile(r)
}
// findProfile searches for the profile that best matches the given matching
// data, using the fingerprint scoring scheme. It returns nil without an error
// if no profile matches. If two profiles match with the same highest score,
// the user is notified of the conflict and the first match is used.
func findProfile(source ProfileSource, md MatchingData) (profile *Profile, err error) {
	// TODO: Loading every profile from database and parsing it for every new
	// process might be quite expensive. Measure impact and possibly improve.

	// Get iterator over all profiles.
	it, err := profileDB.Query(query.New(ProfilesDBPath + MakeScopedID(source, "")))
	if err != nil {
		return nil, fmt.Errorf("failed to query for profiles: %w", err)
	}

	// Find best matching profile.
	var (
		highestScore int
		bestMatch    record.Record
	)
profileFeed:
	for r := range it.Next {
		// Parse fingerprints.
		prints, err := loadProfileFingerprints(r)
		if err != nil {
			log.Debugf("profile: failed to load fingerprints of %s: %s", r.Key(), err)
		}
		// Continue with any returned fingerprints.
		if prints == nil {
			continue profileFeed
		}

		// Get matching score and compare.
		score := MatchFingerprints(prints, md)
		switch {
		case score == 0:
			// Continue to next.
		case score > highestScore:
			highestScore = score
			bestMatch = r
		case score == highestScore:
			// Notify user of conflict and abort.
			// Use first match - this should be consistent.
			notifyConflictingProfiles(bestMatch, r, md)
			it.Cancel()
			break profileFeed
		}
	}

	// Check if there was an error while iterating.
	// Note: Check the iterator's own error - the loop error variable is
	// shadowed and would always be nil here.
	if itErr := it.Err(); itErr != nil {
		return nil, fmt.Errorf("failed to iterate over profiles: %w", itErr)
	}

	// Return nothing if no profile matched.
	if bestMatch == nil {
		return nil, nil
	}

	// If we have a match, parse and return the profile.
	profile, err = loadProfile(bestMatch)
	if err != nil {
		return nil, fmt.Errorf("failed to parse selected profile %s: %w", bestMatch.Key(), err)
	}

	// Check if this profile is already active and return the active version instead.
	if activeProfile := getActiveProfile(profile.ScopedID()); activeProfile != nil && !activeProfile.IsOutdated() {
		return activeProfile, nil
	}

	return profile, nil
}
// loadProfileFingerprints parses the fingerprints of the profile held in the
// given database record.
func loadProfileFingerprints(r record.Record) (parsed *ParsedFingerprints, err error) {
	// Ensure it's a profile.
	profile, err := EnsureProfile(r)
	if err != nil {
		return nil, err
	}

	// Parse and return fingerprints.
	// The deprecated LinkedPath is passed as a fallback fingerprint source.
	return ParseFingerprints(profile.Fingerprints, profile.LinkedPath)
}
// loadProfile parses and prepares the profile held in the given database
// record for use, and marks it as recently seen.
func loadProfile(r record.Record) (*Profile, error) {
	// Ensure it's a profile.
	profile, err := EnsureProfile(r)
	if err != nil {
		return nil, err
	}

	// Prepare profile for use.
	profile.prepProfile()

	// Parse config.
	// A config error is logged, not returned, so the profile can still be
	// used with the (partly) parsed configuration.
	err = profile.parseConfig()
	if err != nil {
		log.Errorf("profiles: profile %s has (partly) invalid configuration: %s", profile.ID, err)
	}

	// Set saved internally to suppress outdating profiles if saving internally.
	profile.savedInternally = true

	// Mark as recently seen.
	meta.UpdateLastSeen(profile.ScopedID())

	// Return the parsed profile.
	return profile, nil
}
// notifyConflictingProfiles notifies the user that the two given profiles
// match the app described by the matching data with the same priority, and
// offers actions to edit either of them to resolve the conflict.
func notifyConflictingProfiles(a, b record.Record, md MatchingData) {
	// Get profile IDs and names, falling back to the database key when a
	// record cannot be parsed as a profile.
	var idA, nameA, idB, nameB string
	profileA, err := EnsureProfile(a)
	if err == nil {
		idA = profileA.ScopedID()
		nameA = profileA.Name
	} else {
		idA = strings.TrimPrefix(a.Key(), ProfilesDBPath)
		nameA = path.Base(idA)
	}
	profileB, err := EnsureProfile(b)
	if err == nil {
		idB = profileB.ScopedID()
		nameB = profileB.Name
	} else {
		idB = strings.TrimPrefix(b.Key(), ProfilesDBPath)
		nameB = path.Base(idB)
	}

	// Notify user about conflict.
	// Note: Fixed message typo "on of them" -> "one of them".
	notifications.NotifyWarn(
		fmt.Sprintf("profiles:match-conflict:%s:%s", idA, idB),
		"App Settings Match Conflict",
		fmt.Sprintf(
			"Multiple app settings match the app at %q with the same priority, please change one of them: %q or %q",
			md.Path(),
			nameA,
			nameB,
		),
		notifications.Action{
			Text:    "Change (1)",
			Type:    notifications.ActionTypeOpenProfile,
			Payload: idA,
		},
		notifications.Action{
			Text:    "Change (2)",
			Type:    notifications.ActionTypeOpenProfile,
			Payload: idB,
		},
		notifications.Action{
			ID:   "ack",
			Text: "OK",
		},
	)
}

104
service/profile/merge.go Normal file
View File

@@ -0,0 +1,104 @@
package profile
import (
"errors"
"fmt"
"sync"
"time"
"github.com/safing/portbase/database/record"
"github.com/safing/portmaster/service/profile/binmeta"
)
// MergeProfiles merges multiple profiles into a new one.
// The new profile is saved and returned.
// Only the icon and fingerprints are inherited from other profiles.
// All other information is taken only from the primary profile.
// The merged-from profiles are deleted and a MigratedEvent is triggered for
// each of them.
func MergeProfiles(name string, primary *Profile, secondaries ...*Profile) (newProfile *Profile, err error) {
	if primary == nil || len(secondaries) == 0 {
		return nil, errors.New("must supply both a primary and at least one secondary profile for merging")
	}

	// Fill info from primary profile.
	nowUnix := time.Now().Unix()
	newProfile = &Profile{
		Base:                record.Base{},
		RWMutex:             sync.RWMutex{},
		ID:                  "", // Omit ID to derive it from the new fingerprints.
		Source:              primary.Source,
		Name:                name,
		Description:         primary.Description,
		Homepage:            primary.Homepage,
		UsePresentationPath: false, // Disable presentation path.
		Config:              primary.Config,
		Created:             nowUnix,
	}

	// Fall back to name of primary profile, if none is set.
	if newProfile.Name == "" {
		newProfile.Name = primary.Name
	}

	// If any profile was edited, set LastEdited to now.
	if primary.LastEdited > 0 {
		newProfile.LastEdited = nowUnix
	} else {
		for _, sp := range secondaries {
			if sp.LastEdited > 0 {
				newProfile.LastEdited = nowUnix
				break
			}
		}
	}

	// Collect all icons.
	newProfile.Icons = make([]binmeta.Icon, 0, len(secondaries)+1) // Guess the needed space.
	newProfile.Icons = append(newProfile.Icons, primary.Icons...)
	for _, sp := range secondaries {
		newProfile.Icons = append(newProfile.Icons, sp.Icons...)
	}
	newProfile.Icons = binmeta.SortAndCompactIcons(newProfile.Icons)

	// Collect all fingerprints, recording which profile each came from.
	newProfile.Fingerprints = make([]Fingerprint, 0, len(primary.Fingerprints)+len(secondaries)) // Guess the needed space.
	newProfile.Fingerprints = addFingerprints(newProfile.Fingerprints, primary.Fingerprints, primary.ScopedID())
	for _, sp := range secondaries {
		newProfile.Fingerprints = addFingerprints(newProfile.Fingerprints, sp.Fingerprints, sp.ScopedID())
	}
	newProfile.Fingerprints = sortAndCompactFingerprints(newProfile.Fingerprints)

	// Save new profile.
	newProfile = New(newProfile)
	if err := newProfile.Save(); err != nil {
		return nil, fmt.Errorf("failed to save merged profile: %w", err)
	}

	// Delete all previous profiles and announce each migration.
	if err := primary.delete(); err != nil {
		return nil, fmt.Errorf("failed to delete primary profile %s: %w", primary.ScopedID(), err)
	}
	module.TriggerEvent(MigratedEvent, []string{primary.ScopedID(), newProfile.ScopedID()})
	for _, sp := range secondaries {
		if err := sp.delete(); err != nil {
			return nil, fmt.Errorf("failed to delete secondary profile %s: %w", sp.ScopedID(), err)
		}
		module.TriggerEvent(MigratedEvent, []string{sp.ScopedID(), newProfile.ScopedID()})
	}

	return newProfile, nil
}
// addFingerprints appends copies of the given fingerprints to the existing
// slice, recording the scoped profile ID they were merged from.
func addFingerprints(existing, add []Fingerprint, from string) []Fingerprint {
	for _, fp := range add {
		copied := fp
		copied.MergedFrom = from
		existing = append(existing, copied)
	}
	return existing
}

184
service/profile/meta.go Normal file
View File

@@ -0,0 +1,184 @@
package profile
import (
"fmt"
"sync"
"time"
"github.com/safing/portbase/database/record"
)
// ProfilesMetadata holds metadata about all profiles that are not fit to be
// stored with the profiles themselves.
type ProfilesMetadata struct {
	record.Base
	sync.Mutex

	// States maps a scoped profile ID to its current metadata state.
	States map[string]*MetaState
}
// MetaState describes the state of a profile.
type MetaState struct {
	// State is one of the MetaState* constants.
	State string
	// At is the (UTC) time at which the state was recorded.
	At time.Time
}
// Profile metadata states.
const (
	MetaStateSeen    = "seen"    // Profile was recently seen/used.
	MetaStateDeleted = "deleted" // Profile was deleted.
)
// EnsureProfilesMetadata ensures that the given record is a *ProfilesMetadata, and returns it.
func EnsureProfilesMetadata(r record.Record) (*ProfilesMetadata, error) {
	// Unwrap record, if needed.
	if r.IsWrapped() {
		// Only allocate a new struct, if we need it.
		newMeta := &ProfilesMetadata{}
		err := record.Unwrap(r, newMeta)
		if err != nil {
			return nil, err
		}
		return newMeta, nil
	}

	// Or adjust the type directly.
	newMeta, ok := r.(*ProfilesMetadata)
	if !ok {
		// Fixed: error previously reported the wrong expected type (*Profile).
		return nil, fmt.Errorf("record not of type *ProfilesMetadata, but %T", r)
	}
	return newMeta, nil
}
var (
	// profilesMetadataKey is the database key the metadata record is stored under.
	profilesMetadataKey = "core:profile-states"

	// meta is the package-wide profiles metadata instance, set at module start
	// by loadProfilesMetadata.
	meta *ProfilesMetadata

	// removeDeletedEntriesAfter is how long deleted-profile entries are kept
	// before Clean removes them.
	removeDeletedEntriesAfter = 30 * 24 * time.Hour
)
// loadProfilesMetadata loads the profile metadata from the database.
// It may only be called during module starting, as there is no lock for "meta" itself.
func loadProfilesMetadata() error {
	// Fetch the metadata record from the database.
	r, err := profileDB.Get(profilesMetadataKey)
	if err != nil {
		return err
	}
	loadedMeta, err := EnsureProfilesMetadata(r)
	if err != nil {
		return err
	}

	// Set package variable.
	meta = loadedMeta
	return nil
}
// check lazily initializes the States map.
// NOTE(review): mutates shared state without taking the lock - callers appear
// to be responsible for locking; confirm at call sites.
func (meta *ProfilesMetadata) check() {
	if meta.States == nil {
		meta.States = make(map[string]*MetaState)
	}
}
// Save saves the profile metadata to the database.
// A nil receiver is a no-op.
func (meta *ProfilesMetadata) Save() error {
	if meta == nil {
		return nil
	}

	// Set the database key under lock, if not yet set.
	func() {
		meta.Lock()
		defer meta.Unlock()

		if !meta.KeyIsSet() {
			meta.SetKey(profilesMetadataKey)
		}
	}()

	// Remove stale entries before persisting.
	meta.Clean()

	return profileDB.Put(meta)
}
// Clean removes old entries.
// A nil receiver is a no-op.
func (meta *ProfilesMetadata) Clean() {
	if meta == nil {
		return
	}
	meta.Lock()
	defer meta.Unlock()

	// Deleting while ranging over a map is safe in Go.
	for key, state := range meta.States {
		// Drop broken entries right away.
		if state == nil {
			delete(meta.States, key)
			continue
		}
		// Only deleted entries expire.
		if state.State != MetaStateDeleted {
			continue
		}
		// Expire deleted entries after the retention period.
		if time.Since(state.At) > removeDeletedEntriesAfter {
			delete(meta.States, key)
		}
	}
}
// GetLastSeen returns when the profile with the given ID was last seen.
// It returns nil for unknown profiles, broken entries, and entries that are
// not in the "seen" state. A nil receiver is a no-op.
func (meta *ProfilesMetadata) GetLastSeen(scopedID string) *time.Time {
	if meta == nil {
		return nil
	}
	meta.Lock()
	defer meta.Unlock()

	if state, ok := meta.States[scopedID]; ok && state != nil && state.State == MetaStateSeen {
		return &state.At
	}
	return nil
}
// UpdateLastSeen sets the profile with the given ID as last seen now.
// A nil receiver is a no-op.
func (meta *ProfilesMetadata) UpdateLastSeen(scopedID string) {
	if meta == nil {
		return
	}
	entry := &MetaState{
		State: MetaStateSeen,
		At:    time.Now().UTC(),
	}

	meta.Lock()
	defer meta.Unlock()
	meta.States[scopedID] = entry
}
// MarkDeleted marks the profile with the given ID as deleted.
// A nil receiver is a no-op.
func (meta *ProfilesMetadata) MarkDeleted(scopedID string) {
	if meta == nil {
		return
	}
	entry := &MetaState{
		State: MetaStateDeleted,
		At:    time.Now().UTC(),
	}

	meta.Lock()
	defer meta.Unlock()
	meta.States[scopedID] = entry
}
// RemoveState removes any state of the profile with the given ID.
// A nil receiver is a no-op.
func (meta *ProfilesMetadata) RemoveState(scopedID string) {
	if meta == nil {
		return
	}
	meta.Lock()
	defer meta.Unlock()

	delete(meta.States, scopedID)
}

View File

@@ -0,0 +1,229 @@
package profile
import (
"context"
"fmt"
"regexp"
"github.com/hashicorp/go-version"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/migration"
"github.com/safing/portbase/database/query"
"github.com/safing/portbase/log"
"github.com/safing/portmaster/service/profile/binmeta"
)
// registerMigrations registers all profile database migrations with the
// package-level migration registry, ordered by their target version.
func registerMigrations() error {
	return migrations.Add(
		migration.Migration{
			Description: "Migrate from LinkedPath to Fingerprints and PresentationPath",
			Version:     "v0.9.9",
			MigrateFunc: migrateLinkedPath,
		},
		migration.Migration{
			Description: "Migrate from Icon Fields to Icon List",
			Version:     "v1.4.7",
			MigrateFunc: migrateIcons,
		},
		migration.Migration{
			Description: "Migrate from random profile IDs to fingerprint-derived IDs",
			Version:     "v1.6.3", // Re-run after mixed results in v1.6.0
			MigrateFunc: migrateToDerivedIDs,
		},
	)
}
// migrateLinkedPath migrates profiles from the deprecated LinkedPath field
// to the newer metadata (via updateMetadata).
// Errors on individual profiles are logged and skipped so one broken record
// does not block the whole migration; query errors are logged and swallowed.
func migrateLinkedPath(ctx context.Context, _, to *version.Version, db *database.Interface) error {
	// Get iterator over all profiles.
	it, err := db.Query(query.New(ProfilesDBPath))
	if err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate from linked path: failed to start query: %s", err)
		return nil
	}

	// Migrate all profiles.
	for r := range it.Next {
		// Parse profile.
		profile, err := EnsureProfile(r)
		if err != nil {
			log.Tracer(ctx).Debugf("profiles: failed to parse profile %s for migration: %s", r.Key(), err)
			continue
		}

		// Skip if there is no LinkedPath to migrate from.
		if profile.LinkedPath == "" {
			continue
		}

		// Update metadata and save if changed.
		if profile.updateMetadata("") {
			err = db.Put(profile)
			if err != nil {
				log.Tracer(ctx).Debugf("profiles: failed to save profile %s after migration: %s", r.Key(), err)
			} else {
				log.Tracer(ctx).Tracef("profiles: migrated profile %s to %s", r.Key(), to)
			}
		}
	}

	// Check if there was an error while iterating.
	if err := it.Err(); err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate from linked path: failed to iterate over profiles for migration: %s", err)
	}
	return nil
}
// migrateIcons migrates profiles from the deprecated Icon/IconType fields to
// the Icons list. Save failures are counted and escalated as a module error,
// because this migration doubles as a test run for the ID migration.
func migrateIcons(ctx context.Context, _, to *version.Version, db *database.Interface) error {
	// Get iterator over all profiles.
	it, err := db.Query(query.New(ProfilesDBPath))
	if err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate from icon fields: failed to start query: %s", err)
		return nil
	}

	// Migrate all profiles.
	var (
		lastErr error
		failed  int
		total   int
	)
	for r := range it.Next {
		// Parse profile.
		profile, err := EnsureProfile(r)
		if err != nil {
			log.Tracer(ctx).Debugf("profiles: failed to parse profile %s for migration: %s", r.Key(), err)
			continue
		}

		// Skip if there is no (valid) icon defined or the icon list is already populated.
		if profile.Icon == "" || profile.IconType == "" || len(profile.Icons) > 0 {
			continue
		}

		// Migrate to icon list.
		profile.Icons = []binmeta.Icon{{
			Type:  profile.IconType,
			Value: profile.Icon,
		}}

		// Save back to DB.
		err = db.Put(profile)
		if err != nil {
			failed++
			lastErr = err
			log.Tracer(ctx).Debugf("profiles: failed to save profile %s after migration: %s", r.Key(), err)
		} else {
			log.Tracer(ctx).Tracef("profiles: migrated profile %s to %s", r.Key(), to)
		}
		total++
	}

	// Check if there was an error while iterating.
	if err := it.Err(); err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate from icon fields: failed to iterate over profiles for migration: %s", err)
	}

	// Log migration failure and try again next time.
	if lastErr != nil {
		// Normally, an icon migration would not be such a big error, but this is a test
		// run for the profile IDs and we absolutely need to know if anything went wrong.
		module.Error(
			"migration-failed",
			"Profile Migration Failed",
			fmt.Sprintf("Failed to migrate icons of %d profiles (out of %d pending). The last error was: %s\n\nPlease restart Portmaster to try the migration again.", failed, total, lastErr),
		)
		return fmt.Errorf("failed to migrate %d profiles (out of %d pending) - last error: %w", failed, total, lastErr)
	}

	// Fixed: was "return lastErr", which is always nil at this point.
	// Return nil explicitly for clarity and consistency with the other migrations.
	return nil
}
// randomUUIDRegex matches IDs in canonical UUID form, identifying profiles
// that still use a randomly generated ID instead of a fingerprint-derived one.
var randomUUIDRegex = regexp.MustCompile(`^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$`)
// migrateToDerivedIDs migrates profiles from random UUID IDs to IDs derived
// from their fingerprints. Successfully migrated profiles are saved under the
// new key first, and the old records are deleted only afterwards, so a crash
// mid-migration cannot lose profiles. Failures are counted and escalated as a
// module error so the migration is retried on the next start.
func migrateToDerivedIDs(ctx context.Context, _, to *version.Version, db *database.Interface) error {
	var profilesToDelete []string //nolint:prealloc // We don't know how many profiles there are.

	// Get iterator over all profiles.
	it, err := db.Query(query.New(ProfilesDBPath))
	if err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate to derived profile IDs: failed to start query: %s", err)
		return nil
	}

	// Migrate all profiles.
	var (
		lastErr error
		failed  int
		total   int
	)
	for r := range it.Next {
		// Parse profile.
		profile, err := EnsureProfile(r)
		if err != nil {
			failed++
			lastErr = err
			log.Tracer(ctx).Debugf("profiles: failed to parse profile %s for migration: %s", r.Key(), err)
			continue
		}

		// Skip if the ID does not look like a random UUID.
		if !randomUUIDRegex.MatchString(profile.ID) {
			continue
		}

		// Generate new ID.
		oldScopedID := profile.ScopedID()
		newID := DeriveProfileID(profile.Fingerprints)

		// If they match, skip migration for this profile.
		if profile.ID == newID {
			continue
		}

		// Reset key.
		profile.ResetKey()

		// Set new ID and rebuild the key.
		profile.ID = newID
		profile.makeKey()

		// Save back to DB.
		err = db.Put(profile)
		if err != nil {
			failed++
			lastErr = err
			log.Tracer(ctx).Debugf("profiles: failed to save profile %s after migration: %s", r.Key(), err)
		} else {
			log.Tracer(ctx).Tracef("profiles: migrated profile %s to %s", r.Key(), to)

			// Add old ID to profiles that we need to delete.
			profilesToDelete = append(profilesToDelete, oldScopedID)
		}
		total++
	}

	// Check if there was an error while iterating.
	if err := it.Err(); err != nil {
		log.Tracer(ctx).Errorf("profile: failed to migrate to derived profile IDs: failed to iterate over profiles for migration: %s", err)
	}

	// Delete old migrated profiles.
	for _, scopedID := range profilesToDelete {
		if err := db.Delete(ProfilesDBPath + scopedID); err != nil {
			log.Tracer(ctx).Errorf("profile: failed to delete old profile %s during migration: %s", scopedID, err)
		}
	}

	// Log migration failure and try again next time.
	if lastErr != nil {
		module.Error(
			"migration-failed",
			"Profile Migration Failed",
			fmt.Sprintf("Failed to migrate profile IDs of %d profiles (out of %d pending). The last error was: %s\n\nPlease restart Portmaster to try the migration again.", failed, total, lastErr),
		)
		return fmt.Errorf("failed to migrate %d profiles (out of %d pending) - last error: %w", failed, total, lastErr)
	}

	return nil
}

110
service/profile/module.go Normal file
View File

@@ -0,0 +1,110 @@
package profile
import (
"errors"
"fmt"
"os"
"github.com/safing/portbase/database"
"github.com/safing/portbase/database/migration"
"github.com/safing/portbase/dataroot"
"github.com/safing/portbase/log"
"github.com/safing/portbase/modules"
_ "github.com/safing/portmaster/service/core/base"
"github.com/safing/portmaster/service/profile/binmeta"
"github.com/safing/portmaster/service/updates"
)
var (
	// migrations holds all registered profile database migrations.
	migrations = migration.New("core:migrations/profile")

	// module is the profiles module instance, set in init.
	module *modules.Module

	// updatesPath caches the updates root path with a trailing separator.
	updatesPath string
)

// Events.
const (
	ConfigChangeEvent = "profile config change"
	DeletedEvent      = "profile deleted"
	MigratedEvent     = "profile migrated"
)
// init registers the profiles module (depending on "base" and "updates")
// and its events.
func init() {
	module = modules.Register("profiles", prep, start, stop, "base", "updates")
	module.RegisterEvent(ConfigChangeEvent, true)
	module.RegisterEvent(DeletedEvent, true)
	module.RegisterEvent(MigratedEvent, true)
}
// prep runs the module preparation phase: configuration, config updater and
// migration registration, then the icon storage setup.
func prep() error {
	// Run all registration steps in order, aborting on the first error.
	for _, register := range []func() error{
		registerConfiguration,
		registerConfigUpdater,
		registerMigrations,
	} {
		if err := register(); err != nil {
			return err
		}
	}

	// Setup icon storage location.
	iconsDir := dataroot.Root().ChildDir("databases", 0o0700).ChildDir("icons", 0o0700)
	if err := iconsDir.Ensure(); err != nil {
		return fmt.Errorf("failed to create/check icons directory: %w", err)
	}
	binmeta.ProfileIconStoragePath = iconsDir.Path

	return nil
}
// start runs the module start sequence: resolve the updates path, load
// metadata, run migrations, register hooks and providers, start workers,
// and load the global config profile.
// The order of these steps matters and should not be changed casually.
func start() error {
	updatesPath = updates.RootPath()
	if updatesPath != "" {
		updatesPath += string(os.PathSeparator)
	}

	// Load metadata; fall back to an empty state if none exists yet.
	if err := loadProfilesMetadata(); err != nil {
		if !errors.Is(err, database.ErrNotFound) {
			log.Warningf("profile: failed to load profiles metadata, falling back to empty state: %s", err)
		}
		meta = &ProfilesMetadata{}
	}
	meta.check()

	// Run migrations before anything else uses the profile database.
	// A migration failure is logged but does not block startup.
	if err := migrations.Migrate(module.Ctx); err != nil {
		log.Errorf("profile: migrations failed: %s", err)
	}

	err := registerValidationDBHook()
	if err != nil {
		return err
	}

	err = registerRevisionProvider()
	if err != nil {
		return err
	}

	err = startProfileUpdateChecker()
	if err != nil {
		return err
	}

	module.StartServiceWorker("clean active profiles", 0, cleanActiveProfiles)

	// Loading the global profile from configuration is non-fatal.
	err = updateGlobalConfigProfile(module.Ctx, nil)
	if err != nil {
		log.Warningf("profile: error during loading global profile from configuration: %s", err)
	}

	if err := registerAPIEndpoints(); err != nil {
		return err
	}

	return nil
}
// stop persists the profile metadata on module shutdown.
func stop() error {
	return meta.Save()
}

View File

@@ -0,0 +1,84 @@
package profile
import (
"errors"
"strings"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
)
const (
	// revisionProviderPrefix is the runtime database prefix under which
	// layered profiles are published.
	revisionProviderPrefix = "layeredProfile/"
)

var (
	// errProfileNotActive is returned when the requested profile is not active.
	errProfileNotActive = errors.New("profile not active")
	// errNoLayeredProfile is returned when a profile has no layered profile.
	errNoLayeredProfile = errors.New("profile has no layered profile")

	// pushLayeredProfile pushes layered profile updates to runtime subscribers.
	// It defaults to a no-op until registerRevisionProvider replaces it.
	pushLayeredProfile runtime.PushFunc = func(...record.Record) {}
)
// registerRevisionProvider registers the layered-profile runtime value
// provider and stores its push function in pushLayeredProfile.
func registerRevisionProvider() error {
	pushFn, err := runtime.Register(
		revisionProviderPrefix,
		runtime.SimpleValueGetterFunc(getRevisions),
	)
	if err != nil {
		return err
	}

	pushLayeredProfile = pushFn
	return nil
}
// getRevisions returns the layered profiles for the given runtime key.
// An empty key (after trimming the provider prefix) selects all active
// profiles; otherwise the single matching active profile is selected.
func getRevisions(key string) ([]record.Record, error) {
	key = strings.TrimPrefix(key, revisionProviderPrefix)

	var profiles []*Profile
	switch key {
	case "":
		profiles = getAllActiveProfiles()
	default:
		// Get active profile.
		active := getActiveProfile(key)
		if active == nil {
			return nil, errProfileNotActive
		}
		profiles = []*Profile{active}
	}

	// Collect the layered profile of every selected profile.
	records := make([]record.Record, 0, len(profiles))
	for _, profile := range profiles {
		layered, err := getProfileRevision(profile)
		if err != nil {
			log.Warningf("failed to get layered profile for %s: %s", profile.ID, err)
			continue
		}
		records = append(records, layered)
	}
	return records, nil
}
// getProfileRevision returns the layered profile for p.
// It also updates the layered profile if required.
func getProfileRevision(p *Profile) (*LayeredProfile, error) {
	// Get layered profile.
	layeredProfile := p.LayeredProfile()
	if layeredProfile == nil {
		return nil, errNoLayeredProfile
	}

	// Update profiles if necessary.
	// TODO: Cannot update as we have too little information.
	// Just return the current state. Previous code:
	// if layeredProfile.NeedsUpdate() {
	// 	layeredProfile.Update()
	// }
	return layeredProfile, nil
}

View File

@@ -0,0 +1,538 @@
package profile
import (
"context"
"sync"
"sync/atomic"
"github.com/safing/portbase/config"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/service/intel"
"github.com/safing/portmaster/service/profile/endpoints"
)
// LayeredProfile combines multiple Profiles.
// The first layer is the local profile; lookups walk the layers in order and
// fall back to the global configuration.
type LayeredProfile struct {
	record.Base
	sync.RWMutex

	// localProfile is the first layer; kept separately for direct access.
	localProfile *Profile
	// layers holds all profiles, in lookup order.
	layers []*Profile

	// LayerIDs holds the scoped IDs of the layers, in the same order.
	LayerIDs        []string
	// RevisionCounter is incremented whenever the layered profile changes;
	// the wrapped option getters use it to invalidate their caches.
	RevisionCounter uint64

	// globalValidityFlag tracks whether the global config has changed.
	globalValidityFlag *config.ValidityFlag
	// securityLevel is accessed atomically; see SecurityLevel.
	securityLevel      *uint32

	// These functions give layered access to configuration options and require
	// the layered profile to be read locked.
	// TODO(ppacher): we need JSON tags here so the layeredProfile can be exposed
	// via the API. If we ever switch away from JSON to something else supported
	// by DSD this WILL BREAK!
	DisableAutoPermit   config.BoolOption   `json:"-"`
	BlockScopeLocal     config.BoolOption   `json:"-"`
	BlockScopeLAN       config.BoolOption   `json:"-"`
	BlockScopeInternet  config.BoolOption   `json:"-"`
	BlockP2P            config.BoolOption   `json:"-"`
	BlockInbound        config.BoolOption   `json:"-"`
	RemoveOutOfScopeDNS config.BoolOption   `json:"-"`
	RemoveBlockedDNS    config.BoolOption   `json:"-"`
	FilterSubDomains    config.BoolOption   `json:"-"`
	FilterCNAMEs        config.BoolOption   `json:"-"`
	PreventBypassing    config.BoolOption   `json:"-"`
	DomainHeuristics    config.BoolOption   `json:"-"`
	UseSPN              config.BoolOption   `json:"-"`
	SPNRoutingAlgorithm config.StringOption `json:"-"`
	EnableHistory       config.BoolOption   `json:"-"`
	KeepHistory         config.IntOption    `json:"-"`
}
// NewLayeredProfile returns a new layered profile based on the given local
// profile, which becomes the first (and currently only) layer.
// All layered config option getters are wired up here, the record key is set
// under the revision provider prefix, and the new layered profile is pushed
// to database subscribers.
func NewLayeredProfile(localProfile *Profile) *LayeredProfile {
	var securityLevelVal uint32

	lp := &LayeredProfile{
		localProfile:       localProfile,
		layers:             make([]*Profile, 0, 1),
		LayerIDs:           make([]string, 0, 1),
		globalValidityFlag: config.NewValidityFlag(),
		RevisionCounter:    1,
		securityLevel:      &securityLevelVal,
	}

	// Wire up every layered config option to its key and global fallback.
	lp.DisableAutoPermit = lp.wrapBoolOption(
		CfgOptionDisableAutoPermitKey,
		cfgOptionDisableAutoPermit,
	)
	lp.BlockScopeLocal = lp.wrapBoolOption(
		CfgOptionBlockScopeLocalKey,
		cfgOptionBlockScopeLocal,
	)
	lp.BlockScopeLAN = lp.wrapBoolOption(
		CfgOptionBlockScopeLANKey,
		cfgOptionBlockScopeLAN,
	)
	lp.BlockScopeInternet = lp.wrapBoolOption(
		CfgOptionBlockScopeInternetKey,
		cfgOptionBlockScopeInternet,
	)
	lp.BlockP2P = lp.wrapBoolOption(
		CfgOptionBlockP2PKey,
		cfgOptionBlockP2P,
	)
	lp.BlockInbound = lp.wrapBoolOption(
		CfgOptionBlockInboundKey,
		cfgOptionBlockInbound,
	)
	lp.RemoveOutOfScopeDNS = lp.wrapBoolOption(
		CfgOptionRemoveOutOfScopeDNSKey,
		cfgOptionRemoveOutOfScopeDNS,
	)
	lp.RemoveBlockedDNS = lp.wrapBoolOption(
		CfgOptionRemoveBlockedDNSKey,
		cfgOptionRemoveBlockedDNS,
	)
	lp.FilterSubDomains = lp.wrapBoolOption(
		CfgOptionFilterSubDomainsKey,
		cfgOptionFilterSubDomains,
	)
	lp.FilterCNAMEs = lp.wrapBoolOption(
		CfgOptionFilterCNAMEKey,
		cfgOptionFilterCNAME,
	)
	lp.PreventBypassing = lp.wrapBoolOption(
		CfgOptionPreventBypassingKey,
		cfgOptionPreventBypassing,
	)
	lp.DomainHeuristics = lp.wrapBoolOption(
		CfgOptionDomainHeuristicsKey,
		cfgOptionDomainHeuristics,
	)
	lp.UseSPN = lp.wrapBoolOption(
		CfgOptionUseSPNKey,
		cfgOptionUseSPN,
	)
	lp.SPNRoutingAlgorithm = lp.wrapStringOption(
		CfgOptionRoutingAlgorithmKey,
		cfgOptionRoutingAlgorithm,
	)
	lp.EnableHistory = lp.wrapBoolOption(
		CfgOptionEnableHistoryKey,
		cfgOptionEnableHistory,
	)
	lp.KeepHistory = lp.wrapIntOption(
		CfgOptionKeepHistoryKey,
		cfgOptionKeepHistory,
	)

	// Register the local profile as the first layer.
	lp.LayerIDs = append(lp.LayerIDs, localProfile.ScopedID())
	lp.layers = append(lp.layers, localProfile)

	// TODO: Load additional profiles.

	lp.CreateMeta()
	lp.SetKey(runtime.DefaultRegistry.DatabaseName() + ":" + revisionProviderPrefix + localProfile.ScopedID())

	// Inform database subscribers about the new layered profile.
	lp.Lock()
	defer lp.Unlock()
	pushLayeredProfile(lp)

	return lp
}
// LockForUsage locks the layered profile, including all layers individually.
// The lock order (layered profile first, then each layer in order) must match
// UnlockForUsage.
func (lp *LayeredProfile) LockForUsage() {
	lp.RLock()
	for _, layer := range lp.layers {
		layer.RLock()
	}
}
// UnlockForUsage unlocks the layered profile, including all layers individually.
// Counterpart to LockForUsage.
func (lp *LayeredProfile) UnlockForUsage() {
	lp.RUnlock()
	for _, layer := range lp.layers {
		layer.RUnlock()
	}
}
// LocalProfile returns the local profile associated with this layered profile.
// A nil receiver returns nil.
func (lp *LayeredProfile) LocalProfile() *Profile {
	if lp == nil {
		return nil
	}
	lp.RLock()
	defer lp.RUnlock()

	return lp.localProfile
}
// LocalProfileWithoutLocking returns the local profile associated with this
// layered profile, but without locking the layered profile.
// This method may only be used when the caller already has a lock on the layered profile.
func (lp *LayeredProfile) LocalProfileWithoutLocking() *Profile {
	if lp == nil {
		return nil
	}
	return lp.localProfile
}
// increaseRevisionCounter increases the revision counter and pushes the
// layered profile to listeners.
// If lock is false, the caller must already hold the write lock.
func (lp *LayeredProfile) increaseRevisionCounter(lock bool) (revisionCounter uint64) { //nolint:unparam // This is documentation.
	if lp == nil {
		return 0
	}

	if lock {
		lp.Lock()
		defer lp.Unlock()
	}

	// Increase the revision counter.
	lp.RevisionCounter++

	// Push the increased counter to the UI.
	pushLayeredProfile(lp)

	return lp.RevisionCounter
}
// RevisionCnt returns the current profile revision counter.
// A nil receiver returns 0.
func (lp *LayeredProfile) RevisionCnt() (revisionCounter uint64) {
	if lp == nil {
		return 0
	}
	lp.RLock()
	defer lp.RUnlock()

	return lp.RevisionCounter
}
// MarkStillActive marks all the layers as still active.
// A nil receiver is a no-op.
func (lp *LayeredProfile) MarkStillActive() {
	if lp == nil {
		return
	}
	lp.RLock()
	defer lp.RUnlock()

	for i := range lp.layers {
		lp.layers[i].MarkStillActive()
	}
}
// NeedsUpdate checks for outdated profiles.
// It reports true if the global config changed or any layer was marked
// outdated.
func (lp *LayeredProfile) NeedsUpdate() (outdated bool) {
	lp.RLock()
	defer lp.RUnlock()

	// Check global config state.
	if !lp.globalValidityFlag.IsValid() {
		return true
	}

	// Check config in layers.
	for _, layer := range lp.layers {
		if layer.outdated.IsSet() {
			return true
		}
	}

	return false
}
// Update checks for and replaces any outdated profiles.
// It returns the (possibly bumped) revision counter.
func (lp *LayeredProfile) Update(md MatchingData, createProfileCallback func() *Profile) (revisionCounter uint64) {
	lp.Lock()
	defer lp.Unlock()

	var changed bool
	for i, layer := range lp.layers {
		if layer.outdated.IsSet() {
			// Check for unsupported sources.
			if layer.Source != SourceLocal {
				log.Warningf("profile: updating profiles outside of local source is not supported: %s", layer.ScopedID())
				layer.outdated.UnSet()
				continue
			}

			// Update layer.
			changed = true
			newLayer, err := GetLocalProfile(layer.ID, md, createProfileCallback)
			if err != nil {
				// Fixed: previously, a failed update still fell through to the
				// localProfile assignment below, setting lp.localProfile to the
				// nil newLayer while lp.layers[0] kept the old profile.
				log.Errorf("profiles: failed to update profile %s: %s", layer.ScopedID(), err)
				continue
			}
			lp.layers[i] = newLayer

			// Update local profile reference.
			if i == 0 {
				lp.localProfile = newLayer
			}
		}
	}
	if !lp.globalValidityFlag.IsValid() {
		changed = true
	}

	if changed {
		// get global config validity flag
		lp.globalValidityFlag.Refresh()

		// bump revision counter
		lp.increaseRevisionCounter(false)
	}

	return lp.RevisionCounter
}
// SecurityLevel returns the highest security level of all layered profiles. This function is atomic and does not require any locking.
func (lp *LayeredProfile) SecurityLevel() uint8 {
	return uint8(atomic.LoadUint32(lp.securityLevel))
}
// DefaultAction returns the active default action ID. This functions requires the layered profile to be read locked.
func (lp *LayeredProfile) DefaultAction() uint8 {
	// The first layer with an explicit default action wins.
	for _, layer := range lp.layers {
		if layer.defaultAction != DefaultActionNotSet {
			return layer.defaultAction
		}
	}

	// No layer decided; fall back to the global default action.
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return cfgDefaultAction
}
// MatchEndpoint checks if the given endpoint matches an entry in any of the profiles. This functions requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchEndpoint(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
	// Walk the layers in order; the first decisive result wins.
	for _, layer := range lp.layers {
		if !layer.endpoints.IsSet() {
			continue
		}
		if result, reason := layer.endpoints.Match(ctx, entity); endpoints.IsDecision(result) {
			return result, reason
		}
	}

	// No layer decided; fall back to the global endpoint rules.
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return cfgEndpoints.Match(ctx, entity)
}
// MatchServiceEndpoint checks if the given endpoint of an inbound connection matches an entry in any of the profiles. This functions requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchServiceEndpoint(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
	entity.EnableReverseResolving()

	// Walk the layers in order; the first decisive result wins.
	for _, layer := range lp.layers {
		if !layer.serviceEndpoints.IsSet() {
			continue
		}
		if result, reason := layer.serviceEndpoints.Match(ctx, entity); endpoints.IsDecision(result) {
			return result, reason
		}
	}

	// No layer decided; fall back to the global service endpoint rules.
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return cfgServiceEndpoints.Match(ctx, entity)
}
// MatchSPNUsagePolicy checks if the given endpoint matches an entry in any of the profiles. This functions requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchSPNUsagePolicy(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
	// Walk the layers in order; the first decisive result wins.
	for _, layer := range lp.layers {
		if !layer.spnUsagePolicy.IsSet() {
			continue
		}
		if result, reason := layer.spnUsagePolicy.Match(ctx, entity); endpoints.IsDecision(result) {
			return result, reason
		}
	}

	// No layer decided; fall back to the global SPN usage policy.
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return cfgSPNUsagePolicy.Match(ctx, entity)
}
// StackedTransitHubPolicies returns all transit hub policies of the layered profile, including the global one.
func (lp *LayeredProfile) StackedTransitHubPolicies() []endpoints.Endpoints {
	// Capacity: every layer, plus the global policy and two intel policies
	// that callers may append.
	stacked := make([]endpoints.Endpoints, 0, len(lp.layers)+3)
	for _, layer := range lp.layers {
		if layer.spnTransitHubPolicy.IsSet() {
			stacked = append(stacked, layer.spnTransitHubPolicy)
		}
	}

	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return append(stacked, cfgSPNTransitHubPolicy)
}
// StackedExitHubPolicies returns all exit hub policies of the layered profile, including the global one.
func (lp *LayeredProfile) StackedExitHubPolicies() []endpoints.Endpoints {
	// Capacity: every layer, plus the global policy and two intel policies
	// that callers may append.
	stacked := make([]endpoints.Endpoints, 0, len(lp.layers)+3)
	for _, layer := range lp.layers {
		if layer.spnExitHubPolicy.IsSet() {
			stacked = append(stacked, layer.spnExitHubPolicy)
		}
	}

	cfgLock.RLock()
	defer cfgLock.RUnlock()
	return append(stacked, cfgSPNExitHubPolicy)
}
// MatchFilterLists matches the entity against the set of filter
// lists. This functions requires the layered profile to be read locked.
func (lp *LayeredProfile) MatchFilterLists(ctx context.Context, entity *intel.Entity) (endpoints.EPResult, endpoints.Reason) {
	entity.ResolveSubDomainLists(ctx, lp.FilterSubDomains())
	entity.EnableCNAMECheck(ctx, lp.FilterCNAMEs())

	// The first layer with filter lists configured decides.
	for _, layer := range lp.layers {
		if !layer.filterListsSet {
			continue
		}
		if entity.MatchLists(layer.filterListIDs) {
			return endpoints.Denied, entity.ListBlockReason()
		}
		return endpoints.NoMatch, nil
	}

	// No layer has filter lists; fall back to the global filter lists.
	cfgLock.RLock()
	defer cfgLock.RUnlock()
	if len(cfgFilterLists) > 0 && entity.MatchLists(cfgFilterLists) {
		return endpoints.Denied, entity.ListBlockReason()
	}
	return endpoints.NoMatch, nil
}
// wrapBoolOption returns a bool option that resolves configKey through the
// profile layers in order, falling back to the global config value.
// The resolved value is cached and only refreshed when the layered profile's
// revision counter changes.
// NOTE(review): the returned closure reads lp.RevisionCounter and lp.layers
// without taking lp's lock — presumably callers hold the read lock per this
// type's usage convention; verify before changing.
func (lp *LayeredProfile) wrapBoolOption(configKey string, globalConfig config.BoolOption) config.BoolOption {
	var revCnt uint64 = 0
	var value bool
	var refreshLock sync.Mutex
	return func() bool {
		refreshLock.Lock()
		defer refreshLock.Unlock()

		// Check if we need to refresh the value.
		if revCnt != lp.RevisionCounter {
			revCnt = lp.RevisionCounter

			// Go through all layers to find an active value.
			found := false
			for _, layer := range lp.layers {
				layerValue, ok := layer.configPerspective.GetAsBool(configKey)
				if ok {
					found = true
					value = layerValue
					break
				}
			}
			if !found {
				value = globalConfig()
			}
		}
		return value
	}
}
// wrapIntOption returns an int option that resolves configKey through the
// profile layers in order, falling back to the global config value.
// The resolved value is cached and only refreshed when the layered profile's
// revision counter changes.
// NOTE(review): the returned closure reads lp.RevisionCounter and lp.layers
// without taking lp's lock — presumably callers hold the read lock per this
// type's usage convention; verify before changing.
func (lp *LayeredProfile) wrapIntOption(configKey string, globalConfig config.IntOption) config.IntOption {
	var revCnt uint64 = 0
	var value int64
	var refreshLock sync.Mutex
	return func() int64 {
		refreshLock.Lock()
		defer refreshLock.Unlock()

		// Check if we need to refresh the value.
		if revCnt != lp.RevisionCounter {
			revCnt = lp.RevisionCounter

			// Go through all layers to find an active value.
			found := false
			for _, layer := range lp.layers {
				layerValue, ok := layer.configPerspective.GetAsInt(configKey)
				if ok {
					found = true
					value = layerValue
					break
				}
			}
			if !found {
				value = globalConfig()
			}
		}
		return value
	}
}
// GetProfileSource returns the database key of the first profile in the
// layers that has the given configuration key set. If it returns an empty
// string, the global profile can be assumed to have been effective.
func (lp *LayeredProfile) GetProfileSource(configKey string) string {
	for _, layer := range lp.layers {
		if layer.configPerspective.Has(configKey) {
			return layer.Key()
		}
	}

	// Global Profile
	return ""
}
// wrapStringOption returns a string option that resolves configKey through
// the profile layers in order, falling back to the global config value.
// The resolved value is cached and only refreshed when the layered profile's
// revision counter changes.
// NOTE(review): the returned closure reads lp.RevisionCounter and lp.layers
// without taking lp's lock — presumably callers hold the read lock per this
// type's usage convention; verify before changing.
func (lp *LayeredProfile) wrapStringOption(configKey string, globalConfig config.StringOption) config.StringOption {
	var revCnt uint64 = 0
	var value string
	var refreshLock sync.Mutex
	return func() string {
		refreshLock.Lock()
		defer refreshLock.Unlock()

		// Check if we need to refresh the value.
		if revCnt != lp.RevisionCounter {
			revCnt = lp.RevisionCounter

			// Go through all layers to find an active value.
			found := false
			for _, layer := range lp.layers {
				layerValue, ok := layer.configPerspective.GetAsString(configKey)
				if ok {
					found = true
					value = layerValue
					break
				}
			}
			if !found {
				value = globalConfig()
			}
		}
		return value
	}
}

555
service/profile/profile.go Normal file
View File

@@ -0,0 +1,555 @@
package profile
import (
"context"
"errors"
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tevino/abool"
"github.com/safing/portbase/config"
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/log"
"github.com/safing/portbase/utils"
"github.com/safing/portmaster/service/intel/filterlists"
"github.com/safing/portmaster/service/profile/binmeta"
"github.com/safing/portmaster/service/profile/endpoints"
)
// ProfileSource is the source of the profile.
type ProfileSource string //nolint:golint

// Profile Sources.
const (
	SourceLocal   ProfileSource = "local"   // local, editable
	SourceSpecial ProfileSource = "special" // specials (read-only)
)

// Default Action IDs.
const (
	DefaultActionNotSet uint8 = 0 // No explicit default action configured.
	DefaultActionBlock  uint8 = 1
	DefaultActionAsk    uint8 = 2
	DefaultActionPermit uint8 = 3
)
// Profile is used to predefine a security profile for applications.
type Profile struct { //nolint:maligned // not worth the effort
	record.Base
	sync.RWMutex

	// ID is a unique identifier for the profile.
	ID string // constant

	// Source describes the source of the profile.
	Source ProfileSource // constant

	// Name is a human readable name of the profile. It
	// defaults to the basename of the application.
	Name string

	// Description may hold an optional description of the
	// profile or the purpose of the application.
	Description string

	// Warning may hold an optional warning about this application.
	// It may be static or be added later on when the Portmaster detected an
	// issue with the application.
	Warning string

	// WarningLastUpdated holds the timestamp when the Warning field was last
	// updated.
	WarningLastUpdated time.Time

	// Homepage may refer to the website of the application
	// vendor.
	Homepage string

	// Deprecated: Icon holds the icon of the application. The value
	// may either be a filepath, a database key or a blob URL.
	// See IconType for more information.
	Icon string
	// Deprecated: IconType describes the type of the Icon property.
	IconType binmeta.IconType
	// Icons holds a list of icons to represent the application.
	Icons []binmeta.Icon

	// Deprecated: LinkedPath used to point to the executable this
	// profile was created for.
	// Until removed, it will be added to the Fingerprints as an exact path match.
	LinkedPath string // constant

	// PresentationPath holds the path of an executable that should be used for
	// get representative information from, like the name of the program or the icon.
	// Is automatically removed when the path does not exist.
	// Is automatically populated with the next match when empty.
	PresentationPath string

	// UsePresentationPath can be used to enable/disable fetching information
	// from the executable at PresentationPath. In some cases, this is not
	// desirable.
	UsePresentationPath bool

	// Fingerprints holds process matching information.
	Fingerprints []Fingerprint

	// Config holds profile specific settings. It's a nested
	// object with keys defining the settings database path. All keys
	// until the actual settings value (which is everything that is not
	// an object) need to be concatenated for the settings database
	// path.
	Config map[string]interface{}

	// LastEdited holds the UTC timestamp in seconds when the profile was last
	// edited by the user. This is not set automatically, but has to be manually
	// set by the user interface.
	LastEdited int64

	// Created holds the UTC timestamp in seconds when the
	// profile has been created.
	Created int64

	// Internal is set to true if the profile is attributed to a
	// Portmaster internal process. Internal is set during profile
	// creation and may be accessed without lock.
	Internal bool

	// layeredProfile is a link to the layered profile with this profile as the
	// main profile.
	// All processes with the same binary should share the same instance of the
	// local profile and the associated layered profile.
	layeredProfile *LayeredProfile

	// Interpreted Data
	// These fields are derived from Config by parseConfig.
	configPerspective   *config.Perspective
	dataParsed          bool
	defaultAction       uint8
	endpoints           endpoints.Endpoints
	serviceEndpoints    endpoints.Endpoints
	filterListsSet      bool
	filterListIDs       []string
	spnUsagePolicy      endpoints.Endpoints
	spnTransitHubPolicy endpoints.Endpoints
	spnExitHubPolicy    endpoints.Endpoints

	// Lifecycle Management
	outdated   *abool.AtomicBool
	lastActive *int64

	// savedInternally is set to true for profiles that are saved internally.
	savedInternally bool
}
// prepProfile initializes the profile's lifecycle fields and applies the
// LinkedPath-to-PresentationPath migration.
func (profile *Profile) prepProfile() {
	// prepare configuration
	profile.outdated = abool.New()
	profile.lastActive = new(int64)

	// Migration of LinkedPath to PresentationPath
	if profile.PresentationPath == "" && profile.LinkedPath != "" {
		profile.PresentationPath = profile.LinkedPath
	}
}
// parseConfig interprets the profile's Config map into the profile's cached
// fields (default action, endpoint lists, filter lists, SPN policies).
// It is idempotent and returns the last parse error that occurred, if any;
// a perspective creation failure is returned immediately.
func (profile *Profile) parseConfig() error {
	// Only parse once.
	if profile.dataParsed {
		return nil
	}

	// Build the config perspective and mark the profile as parsed.
	var err error
	profile.configPerspective, err = config.NewPerspective(profile.Config)
	if err != nil {
		return fmt.Errorf("failed to create config perspective: %w", err)
	}
	profile.dataParsed = true

	var lastErr error

	// Default action.
	profile.defaultAction = DefaultActionNotSet
	if action, ok := profile.configPerspective.GetAsString(CfgOptionDefaultActionKey); ok {
		switch action {
		case DefaultActionPermitValue:
			profile.defaultAction = DefaultActionPermit
		case DefaultActionAskValue:
			profile.defaultAction = DefaultActionAsk
		case DefaultActionBlockValue:
			profile.defaultAction = DefaultActionBlock
		default:
			lastErr = fmt.Errorf(`default action "%s" invalid`, action)
		}
	}

	// parseEndpointList reads a string-array option and parses it as an
	// endpoint list, recording any parse error in lastErr.
	parseEndpointList := func(key string) endpoints.Endpoints {
		list, ok := profile.configPerspective.GetAsStringArray(key)
		if !ok {
			return nil
		}
		parsed, parseErr := endpoints.ParseEndpoints(list)
		if parseErr != nil {
			lastErr = parseErr
		}
		return parsed
	}

	// Outgoing and incoming endpoint rules.
	profile.endpoints = parseEndpointList(CfgOptionEndpointsKey)
	profile.serviceEndpoints = parseEndpointList(CfgOptionServiceEndpointsKey)

	// Filter lists.
	profile.filterListsSet = false
	if list, ok := profile.configPerspective.GetAsStringArray(CfgOptionFilterListsKey); ok {
		profile.filterListIDs, err = filterlists.ResolveListIDs(list)
		if err != nil {
			lastErr = err
		} else {
			profile.filterListsSet = true
		}
	}

	// SPN policies.
	profile.spnUsagePolicy = parseEndpointList(CfgOptionSPNUsagePolicyKey)
	profile.spnTransitHubPolicy = parseEndpointList(CfgOptionTransitHubPolicyKey)
	profile.spnExitHubPolicy = parseEndpointList(CfgOptionExitHubPolicyKey)

	return lastErr
}
// New returns a new Profile.
// Optionally, you may supply custom configuration in the flat (key=value) form.
func New(profile *Profile) *Profile {
	// Start from an empty profile when the caller did not provide one.
	if profile == nil {
		profile = &Profile{}
	}

	// Stamp creation time and mark the profile as internally managed.
	profile.Created = time.Now().Unix()
	profile.savedInternally = true

	// Initialize an empty configuration, or expand a supplied flat one.
	if profile.Config == nil {
		profile.Config = make(map[string]interface{})
	} else {
		profile.Config = config.Expand(profile.Config)
	}

	// Assign an ID when none was supplied: derive it from the fingerprints
	// when possible, otherwise fall back to a random UUID.
	if profile.ID == "" {
		if len(profile.Fingerprints) == 0 {
			log.Warningf("profile: creating new profile without fingerprints to derive ID from")
			profile.ID = utils.RandomUUID("").String()
		} else {
			profile.ID = DeriveProfileID(profile.Fingerprints)
		}
	}

	// Derive the database key from source and ID.
	profile.makeKey()

	// Prepare and parse the initial profile configuration.
	profile.prepProfile()
	if err := profile.parseConfig(); err != nil {
		log.Errorf("profile: failed to parse new profile: %s", err)
	}

	return profile
}
// ScopedID returns the scoped ID (Source + ID) of the profile.
// It delegates to MakeScopedID for the canonical formatting.
func (profile *Profile) ScopedID() string {
	return MakeScopedID(profile.Source, profile.ID)
}
// makeKey derives and sets the record Key from the profile attributes.
// Callers set Source and ID before invoking this (see New).
func (profile *Profile) makeKey() {
	profile.SetKey(MakeProfileKey(profile.Source, profile.ID))
}
// Save validates that the profile is addressable (has both an ID and a
// source) and then writes it to the profile database.
func (profile *Profile) Save() error {
	switch {
	case profile.ID == "":
		return errors.New("profile: tried to save profile without ID")
	case profile.Source == "":
		return fmt.Errorf("profile: profile %s does not specify a source", profile.ID)
	}

	return profileDB.Put(profile)
}
// delete deletes the profile from the database.
//
// Deletion is persisted by marking the record metadata as deleted and then
// writing the record back, so the deletion propagates through the database
// layer like any other update.
func (profile *Profile) delete() error {
	// Check if a key is set.
	if !profile.KeyIsSet() {
		return errors.New("key is not set")
	}

	// Mark the record as deleted and persist that state.
	profile.Meta().Delete()
	err := profileDB.Put(profile)
	if err != nil {
		return err
	}

	// Post handling is done by the profile update feed.
	return nil
}
// MarkStillActive marks the profile as still active.
// The timestamp is written atomically, so this is safe for concurrent use.
func (profile *Profile) MarkStillActive() {
	atomic.StoreInt64(profile.lastActive, time.Now().Unix())
}
// LastActive returns the unix timestamp when the profile was last marked as
// still active. The timestamp is read atomically, so this is safe for
// concurrent use.
func (profile *Profile) LastActive() int64 {
	return atomic.LoadInt64(profile.lastActive)
}
// String returns a string representation of the Profile in the form
// "<Name Source/ID>".
func (profile *Profile) String() string {
	return "<" + profile.Name + " " + string(profile.Source) + "/" + profile.ID + ">"
}
// IsOutdated returns whether this instance of the profile is marked as
// outdated, i.e. a newer instance has been registered as active.
func (profile *Profile) IsOutdated() bool {
	return profile.outdated.IsSet()
}
// GetEndpoints returns the endpoint list of the profile. This function
// requires the profile to be read locked.
func (profile *Profile) GetEndpoints() endpoints.Endpoints {
	return profile.endpoints
}
// GetServiceEndpoints returns the service endpoint list of the profile. This
// function requires the profile to be read locked.
func (profile *Profile) GetServiceEndpoints() endpoints.Endpoints {
	return profile.serviceEndpoints
}
// AddEndpoint adds an endpoint to the endpoint list, saves the profile and
// reloads the configuration. The profile lock must not be held when calling
// this, as the underlying helper acquires it.
func (profile *Profile) AddEndpoint(newEntry string) {
	profile.addEndpointEntry(CfgOptionEndpointsKey, newEntry)
}
// AddServiceEndpoint adds a service endpoint to the endpoint list, saves the
// profile and reloads the configuration. The profile lock must not be held
// when calling this, as the underlying helper acquires it.
func (profile *Profile) AddServiceEndpoint(newEntry string) {
	profile.addEndpointEntry(CfgOptionServiceEndpointsKey, newEntry)
}
// addEndpointEntry prepends a new rule entry to the endpoint list stored
// under cfgKey, persists the profile and re-parses its configuration.
// Duplicate entries within the leading run of same-prefixed rules are
// ignored. The profile lock must not be held by the caller.
func (profile *Profile) addEndpointEntry(cfgKey, newEntry string) {
	changed := false

	// When finished, save the profile.
	// Note on ordering: this defer is registered before the Unlock below, so
	// it runs after the lock has been released again - Save must not be
	// called while holding the profile lock.
	defer func() {
		if !changed {
			return
		}
		err := profile.Save()
		if err != nil {
			log.Warningf("profile: failed to save profile %s after adding an endpoint rule: %s", profile.ScopedID(), err)
		}
	}()

	// Lock the profile for editing.
	profile.Lock()
	defer profile.Unlock()

	// Get the endpoint list configuration value and add the new entry.
	endpointList, ok := profile.configPerspective.GetAsStringArray(cfgKey)
	if ok {
		// A list already exists, check for duplicates within the same prefix.
		newEntryPrefix := strings.Split(newEntry, " ")[0] + " "
		for _, entry := range endpointList {
			if !strings.HasPrefix(entry, newEntryPrefix) {
				// We found an entry with a different prefix than the new entry.
				// Beyond this entry we cannot possibly know if identical entries will
				// match, so we will have to add the new entry no matter what the rest
				// of the list has.
				break
			}
			if entry == newEntry {
				// An identical entry is already in the list, abort.
				log.Debugf("profile: ignoring new endpoint rule for %s, as identical is already present: %s", profile, newEntry)
				return
			}
		}
		// Prepend so the new rule takes precedence over existing ones.
		endpointList = append([]string{newEntry}, endpointList...)
	} else {
		endpointList = []string{newEntry}
	}

	// Save new value back to profile.
	config.PutValueIntoHierarchicalConfig(profile.Config, cfgKey, endpointList)
	changed = true

	// Reload the profile manually in order to parse the newly added entry.
	profile.dataParsed = false
	err := profile.parseConfig()
	if err != nil {
		log.Errorf("profile: failed to parse %s config after adding endpoint: %s", profile, err)
	}
}
// LayeredProfile returns the layered profile associated with this profile.
// It acquires the profile lock, so the lock must not already be held.
func (profile *Profile) LayeredProfile() *LayeredProfile {
	profile.Lock()
	defer profile.Unlock()

	return profile.layeredProfile
}
// EnsureProfile ensures that the given record is a *Profile, and returns it.
func EnsureProfile(r record.Record) (*Profile, error) {
	// Wrapped records have to be unwrapped into a freshly allocated profile.
	if r.IsWrapped() {
		// Only allocate a new struct when unwrapping is actually needed.
		p := &Profile{}
		if err := record.Unwrap(r, p); err != nil {
			return nil, err
		}
		return p, nil
	}

	// Otherwise, the record must already be a *Profile.
	p, ok := r.(*Profile)
	if !ok {
		return nil, fmt.Errorf("record not of type *Profile, but %T", r)
	}
	return p, nil
}
// updateMetadata updates meta data fields on the profile and returns whether
// the profile was changed.
//
// The steps below are order-sensitive: each migration/fallback only fires if
// the fields it derives from are still in their pre-migration state.
func (profile *Profile) updateMetadata(binaryPath string) (changed bool) {
	// Check if this is a local profile, else warn and return.
	if profile.Source != SourceLocal {
		log.Warningf("tried to update metadata for non-local profile %s", profile.ScopedID())
		return false
	}

	// Set PresentationPath if unset.
	if profile.PresentationPath == "" && binaryPath != "" {
		profile.PresentationPath = binaryPath
		changed = true
	}

	// Migrate LinkedPath to PresentationPath.
	// TODO: Remove in v1.5
	if profile.PresentationPath == "" && profile.LinkedPath != "" {
		profile.PresentationPath = profile.LinkedPath
		changed = true
	}

	// Set Name if unset.
	if profile.Name == "" && profile.PresentationPath != "" {
		// Generate a default profile name from path.
		profile.Name = binmeta.GenerateBinaryNameFromPath(profile.PresentationPath)
		changed = true
	}

	// Migrate to Fingerprints.
	// TODO: Remove in v1.5
	// The legacy linked path is represented as an exact path fingerprint.
	if len(profile.Fingerprints) == 0 && profile.LinkedPath != "" {
		profile.Fingerprints = []Fingerprint{
			{
				Type:      FingerprintTypePathID,
				Operation: FingerprintOperationEqualsID,
				Value:     profile.LinkedPath,
			},
		}
		changed = true
	}

	// UI Backward Compatibility:
	// Fill LinkedPath with PresentationPath
	// TODO: Remove in v1.1
	if profile.LinkedPath == "" && profile.PresentationPath != "" {
		profile.LinkedPath = profile.PresentationPath
		changed = true
	}

	return changed
}
// updateMetadataFromSystem updates the profile metadata with data from the
// operating system and saves it afterwards.
func (profile *Profile) updateMetadataFromSystem(ctx context.Context, md MatchingData) error {
	var changed bool

	// This function is only valid for local profiles.
	if profile.Source != SourceLocal || profile.PresentationPath == "" {
		return fmt.Errorf("tried to update metadata for non-local or non-path profile %s", profile.ScopedID())
	}

	// Get home from ENV.
	var home string
	if env := md.Env(); env != nil {
		home = env["HOME"]
	}

	// Get binary icon and name.
	// A failure here is non-fatal: we continue with whatever data we got.
	newIcon, newName, err := binmeta.GetIconAndName(ctx, profile.PresentationPath, home)
	if err != nil {
		log.Warningf("profile: failed to get binary icon/name for %s: %s", profile.PresentationPath, err)
	}

	// Apply new data to profile.
	// Done in a closure so the lock is released before saving below.
	func() {
		// Lock profile for applying metadata.
		profile.Lock()
		defer profile.Unlock()

		// Apply new name if it changed.
		if newName != "" && profile.Name != newName {
			profile.Name = newName
			changed = true
		}

		// Apply new icon if found.
		// NOTE(review): applying an icon does not set changed, so an
		// icon-only update is never saved below - confirm whether intended.
		if newIcon != nil {
			if len(profile.Icons) == 0 {
				profile.Icons = []binmeta.Icon{*newIcon}
			} else {
				// Merge with existing icons and de-duplicate.
				profile.Icons = append(profile.Icons, *newIcon)
				profile.Icons = binmeta.SortAndCompactIcons(profile.Icons)
			}
		}
	}()

	// If anything changed, save the profile.
	// profile.Lock must not be held!
	if changed {
		err := profile.Save()
		if err != nil {
			log.Warningf("profile: failed to save %s after metadata update: %s", profile.ScopedID(), err)
		}
	}

	return nil
}

324
service/profile/special.go Normal file
View File

@@ -0,0 +1,324 @@
package profile
import (
"time"
"github.com/safing/portbase/log"
)
const (
	// UnidentifiedProfileID is the profile ID used for unidentified processes.
	UnidentifiedProfileID = "_unidentified"
	// UnidentifiedProfileName is the name used for unidentified processes.
	UnidentifiedProfileName = "Other Connections"
	// UnidentifiedProfileDescription is the description used for unidentified processes.
	UnidentifiedProfileDescription = `Connections that could not be attributed to a specific app.
The Portmaster attributes connections (only TCP/UDP) to specific apps. When attribution for a connection fails, it ends up here.
Connections from unsupported protocols (like ICMP/"ping") are always collected here.
`

	// UnsolicitedProfileID is the profile ID used for unsolicited connections.
	UnsolicitedProfileID = "_unsolicited"
	// UnsolicitedProfileName is the name used for unsolicited connections.
	UnsolicitedProfileName = "Network Noise"
	// UnsolicitedProfileDescription is the description used for unsolicited connections.
	UnsolicitedProfileDescription = `Common connections coming from your Local Area Network.
Local Area Networks usually have quite a lot of traffic from applications that are trying to find things on the network. This might be a computer trying to find a printer, or a file sharing application searching for local peers. These network packets will automatically arrive at your device.
These connections - the "network noise" - can be found in this app.`

	// SystemProfileID is the profile ID used for the system/kernel.
	SystemProfileID = "_system"
	// SystemProfileName is the name used for the system/kernel.
	SystemProfileName = "Operating System"
	// SystemProfileDescription is the description used for the system/kernel.
	// Note: fixed typo "operation system" in the user-facing description.
	SystemProfileDescription = "This is the operating system itself."

	// SystemResolverProfileID is the profile ID used for the system's DNS resolver.
	SystemResolverProfileID = "_system-resolver"
	// SystemResolverProfileName is the name used for the system's DNS resolver.
	SystemResolverProfileName = "System DNS Client"
	// SystemResolverProfileDescription is the description used for the system's DNS resolver.
	SystemResolverProfileDescription = `The System DNS Client is a system service that requires special handling.
For regular network connections, the configured settings will apply as usual.
DNS Requests coming from the System DNS Client, however, could actually be coming from any other application on the system: The System DNS Client resolves domain names on behalf of other applications.
In order to correctly handle these, DNS Requests (not regular connections), do not take the globally configured Outgoing Rules into account.
Additionally, the settings for the System DNS Client are specially pre-configured. If you are having issues or want to revert to the default settings, please delete this profile below. It will be automatically recreated with the default settings.
`

	// PortmasterProfileID is the profile ID used for the Portmaster Core itself.
	PortmasterProfileID = "_portmaster"
	// PortmasterProfileName is the name used for the Portmaster Core itself.
	PortmasterProfileName = "Portmaster Core Service"
	// PortmasterProfileDescription is the description used for the Portmaster Core itself.
	PortmasterProfileDescription = `This is the Portmaster itself, which runs in the background as a system service. App specific settings have no effect.`

	// PortmasterAppProfileID is the profile ID used for the Portmaster App.
	PortmasterAppProfileID = "_portmaster-app"
	// PortmasterAppProfileName is the name used for the Portmaster App.
	PortmasterAppProfileName = "Portmaster User Interface"
	// PortmasterAppProfileDescription is the description used for the Portmaster App.
	PortmasterAppProfileDescription = `This is the Portmaster UI Windows.`

	// PortmasterNotifierProfileID is the profile ID used for the Portmaster Notifier.
	PortmasterNotifierProfileID = "_portmaster-notifier"
	// PortmasterNotifierProfileName is the name used for the Portmaster Notifier.
	PortmasterNotifierProfileName = "Portmaster Notifier"
	// PortmasterNotifierProfileDescription is the description used for the Portmaster Notifier.
	PortmasterNotifierProfileDescription = `This is the Portmaster UI Tray Notifier.`
)
// isSpecialProfileID reports whether the given profile ID belongs to one of
// the built-in special profiles.
func isSpecialProfileID(id string) bool {
	for _, specialID := range []string{
		UnidentifiedProfileID,
		UnsolicitedProfileID,
		SystemProfileID,
		SystemResolverProfileID,
		PortmasterProfileID,
		PortmasterAppProfileID,
		PortmasterNotifierProfileID,
	} {
		if id == specialID {
			return true
		}
	}
	return false
}
// updateSpecialProfileMetadata syncs the name, description and presentation
// path of a special profile to their current built-in values and reports
// whether anything was modified. Profiles that are not special are left
// untouched and false is returned.
func updateSpecialProfileMetadata(profile *Profile, binaryPath string) (changed bool) {
	// Look up the built-in metadata; bail out for regular profiles.
	meta, ok := map[string][2]string{
		UnidentifiedProfileID:       {UnidentifiedProfileName, UnidentifiedProfileDescription},
		UnsolicitedProfileID:        {UnsolicitedProfileName, UnsolicitedProfileDescription},
		SystemProfileID:             {SystemProfileName, SystemProfileDescription},
		SystemResolverProfileID:     {SystemResolverProfileName, SystemResolverProfileDescription},
		PortmasterProfileID:         {PortmasterProfileName, PortmasterProfileDescription},
		PortmasterAppProfileID:      {PortmasterAppProfileName, PortmasterAppProfileDescription},
		PortmasterNotifierProfileID: {PortmasterNotifierProfileName, PortmasterNotifierProfileDescription},
	}[profile.ID]
	if !ok {
		return false
	}
	newProfileName, newDescription := meta[0], meta[1]

	// Sync the profile name.
	if profile.Name != newProfileName {
		profile.Name = newProfileName
		changed = true
	}

	// Sync the description.
	if profile.Description != newDescription {
		profile.Description = newDescription
		changed = true
	}

	// Sync the presentation path.
	if profile.PresentationPath != binaryPath {
		profile.PresentationPath = binaryPath
		changed = true
	}

	return changed
}
// createSpecialProfile returns a freshly initialized profile for the given
// special profile ID, or nil if the ID does not denote a special profile.
// Some special profiles ship with pre-configured settings; path is used as
// the profile's presentation path.
func createSpecialProfile(profileID string, path string) *Profile {
	switch profileID {
	case UnidentifiedProfileID:
		return New(&Profile{
			ID:               UnidentifiedProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
		})

	case UnsolicitedProfileID:
		return New(&Profile{
			ID:               UnsolicitedProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
		})

	case SystemProfileID:
		return New(&Profile{
			ID:               SystemProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
		})

	case SystemResolverProfileID:
		return New(&Profile{
			ID:               SystemResolverProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
			Config: map[string]interface{}{
				// Explicitly setting the default action to "permit" will improve the
				// user experience for people who set the global default to "prompt".
				// Resolved domains from the system resolver are checked again when
				// attributed to a connection of a regular process. Otherwise, users
				// would see two connection prompts for the same domain.
				CfgOptionDefaultActionKey: DefaultActionPermitValue,
				// Disable force blockers.
				CfgOptionBlockScopeInternetKey: false,
				CfgOptionBlockScopeLANKey:      false,
				CfgOptionBlockScopeLocalKey:    false,
				CfgOptionBlockP2PKey:           false,
				CfgOptionBlockInboundKey:       false,
				// Explicitly allow localhost and answers to multicast protocols that
				// are commonly used by system resolvers.
				// TODO: When the Portmaster gains the ability to attribute multicast
				// responses to their requests, these rules can probably be removed
				// again.
				CfgOptionServiceEndpointsKey: []string{
					"+ Localhost",    // Allow everything from localhost.
					"+ LAN UDP/5353", // Allow inbound mDNS requests and multicast replies.
					"+ LAN UDP/5355", // Allow inbound LLMNR requests and multicast replies.
					"+ LAN UDP/1900", // Allow inbound SSDP requests and multicast replies.
					"- *",            // Deny everything else.
				},
				// Explicitly disable all filter lists, as these will be checked later
				// with the attributed connection. As this is the system resolver, this
				// list can instead be used as a global enforcement of filter lists, if
				// the system resolver is used. Users who want to enforce filter lists
				// globally can configure them on this profile.
				CfgOptionFilterListsKey: []string{},
			},
		})

	case PortmasterProfileID:
		return New(&Profile{
			ID:               PortmasterProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
			Config: map[string]interface{}{
				// In case anything slips through the internal self-allow, be sure to
				// allow everything explicitly.
				// Blocking connections here can lead to a very literal deadlock.
				// This can currently happen, as fast-tracked connections are also
				// reset in the OS integration and might show up in the connection
				// handling if a packet in the other direction hits the firewall first.
				CfgOptionDefaultActionKey:      DefaultActionPermitValue,
				CfgOptionBlockScopeInternetKey: false,
				CfgOptionBlockScopeLANKey:      false,
				CfgOptionBlockScopeLocalKey:    false,
				CfgOptionBlockP2PKey:           false,
				CfgOptionBlockInboundKey:       false,
				CfgOptionEndpointsKey: []string{
					"+ *",
				},
				CfgOptionServiceEndpointsKey: []string{
					"+ Localhost",
					"+ LAN",
					"- *",
				},
			},
			Internal: true,
		})

	case PortmasterAppProfileID:
		return New(&Profile{
			ID:               PortmasterAppProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
			Config: map[string]interface{}{
				CfgOptionDefaultActionKey:      DefaultActionBlockValue,
				CfgOptionBlockScopeInternetKey: false,
				CfgOptionBlockScopeLANKey:      false,
				CfgOptionBlockScopeLocalKey:    false,
				CfgOptionBlockP2PKey:           false,
				CfgOptionBlockInboundKey:       true,
				CfgOptionEndpointsKey: []string{
					"+ Localhost",
					"+ .safing.io",
				},
			},
			Internal: true,
		})

	case PortmasterNotifierProfileID:
		return New(&Profile{
			ID:               PortmasterNotifierProfileID,
			Source:           SourceLocal,
			PresentationPath: path,
			Config: map[string]interface{}{
				CfgOptionDefaultActionKey:      DefaultActionBlockValue,
				CfgOptionBlockScopeInternetKey: false,
				CfgOptionBlockScopeLANKey:      false,
				CfgOptionBlockScopeLocalKey:    false,
				CfgOptionBlockP2PKey:           false,
				CfgOptionBlockInboundKey:       true,
				CfgOptionEndpointsKey: []string{
					"+ Localhost",
				},
			},
			Internal: true,
		})

	default:
		// Not a special profile ID.
		return nil
	}
}
// specialProfileNeedsReset is used as a workaround until we can properly use
// profile layering in a way that it is also correctly handled by the UI. We
// check if the special profile has not been changed by the user and if not,
// check if the profile is outdated and can be upgraded.
func specialProfileNeedsReset(profile *Profile) bool {
	if profile == nil {
		return false
	}
	if profile.Source != SourceLocal {
		// Special profiles live in the local scope only.
		return false
	}
	if profile.LastEdited > 0 {
		// Profile was edited - don't override user settings.
		return false
	}

	switch profile.ID {
	case SystemResolverProfileID, PortmasterProfileID, PortmasterAppProfileID:
		// These special profiles received a defaults upgrade on that date.
		return canBeUpgraded(profile, "22.8.2023")
	default:
		// Not a special profile or no upgrade available yet.
		return false
	}
}
// canBeUpgraded reports whether the profile was created before the given
// upgrade date (day.month.year, e.g. "22.8.2023") and should therefore be
// reset to the current defaults. Unparsable dates are logged and treated as
// "no upgrade".
func canBeUpgraded(profile *Profile, upgradeDate string) bool {
	// Parse the upgrade date.
	upgradeTime, err := time.Parse("2.1.2006", upgradeDate)
	if err != nil {
		log.Warningf("profile: failed to parse date %q: %s", upgradeDate, err)
		return false
	}

	// Only profiles created before the upgrade date are eligible.
	if profile.Created >= upgradeTime.Unix() {
		return false
	}

	log.Infof("profile: upgrading special profile %s", profile.ScopedID())
	return true
}