Restructure modules (#1572)

* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
This commit is contained in:
Daniel Hååvi
2024-08-09 17:15:48 +02:00
committed by GitHub
parent 10a77498f4
commit 80664d1a27
647 changed files with 37690 additions and 3366 deletions

View File

@@ -0,0 +1,231 @@
package badger
import (
"context"
"errors"
"fmt"
"time"
"github.com/dgraph-io/badger"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
"github.com/safing/portmaster/base/log"
)
// Badger database made pluggable for portbase.
type Badger struct {
	name string     // database name, used as the wrapper's db identifier
	db   *badger.DB // underlying badger key-value store handle
}
// init registers this backend under the "badger" storage type.
func init() {
	// Registration error is ignored: it can only fail if the name is
	// already taken, which would be a programming error caught in tests.
	_ = storage.Register("badger", NewBadger)
}
// NewBadger opens/creates a badger database.
// name is the database name used for record keys; location is the
// directory the badger files live in.
func NewBadger(name, location string) (storage.Interface, error) {
	opts := badger.DefaultOptions(location)
	db, err := badger.Open(opts)
	if errors.Is(err, badger.ErrTruncateNeeded) {
		// clean up after crash: retry once with value-log truncation
		// enabled, accepting possible loss of the corrupted tail.
		log.Warningf("database/storage: truncating corrupted value log of badger database %s: this may cause data loss", name)
		opts.Truncate = true
		db, err = badger.Open(opts)
	}
	if err != nil {
		return nil, err
	}
	return &Badger{
		name: name,
		db:   db,
	}, nil
}
// Get returns the database record stored under key, or
// storage.ErrNotFound if the key does not exist or the stored item is
// deleted or expired.
func (b *Badger) Get(key string) (record.Record, error) {
	var m record.Record
	err := b.db.View(func(txn *badger.Txn) error {
		item, err := txn.Get([]byte(key))
		if err != nil {
			if errors.Is(err, badger.ErrKeyNotFound) {
				return storage.ErrNotFound
			}
			return err
		}
		// Treat deleted or expired items the same as missing keys.
		if item.IsDeletedOrExpired() {
			return storage.ErrNotFound
		}
		// Copy the value while the transaction is still open: badger
		// items are only valid for the lifetime of the transaction, so
		// all access must happen inside this closure (the previous
		// version used the item after View had returned).
		data, err := item.ValueCopy(nil)
		if err != nil {
			return err
		}
		m, err = record.NewRawWrapper(b.name, string(item.Key()), data)
		return err
	})
	if err != nil {
		return nil, err
	}
	return m, nil
}
// GetMeta returns the metadata of the record stored under key.
func (b *Badger) GetMeta(key string) (*record.Meta, error) {
	// TODO: Replace with more performant variant.
	rec, err := b.Get(key)
	if err != nil {
		return nil, err
	}
	return rec.Meta(), nil
}
// Put stores a record in the database and returns it unchanged.
func (b *Badger) Put(r record.Record) (record.Record, error) {
	data, err := r.MarshalRecord(r)
	if err != nil {
		return nil, err
	}
	if err := b.db.Update(func(txn *badger.Txn) error {
		return txn.Set([]byte(r.DatabaseKey()), data)
	}); err != nil {
		return nil, err
	}
	return r, nil
}
// Delete removes the record stored under key from the database.
// Deleting a non-existing key is not an error.
func (b *Badger) Delete(key string) error {
	return b.db.Update(func(txn *badger.Txn) error {
		if err := txn.Delete([]byte(key)); err != nil && !errors.Is(err, badger.ErrKeyNotFound) {
			return err
		}
		return nil
	})
}
// Query returns an iterator that streams all records matching the
// supplied query. Matching runs asynchronously in a goroutine.
func (b *Badger) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	if _, err := q.Check(); err != nil {
		return nil, fmt.Errorf("invalid query: %w", err)
	}
	it := iterator.New()
	go b.queryExecutor(it, q, local, internal)
	return it, nil
}
// queryExecutor iterates over all keys with the query's prefix and
// streams matching, valid and permitted records into queryIter.
// It is run in its own goroutine and always finishes the iterator,
// passing on any error.
//
//nolint:gocognit
func (b *Badger) queryExecutor(queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
	err := b.db.View(func(txn *badger.Txn) error {
		it := txn.NewIterator(badger.DefaultIteratorOptions)
		defer it.Close()
		prefix := []byte(q.DatabaseKeyPrefix())
		for it.Seek(prefix); it.ValidForPrefix(prefix); it.Next() {
			item := it.Item()
			// NOTE(review): data escapes the item.Value() callback and is
			// used for the filter checks below; badger only guarantees the
			// slice is valid inside the callback - confirm this is safe.
			var data []byte
			err := item.Value(func(val []byte) error {
				data = val
				return nil
			})
			if err != nil {
				return err
			}
			r, err := record.NewRawWrapper(b.name, string(item.Key()), data)
			if err != nil {
				return err
			}
			if !r.Meta().CheckValidity() {
				continue
			}
			if !r.Meta().CheckPermission(local, internal) {
				continue
			}
			if q.MatchesRecord(r) {
				// Match: re-wrap around a stable copy of the value before
				// handing it out of the transaction.
				copiedData, err := item.ValueCopy(nil)
				if err != nil {
					return err
				}
				newWrapper, err := record.NewRawWrapper(b.name, r.DatabaseKey(), copiedData)
				if err != nil {
					return err
				}
				// Non-blocking send first, then block with a timeout.
				// NOTE(review): this backend waits up to one minute while
				// the other backends use one second - confirm whether the
				// difference is intended.
				select {
				case <-queryIter.Done:
					return nil
				case queryIter.Next <- newWrapper:
				default:
					select {
					case queryIter.Next <- newWrapper:
					case <-queryIter.Done:
						return nil
					case <-time.After(1 * time.Minute):
						return errors.New("query timeout")
					}
				}
			}
		}
		return nil
	})
	queryIter.Finish(err)
}
// ReadOnly returns whether the database is read only.
// Badger storages are always writable.
func (b *Badger) ReadOnly() bool {
	return false
}
// Injected returns whether the database is injected.
// Always false for this backend.
func (b *Badger) Injected() bool {
	return false
}
// Maintain runs a light maintenance operation on the database:
// a single value-log GC pass. GC errors are deliberately ignored,
// as a pass that finds nothing to collect also reports an error.
func (b *Badger) Maintain(_ context.Context) error {
	_ = b.db.RunValueLogGC(0.7)
	return nil
}
// MaintainThorough runs a thorough maintenance operation on the
// database: value-log GC passes are repeated until one reports an
// error, presumably meaning there is nothing left to collect.
// NOTE(review): any GC error - including real failures - ends the
// loop and is discarded; confirm swallowing errors here is intended.
func (b *Badger) MaintainThorough(_ context.Context) (err error) {
	for err == nil {
		err = b.db.RunValueLogGC(0.7)
	}
	return nil
}
// MaintainRecordStates maintains records states in the database.
// Currently a no-op for this backend.
func (b *Badger) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error {
	// TODO: implement MaintainRecordStates
	return nil
}
// Shutdown shuts down the database by closing the underlying badger DB.
func (b *Badger) Shutdown() error {
	return b.db.Close()
}

View File

@@ -0,0 +1,148 @@
package badger
import (
"context"
"os"
"reflect"
"sync"
"testing"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
var (
	// Compile time interface checks: Badger must implement the storage
	// interface and the optional maintainer extension.
	_ storage.Interface  = &Badger{}
	_ storage.Maintainer = &Badger{}
)
// TestRecord is a database record containing every primitive field
// type, used to verify that marshalling round-trips losslessly.
type TestRecord struct { //nolint:maligned
	record.Base
	sync.Mutex
	S    string
	I    int
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	UI   uint
	UI8  uint8
	UI16 uint16
	UI32 uint32
	UI64 uint64
	F32  float32
	F64  float64
	B    bool
}
// TestBadger exercises the badger backend end to end: put/get
// round-trip, querying, deletion and the maintenance operations.
func TestBadger(t *testing.T) {
	t.Parallel()
	testDir, err := os.MkdirTemp("", "testing-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = os.RemoveAll(testDir) // clean up
	}()
	// start
	db, err := NewBadger("test", testDir)
	if err != nil {
		t.Fatal(err)
	}
	a := &TestRecord{
		S:    "banana",
		I:    42,
		I8:   42,
		I16:  42,
		I32:  42,
		I64:  42,
		UI:   42,
		UI8:  42,
		UI16: 42,
		UI32: 42,
		UI64: 42,
		F32:  42.42,
		F64:  42.42,
		B:    true,
	}
	a.SetMeta(&record.Meta{})
	a.Meta().Update()
	a.SetKey("test:A")
	// put record
	_, err = db.Put(a)
	if err != nil {
		t.Fatal(err)
	}
	// get and compare
	r1, err := db.Get("A")
	if err != nil {
		t.Fatal(err)
	}
	a1 := &TestRecord{}
	err = record.Unwrap(r1, a1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(a, a1) {
		t.Fatalf("mismatch, got %v", a1)
	}
	// test query
	q := query.New("").MustBeValid()
	it, err := db.Query(q, true, true)
	if err != nil {
		t.Fatal(err)
	}
	cnt := 0
	for range it.Next {
		cnt++
	}
	if it.Err() != nil {
		// Report the iterator's error, not the (nil) err from above.
		t.Fatal(it.Err())
	}
	if cnt != 1 {
		t.Fatalf("unexpected query result count: %d", cnt)
	}
	// delete
	err = db.Delete("A")
	if err != nil {
		t.Fatal(err)
	}
	// check if its gone
	_, err = db.Get("A")
	if err == nil {
		t.Fatal("should fail")
	}
	// maintenance
	maintainer, ok := db.(storage.Maintainer)
	if ok {
		err = maintainer.Maintain(context.TODO())
		if err != nil {
			t.Fatal(err)
		}
		err = maintainer.MaintainThorough(context.TODO())
		if err != nil {
			t.Fatal(err)
		}
	} else {
		t.Fatal("should implement Maintainer")
	}
	// shutdown
	err = db.Shutdown()
	if err != nil {
		t.Fatal(err)
	}
}

View File

@@ -0,0 +1,427 @@
package bbolt
import (
"bytes"
"context"
"errors"
"fmt"
"path/filepath"
"time"
"go.etcd.io/bbolt"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
// bucketName is the single bucket all records are stored in.
var bucketName = []byte{0}
// BBolt database made pluggable for portbase.
type BBolt struct {
	name string    // database name, used as the wrapper's db identifier
	db   *bbolt.DB // underlying bbolt database handle
}
// init registers this backend under the "bbolt" storage type.
func init() {
	// Registration error is ignored: it can only fail on a duplicate
	// name, which would be a programming error caught in tests.
	_ = storage.Register("bbolt", NewBBolt)
}
// NewBBolt opens/creates a bbolt database.
// The database lives in a single "db.bbolt" file within location, and
// all records go into one shared bucket, which is created here.
func NewBBolt(name, location string) (storage.Interface, error) {
	// Create options for bbolt database.
	dbFile := filepath.Join(location, "db.bbolt")
	dbOptions := &bbolt.Options{
		Timeout: 1 * time.Second,
	}
	// Open/Create database, retry if there is a timeout.
	// bbolt takes an exclusive file lock; retry up to five times in
	// case another process is releasing it.
	db, err := bbolt.Open(dbFile, 0o0600, dbOptions)
	for i := 0; i < 5 && err != nil; i++ {
		// Try again if there is an error.
		db, err = bbolt.Open(dbFile, 0o0600, dbOptions)
	}
	if err != nil {
		return nil, err
	}
	// Create bucket
	err = db.Update(func(tx *bbolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists(bucketName)
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &BBolt{
		name: name,
		db:   db,
	}, nil
}
// Get returns the database record stored under key, or
// storage.ErrNotFound if it does not exist.
func (b *BBolt) Get(key string) (record.Record, error) {
	var r record.Record
	err := b.db.View(func(tx *bbolt.Tx) error {
		value := tx.Bucket(bucketName).Get([]byte(key))
		if value == nil {
			return storage.ErrNotFound
		}
		// Copy the value, as the returned slice is only valid for the
		// lifetime of the transaction.
		buf := make([]byte, len(value))
		copy(buf, value)
		var wrapErr error
		r, wrapErr = record.NewRawWrapper(b.name, key, buf)
		return wrapErr
	})
	if err != nil {
		return nil, err
	}
	return r, nil
}
// GetMeta returns the metadata of the record stored under key.
func (b *BBolt) GetMeta(key string) (*record.Meta, error) {
	// TODO: Replace with more performant variant.
	rec, err := b.Get(key)
	if err != nil {
		return nil, err
	}
	return rec.Meta(), nil
}
// Put stores a record in the database and returns it unchanged.
func (b *BBolt) Put(r record.Record) (record.Record, error) {
	data, err := r.MarshalRecord(r)
	if err != nil {
		return nil, err
	}
	if err := b.db.Update(func(tx *bbolt.Tx) error {
		return tx.Bucket(bucketName).Put([]byte(r.DatabaseKey()), data)
	}); err != nil {
		return nil, err
	}
	return r, nil
}
// PutMany stores many records in the database. It returns a channel to
// send the records on and an error channel that yields exactly one
// result after the caller closes the record channel. All records are
// written within a single bbolt batch transaction.
// NOTE(review): if a record fails, the batch transaction ends while the
// caller may still be sending; once the buffer is full those sends
// block - confirm callers stop sending after reading from errs.
func (b *BBolt) PutMany(shadowDelete bool) (chan<- record.Record, <-chan error) {
	batch := make(chan record.Record, 100)
	errs := make(chan error, 1)
	go func() {
		err := b.db.Batch(func(tx *bbolt.Tx) error {
			bucket := tx.Bucket(bucketName)
			for r := range batch {
				txErr := b.batchPutOrDelete(bucket, shadowDelete, r)
				if txErr != nil {
					return txErr
				}
			}
			return nil
		})
		errs <- err
	}()
	return batch, errs
}
// batchPutOrDelete writes a single record within a batch transaction:
// a record marked as deleted is removed immediately when shadowDelete
// is disabled; otherwise the (possibly shadow-deleted) record is
// marshalled and stored. The record is locked for the duration.
func (b *BBolt) batchPutOrDelete(bucket *bbolt.Bucket, shadowDelete bool, r record.Record) (err error) {
	r.Lock()
	defer r.Unlock()
	if !shadowDelete && r.Meta().IsDeleted() {
		// Immediate delete.
		err = bucket.Delete([]byte(r.DatabaseKey()))
	} else {
		// Put or shadow delete.
		var data []byte
		data, err = r.MarshalRecord(r)
		if err == nil {
			err = bucket.Put([]byte(r.DatabaseKey()), data)
		}
	}
	return err
}
// Delete removes the record stored under key from the database.
func (b *BBolt) Delete(key string) error {
	return b.db.Update(func(tx *bbolt.Tx) error {
		return tx.Bucket(bucketName).Delete([]byte(key))
	})
}
// Query returns an iterator that streams all records matching the
// supplied query. Matching runs asynchronously in a goroutine.
func (b *BBolt) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	if _, err := q.Check(); err != nil {
		return nil, fmt.Errorf("invalid query: %w", err)
	}
	it := iterator.New()
	go b.queryExecutor(it, q, local, internal)
	return it, nil
}
// queryExecutor scans all keys with the query's prefix and streams
// matching, valid and permitted records into queryIter. It is run in
// its own goroutine and always finishes the iterator with any error.
func (b *BBolt) queryExecutor(queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
	prefix := []byte(q.DatabaseKeyPrefix())
	err := b.db.View(func(tx *bbolt.Tx) error {
		// Create a cursor for iteration.
		c := tx.Bucket(bucketName).Cursor()
		// Iterate over items in sorted key order. This starts from the
		// first key/value pair and updates the k/v variables to the
		// next key/value on each iteration.
		//
		// The loop finishes at the end of the cursor when a nil key is returned.
		for key, value := c.Seek(prefix); key != nil; key, value = c.Next() {
			// if we don't match the prefix anymore, exit
			if !bytes.HasPrefix(key, prefix) {
				return nil
			}
			// wrap value
			iterWrapper, err := record.NewRawWrapper(b.name, string(key), value)
			if err != nil {
				return err
			}
			// check validity / access
			if !iterWrapper.Meta().CheckValidity() {
				continue
			}
			if !iterWrapper.Meta().CheckPermission(local, internal) {
				continue
			}
			// check if matches & send
			if q.MatchesRecord(iterWrapper) {
				// copy data - the value slice is only valid inside this
				// transaction, but the wrapper is handed to the consumer.
				duplicate := make([]byte, len(value))
				copy(duplicate, value)
				newWrapper, err := record.NewRawWrapper(b.name, iterWrapper.DatabaseKey(), duplicate)
				if err != nil {
					return err
				}
				// Non-blocking send first; fall back to blocking with a
				// one-second timeout so a stalled consumer cannot hold the
				// read transaction open indefinitely.
				select {
				case <-queryIter.Done:
					return nil
				case queryIter.Next <- newWrapper:
				default:
					select {
					case <-queryIter.Done:
						return nil
					case queryIter.Next <- newWrapper:
					case <-time.After(1 * time.Second):
						return errors.New("query timeout")
					}
				}
			}
		}
		return nil
	})
	queryIter.Finish(err)
}
// ReadOnly returns whether the database is read only.
// BBolt storages are always writable.
func (b *BBolt) ReadOnly() bool {
	return false
}
// Injected returns whether the database is injected.
// Always false for this backend.
func (b *BBolt) Injected() bool {
	return false
}
// MaintainRecordStates maintains records states in the database:
// expired records are marked as deleted (or removed immediately when
// shadowDelete is disabled), and records deleted before
// purgeDeletedBefore are purged. Cancelling ctx stops early without
// error.
func (b *BBolt) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error { //nolint:gocognit
	now := time.Now().Unix()
	purgeThreshold := purgeDeletedBefore.Unix()
	return b.db.Update(func(tx *bbolt.Tx) error {
		bucket := tx.Bucket(bucketName)
		// Create a cursor for iteration.
		c := bucket.Cursor()
		for key, value := c.First(); key != nil; key, value = c.Next() {
			// check if context is cancelled
			select {
			case <-ctx.Done():
				return nil
			default:
			}
			// wrap value
			wrapper, err := record.NewRawWrapper(b.name, string(key), value)
			if err != nil {
				return err
			}
			// check if we need to do maintenance
			meta := wrapper.Meta()
			switch {
			case meta.Deleted == 0 && meta.Expires > 0 && meta.Expires < now:
				// Record has expired but is not yet marked deleted.
				if shadowDelete {
					// mark as deleted
					meta.Deleted = meta.Expires
					deleted, err := wrapper.MarshalRecord(wrapper)
					if err != nil {
						return err
					}
					err = bucket.Put(key, deleted)
					if err != nil {
						return err
					}
					// Cursor repositioning is required after modifying data.
					// While the documentation states that this is also required after a
					// delete, this actually makes the cursor skip a record with the
					// following c.Next() call of the loop.
					// Docs/Issue: https://github.com/boltdb/bolt/issues/426#issuecomment-141982984
					c.Seek(key)
					continue
				}
				// Immediately delete expired entries if shadowDelete is disabled.
				fallthrough
			case meta.Deleted > 0 && (!shadowDelete || meta.Deleted < purgeThreshold):
				// delete from storage
				err = c.Delete()
				if err != nil {
					return err
				}
			}
		}
		return nil
	})
}
// Purge deletes all records that match the given query. It returns the number of successful deletes and an error.
// Deletion happens in batches of 1000 changes per transaction so other
// database operations can run in between; each new transaction seeks
// the query prefix again (already-deleted records are then skipped).
func (b *BBolt) Purge(ctx context.Context, q *query.Query, local, internal, shadowDelete bool) (int, error) { //nolint:gocognit
	prefix := []byte(q.DatabaseKeyPrefix())
	var cnt int
	var done bool
	for !done {
		err := b.db.Update(func(tx *bbolt.Tx) error {
			// Create a cursor for iteration.
			bucket := tx.Bucket(bucketName)
			c := bucket.Cursor()
			for key, value := c.Seek(prefix); key != nil; key, value = c.Next() {
				// Check if context has been cancelled.
				select {
				case <-ctx.Done():
					done = true
					return nil
				default:
				}
				// Check if we still match the key prefix, if not, exit.
				if !bytes.HasPrefix(key, prefix) {
					done = true
					return nil
				}
				// Wrap the value in a new wrapper to access the metadata.
				wrapper, err := record.NewRawWrapper(b.name, string(key), value)
				if err != nil {
					return err
				}
				// Check if we have permission for this record.
				if !wrapper.Meta().CheckPermission(local, internal) {
					continue
				}
				// Check if record is already deleted.
				if wrapper.Meta().IsDeleted() {
					continue
				}
				// Check if the query matches this record.
				if !q.MatchesRecord(wrapper) {
					continue
				}
				// Delete record.
				if shadowDelete {
					// Shadow delete.
					wrapper.Meta().Delete()
					deleted, err := wrapper.MarshalRecord(wrapper)
					if err != nil {
						return err
					}
					err = bucket.Put(key, deleted)
					if err != nil {
						return err
					}
					// Cursor repositioning is required after modifying data.
					// While the documentation states that this is also required after a
					// delete, this actually makes the cursor skip a record with the
					// following c.Next() call of the loop.
					// Docs/Issue: https://github.com/boltdb/bolt/issues/426#issuecomment-141982984
					c.Seek(key)
				} else {
					// Immediate delete.
					err = c.Delete()
					if err != nil {
						return err
					}
				}
				// Work in batches of 1000 changes in order to enable other operations in between.
				cnt++
				if cnt%1000 == 0 {
					// End this transaction; the outer loop starts a new one.
					return nil
				}
			}
			done = true
			return nil
		})
		if err != nil {
			return cnt, err
		}
	}
	return cnt, nil
}
// Shutdown shuts down the database by closing the underlying bbolt DB.
func (b *BBolt) Shutdown() error {
	return b.db.Close()
}

View File

@@ -0,0 +1,206 @@
package bbolt
import (
"context"
"os"
"reflect"
"sync"
"testing"
"time"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
var (
	// Compile time interface checks: BBolt must implement the storage
	// interface plus the batching and purging extensions.
	_ storage.Interface = &BBolt{}
	_ storage.Batcher   = &BBolt{}
	_ storage.Purger    = &BBolt{}
)
// TestRecord is a database record containing every primitive field
// type, used to verify that marshalling round-trips losslessly.
type TestRecord struct { //nolint:maligned
	record.Base
	sync.Mutex
	S    string
	I    int
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	UI   uint
	UI8  uint8
	UI16 uint16
	UI32 uint32
	UI64 uint64
	F32  float32
	F64  float64
	B    bool
}
// TestBBolt exercises the bbolt backend end to end: put/get
// round-trip, prefix queries, deletion, record-state maintenance and
// purging.
func TestBBolt(t *testing.T) {
	t.Parallel()
	testDir, err := os.MkdirTemp("", "testing-")
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		_ = os.RemoveAll(testDir) // clean up
	}()
	// start
	db, err := NewBBolt("test", testDir)
	if err != nil {
		t.Fatal(err)
	}
	a := &TestRecord{
		S:    "banana",
		I:    42,
		I8:   42,
		I16:  42,
		I32:  42,
		I64:  42,
		UI:   42,
		UI8:  42,
		UI16: 42,
		UI32: 42,
		UI64: 42,
		F32:  42.42,
		F64:  42.42,
		B:    true,
	}
	a.SetMeta(&record.Meta{})
	a.Meta().Update()
	a.SetKey("test:A")
	// put record
	_, err = db.Put(a)
	if err != nil {
		t.Fatal(err)
	}
	// get and compare
	r1, err := db.Get("A")
	if err != nil {
		t.Fatal(err)
	}
	a1 := &TestRecord{}
	err = record.Unwrap(r1, a1)
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(a, a1) {
		t.Fatalf("mismatch, got %v", a1)
	}
	// setup query test records
	qA := &TestRecord{}
	qA.SetKey("test:path/to/A")
	qA.CreateMeta()
	qB := &TestRecord{}
	qB.SetKey("test:path/to/B")
	qB.CreateMeta()
	qC := &TestRecord{}
	qC.SetKey("test:path/to/C")
	qC.CreateMeta()
	qZ := &TestRecord{}
	qZ.SetKey("test:z")
	qZ.CreateMeta()
	// put
	_, err = db.Put(qA)
	if err == nil {
		_, err = db.Put(qB)
	}
	if err == nil {
		_, err = db.Put(qC)
	}
	if err == nil {
		_, err = db.Put(qZ)
	}
	if err != nil {
		t.Fatal(err)
	}
	// test query
	q := query.New("test:path/to/").MustBeValid()
	it, err := db.Query(q, true, true)
	if err != nil {
		t.Fatal(err)
	}
	cnt := 0
	for range it.Next {
		cnt++
	}
	if it.Err() != nil {
		t.Fatal(it.Err())
	}
	if cnt != 3 {
		t.Fatalf("unexpected query result count: %d", cnt)
	}
	// delete
	err = db.Delete("A")
	if err != nil {
		t.Fatal(err)
	}
	// check if its gone
	_, err = db.Get("A")
	if err == nil {
		t.Fatal("should fail")
	}
	// maintenance
	err = db.MaintainRecordStates(context.TODO(), time.Now(), true)
	if err != nil {
		t.Fatal(err)
	}
	// maintenance
	err = db.MaintainRecordStates(context.TODO(), time.Now(), false)
	if err != nil {
		t.Fatal(err)
	}
	// purging
	purger, ok := db.(storage.Purger)
	if ok {
		n, err := purger.Purge(context.TODO(), query.New("test:path/to/").MustBeValid(), true, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if n != 3 {
			t.Fatalf("unexpected purge delete count: %d", n)
		}
	} else {
		t.Fatal("should implement Purger")
	}
	// test query
	q = query.New("test").MustBeValid()
	it, err = db.Query(q, true, true)
	if err != nil {
		t.Fatal(err)
	}
	cnt = 0
	for range it.Next {
		cnt++
	}
	if it.Err() != nil {
		t.Fatal(it.Err())
	}
	if cnt != 1 {
		t.Fatalf("unexpected query result count: %d", cnt)
	}
	// shutdown
	err = db.Shutdown()
	if err != nil {
		t.Fatal(err)
	}
}

View File

@@ -0,0 +1,8 @@
package storage
import "errors"
// Errors for storages.
var (
	// ErrNotFound is returned when the requested storage entry does
	// not exist (or is deleted/expired, depending on the backend).
	ErrNotFound = errors.New("storage entry not found")
)

View File

@@ -0,0 +1,302 @@
/*
Package fstree provides a dead simple file-based database storage backend.
It is primarily meant for easy testing or storing big files that can easily be accesses directly, without datastore.
*/
package fstree
import (
"context"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
"github.com/safing/portmaster/base/utils/renameio"
)
const (
	// defaultFileMode is used for record files written by this backend.
	defaultFileMode = os.FileMode(0o0644)
	// defaultDirMode is used when creating database directories.
	defaultDirMode = os.FileMode(0o0755)
	// onWindows toggles Windows-specific behavior (eg. chmod is skipped).
	onWindows = runtime.GOOS == "windows"
)
// FSTree database storage. Every record is stored as a single file
// below basePath, with the database key mapping to the relative path.
type FSTree struct {
	name     string // database name, used as the wrapper's db identifier
	basePath string // absolute root directory of the database
}
// init registers this backend under the "fstree" storage type.
func init() {
	// Registration error is ignored: it can only fail on a duplicate
	// name, which would be a programming error caught in tests.
	_ = storage.Register("fstree", NewFSTree)
}
// NewFSTree returns a (new) FSTree database rooted at location,
// creating the directory if it does not exist yet.
func NewFSTree(name, location string) (storage.Interface, error) {
	basePath, err := filepath.Abs(location)
	if err != nil {
		return nil, fmt.Errorf("fstree: failed to validate path %s: %w", location, err)
	}
	info, statErr := os.Stat(basePath)
	switch {
	case statErr == nil:
		// Path exists - it must be a directory.
		if !info.IsDir() {
			return nil, fmt.Errorf("fstree: provided database path (%s) is a file", basePath)
		}
	case errors.Is(statErr, fs.ErrNotExist):
		// Path is missing - create the directory tree.
		if mkErr := os.MkdirAll(basePath, defaultDirMode); mkErr != nil {
			return nil, fmt.Errorf("fstree: failed to create directory %s: %w", basePath, mkErr)
		}
	default:
		return nil, fmt.Errorf("fstree: failed to stat path %s: %w", basePath, statErr)
	}
	return &FSTree{
		name:     name,
		basePath: basePath,
	}, nil
}
// buildFilePath maps a database key to an absolute file path below the
// database base directory. If checkKeyLength is set, empty keys are
// rejected. The compiled path is verified to stay within the base
// directory, so keys containing traversal sequences cannot escape it.
func (fst *FSTree) buildFilePath(key string, checkKeyLength bool) (string, error) {
	// check key length
	if checkKeyLength && len(key) < 1 {
		return "", fmt.Errorf("fstree: key too short: %s", key)
	}
	// build filepath
	dstPath := filepath.Join(fst.basePath, key) // Join also calls Clean()
	// Guard against path traversal: the result must be the base path
	// itself or live below it. A plain string-prefix check is not
	// enough, as it would also accept sibling directories such as
	// "<basePath>X".
	if dstPath != fst.basePath &&
		!strings.HasPrefix(dstPath, fst.basePath+string(filepath.Separator)) {
		return "", fmt.Errorf("fstree: key integrity check failed, compiled path is %s", dstPath)
	}
	// return
	return dstPath, nil
}
// Get returns the database record stored under key, or
// storage.ErrNotFound if the backing file does not exist.
func (fst *FSTree) Get(key string) (record.Record, error) {
	dstPath, err := fst.buildFilePath(key, true)
	if err != nil {
		return nil, err
	}
	data, readErr := os.ReadFile(dstPath)
	switch {
	case readErr == nil:
		// File read successfully - wrap it as a raw record.
		r, wrapErr := record.NewRawWrapper(fst.name, key, data)
		if wrapErr != nil {
			return nil, wrapErr
		}
		return r, nil
	case errors.Is(readErr, fs.ErrNotExist):
		return nil, storage.ErrNotFound
	default:
		return nil, fmt.Errorf("fstree: failed to read file %s: %w", dstPath, readErr)
	}
}
// GetMeta returns the metadata of the record stored under key.
func (fst *FSTree) GetMeta(key string) (*record.Meta, error) {
	// TODO: Replace with more performant variant.
	r, err := fst.Get(key)
	if err != nil {
		return nil, err
	}
	return r.Meta(), nil
}
// Put stores a record in the database. The marshalled record is
// written to the file derived from its database key; missing parent
// directories are created on demand.
func (fst *FSTree) Put(r record.Record) (record.Record, error) {
	dstPath, err := fst.buildFilePath(r.DatabaseKey(), true)
	if err != nil {
		return nil, err
	}
	data, err := r.MarshalRecord(r)
	if err != nil {
		return nil, err
	}
	// Optimistically write first; only create the directory tree and
	// retry when the first attempt fails.
	err = writeFile(dstPath, data, defaultFileMode)
	if err != nil {
		// create dir and try again
		err = os.MkdirAll(filepath.Dir(dstPath), defaultDirMode)
		if err != nil {
			return nil, fmt.Errorf("fstree: failed to create directory %s: %w", filepath.Dir(dstPath), err)
		}
		err = writeFile(dstPath, data, defaultFileMode)
		if err != nil {
			return nil, fmt.Errorf("fstree: could not write file %s: %w", dstPath, err)
		}
	}
	return r, nil
}
// Delete removes the record stored under key from the database.
// NOTE(review): unlike the other backends, deleting a missing key
// returns an error here - confirm this difference is intended.
func (fst *FSTree) Delete(key string) error {
	dstPath, err := fst.buildFilePath(key, true)
	if err != nil {
		return err
	}
	// remove the backing file
	if rmErr := os.Remove(dstPath); rmErr != nil {
		return fmt.Errorf("fstree: could not delete %s: %w", dstPath, rmErr)
	}
	return nil
}
// Query returns an iterator for the supplied query. The directory to
// walk is derived from the query's key prefix; matching runs
// asynchronously in a goroutine.
func (fst *FSTree) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	_, err := q.Check()
	if err != nil {
		return nil, fmt.Errorf("invalid query: %w", err)
	}
	walkPrefix, err := fst.buildFilePath(q.DatabaseKeyPrefix(), false)
	if err != nil {
		return nil, err
	}
	fileInfo, err := os.Stat(walkPrefix)
	var walkRoot string
	switch {
	case err == nil && fileInfo.IsDir():
		// Prefix is a directory - walk it directly.
		walkRoot = walkPrefix
	case err == nil:
		// Prefix is a file - walk its parent directory.
		walkRoot = filepath.Dir(walkPrefix)
	case errors.Is(err, fs.ErrNotExist):
		// Prefix does not exist (eg. partial name) - walk the parent.
		walkRoot = filepath.Dir(walkPrefix)
	default: // err != nil
		return nil, fmt.Errorf("fstree: could not stat query root %s: %w", walkPrefix, err)
	}
	queryIter := iterator.New()
	go fst.queryExecutor(walkRoot, queryIter, q, local, internal)
	return queryIter, nil
}
// queryExecutor walks the file tree under walkRoot and streams
// matching, valid and permitted records into queryIter. It is run in
// its own goroutine and always finishes the iterator with any error.
// NOTE(review): keys are derived via filepath.Rel and therefore use
// the OS path separator - confirm key format on Windows.
func (fst *FSTree) queryExecutor(walkRoot string, queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
	err := filepath.Walk(walkRoot, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return fmt.Errorf("fstree: error in walking fs: %w", err)
		}
		if info.IsDir() {
			// skip dir if not in scope
			if !strings.HasPrefix(path, fst.basePath) {
				return filepath.SkipDir
			}
			// continue
			return nil
		}
		// still in scope?
		if !strings.HasPrefix(path, fst.basePath) {
			return nil
		}
		// read file
		data, err := os.ReadFile(path)
		if err != nil {
			// File vanished between walk and read - ignore.
			if errors.Is(err, fs.ErrNotExist) {
				return nil
			}
			return fmt.Errorf("fstree: failed to read file %s: %w", path, err)
		}
		// parse the database key from the path relative to the base
		key, err := filepath.Rel(fst.basePath, path)
		if err != nil {
			return fmt.Errorf("fstree: failed to extract key from filepath %s: %w", path, err)
		}
		r, err := record.NewRawWrapper(fst.name, key, data)
		if err != nil {
			return fmt.Errorf("fstree: failed to load file %s: %w", path, err)
		}
		if !r.Meta().CheckValidity() {
			// record is not valid
			return nil
		}
		if !r.Meta().CheckPermission(local, internal) {
			// no permission to access
			return nil
		}
		// check if matches, then send (with a timeout so a stalled
		// consumer cannot block the walk forever)
		if q.MatchesRecord(r) {
			select {
			case queryIter.Next <- r:
			case <-queryIter.Done:
			case <-time.After(1 * time.Second):
				return errors.New("fstree: query buffer full, timeout")
			}
		}
		return nil
	})
	queryIter.Finish(err)
}
// ReadOnly returns whether the database is read only.
// FSTree storages are always writable.
func (fst *FSTree) ReadOnly() bool {
	return false
}
// Injected returns whether the database is injected.
// Always false for this backend.
func (fst *FSTree) Injected() bool {
	return false
}
// MaintainRecordStates maintains records states in the database.
// Currently a no-op for this backend.
func (fst *FSTree) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error {
	// TODO: implement MaintainRecordStates
	return nil
}
// Shutdown shuts down the database. There is no open handle to close
// for this file-based backend, so this is a no-op.
func (fst *FSTree) Shutdown() error {
	return nil
}
// writeFile mirrors os.WriteFile, replacing an existing file with the same
// name atomically. This is not atomic on Windows, but still an improvement.
// It writes data to a temp file and renames it over filename on close.
// TODO: Replace with github.com/google/renamio.WriteFile as soon as it is fixed on Windows.
// TODO: This has become a wont-fix. Explore other options.
// This function is forked from https://github.com/google/renameio/blob/a368f9987532a68a3d676566141654a81aa8100b/writefile.go.
func writeFile(filename string, data []byte, perm os.FileMode) error {
	t, err := renameio.TempFile("", filename)
	if err != nil {
		return err
	}
	defer t.Cleanup() //nolint:errcheck
	// Set permissions before writing data, in case the data is sensitive.
	// Skipped on Windows, where chmod semantics differ.
	if !onWindows {
		if err := t.Chmod(perm); err != nil {
			return err
		}
	}
	if _, err := t.Write(data); err != nil {
		return err
	}
	return t.CloseAtomicallyReplace()
}

View File

@@ -0,0 +1,6 @@
package fstree
import "github.com/safing/portmaster/base/database/storage"
// Compile time interface checks.
// FSTree must implement the full storage interface.
var _ storage.Interface = &FSTree{}

View File

@@ -0,0 +1,216 @@
package hashmap
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
// HashMap storage. Purely in-memory, backed by a Go map; nothing is
// persisted, so all data is lost on shutdown.
type HashMap struct {
	name   string                   // database name, used as the wrapper's db identifier
	db     map[string]record.Record // key -> record store
	dbLock sync.RWMutex             // guards db
}
// init registers this backend under the "hashmap" storage type.
func init() {
	// Registration error is ignored: it can only fail on a duplicate
	// name, which would be a programming error caught in tests.
	_ = storage.Register("hashmap", NewHashMap)
}
// NewHashMap creates a hashmap database.
// The location parameter is ignored, as all data is held in memory.
func NewHashMap(name, location string) (storage.Interface, error) {
	return &HashMap{
		name: name,
		db:   make(map[string]record.Record),
	}, nil
}
// Get returns the record stored under key, or storage.ErrNotFound.
// The stored record instance is returned directly, without copying.
func (hm *HashMap) Get(key string) (record.Record, error) {
	hm.dbLock.RLock()
	defer hm.dbLock.RUnlock()
	if r, ok := hm.db[key]; ok {
		return r, nil
	}
	return nil, storage.ErrNotFound
}
// GetMeta returns the metadata of the record stored under key.
func (hm *HashMap) GetMeta(key string) (*record.Meta, error) {
	// TODO: Replace with more performant variant.
	r, err := hm.Get(key)
	if err != nil {
		return nil, err
	}
	return r.Meta(), nil
}
// Put stores a record in the database. The record instance itself is
// stored (no copy, no marshalling).
func (hm *HashMap) Put(r record.Record) (record.Record, error) {
	hm.dbLock.Lock()
	defer hm.dbLock.Unlock()
	hm.db[r.DatabaseKey()] = r
	return r, nil
}
// PutMany stores many records in the database. It returns a channel to
// send records on and an error channel that yields a single (always
// nil) result once the caller closes the record channel.
func (hm *HashMap) PutMany(shadowDelete bool) (chan<- record.Record, <-chan error) {
	hm.dbLock.Lock()
	defer hm.dbLock.Unlock()
	// we could lock for every record, but we want to have the same behaviour
	// as the other storage backends, especially for testing.
	// NOTE(review): this lock is released when PutMany returns, and the
	// handler goroutine locks per record anyway - confirm whether
	// holding the lock during setup is still needed.
	batch := make(chan record.Record, 100)
	errs := make(chan error, 1)
	// start handler
	go func() {
		for r := range batch {
			hm.batchPutOrDelete(shadowDelete, r)
		}
		errs <- nil
	}()
	return batch, errs
}
// batchPutOrDelete applies a single batched record: records marked as
// deleted are removed from the map when shadowDelete is disabled,
// otherwise the record is stored (possibly as a shadow delete).
// The record lock is taken before the map lock.
func (hm *HashMap) batchPutOrDelete(shadowDelete bool, r record.Record) {
	r.Lock()
	defer r.Unlock()
	hm.dbLock.Lock()
	defer hm.dbLock.Unlock()
	if !shadowDelete && r.Meta().IsDeleted() {
		delete(hm.db, r.DatabaseKey())
	} else {
		hm.db[r.DatabaseKey()] = r
	}
}
// Delete deletes a record from the database.
// Deleting a non-existing key is not an error.
func (hm *HashMap) Delete(key string) error {
	hm.dbLock.Lock()
	defer hm.dbLock.Unlock()
	delete(hm.db, key)
	return nil
}
// Query returns an iterator that streams all records matching the
// supplied query. Matching runs asynchronously in a goroutine.
func (hm *HashMap) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	if _, err := q.Check(); err != nil {
		return nil, fmt.Errorf("invalid query: %w", err)
	}
	it := iterator.New()
	go hm.queryExecutor(it, q, local, internal)
	return it, nil
}
// queryExecutor scans the whole map and feeds matching records into
// queryIter. It runs in its own goroutine and finishes the iterator when
// the scan completes, the consumer cancels iteration, or a send times out.
//
// Fix: the loop variable was named `record`, shadowing the imported
// record package within the loop body; renamed to `r`.
func (hm *HashMap) queryExecutor(queryIter *iterator.Iterator, q *query.Query, local, internal bool) {
	hm.dbLock.RLock()
	defer hm.dbLock.RUnlock()

	var err error
mapLoop:
	for key, r := range hm.db {
		// Check the record under its own lock before emitting it.
		r.Lock()
		if !q.MatchesKey(key) ||
			!q.MatchesRecord(r) ||
			!r.Meta().CheckValidity() ||
			!r.Meta().CheckPermission(local, internal) {
			r.Unlock()
			continue
		}
		r.Unlock()

		// Fast path: non-blocking send.
		select {
		case <-queryIter.Done:
			break mapLoop
		case queryIter.Next <- r:
		default:
			// Slow path: block, but give up after one second so a stalled
			// consumer cannot hold the read lock forever.
			select {
			case <-queryIter.Done:
				break mapLoop
			case queryIter.Next <- r:
			case <-time.After(1 * time.Second):
				err = errors.New("query timeout")
				break mapLoop
			}
		}
	}
	queryIter.Finish(err)
}
// ReadOnly returns whether the database is read only. The in-memory
// HashMap is always writable.
func (hm *HashMap) ReadOnly() bool {
	return false
}

// Injected returns whether the database is injected. HashMap is a regular
// storage backend, not an injected one.
func (hm *HashMap) Injected() bool {
	return false
}
// MaintainRecordStates maintains record states in the database: expired
// records are marked as deleted (or removed outright when shadowDelete is
// disabled), and records deleted before purgeDeletedBefore are purged.
//
// Fix: the loop variable was named `record`, shadowing the imported
// record package within the loop body; renamed to `r`.
func (hm *HashMap) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error {
	hm.dbLock.Lock()
	defer hm.dbLock.Unlock()

	now := time.Now().Unix()
	purgeThreshold := purgeDeletedBefore.Unix()

	for key, r := range hm.db {
		// Abort between entries if the context is cancelled.
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		meta := r.Meta()
		switch {
		case meta.Deleted == 0 && meta.Expires > 0 && meta.Expires < now:
			if shadowDelete {
				// Mark record as deleted.
				r.Lock()
				meta.Deleted = meta.Expires
				r.Unlock()
				continue
			}
			// Immediately delete expired entries if shadowDelete is disabled.
			fallthrough
		case meta.Deleted > 0 && (!shadowDelete || meta.Deleted < purgeThreshold):
			// Remove entry from storage.
			delete(hm.db, key)
		}
	}
	return nil
}
// Shutdown shuts down the database. The HashMap holds no external
// resources, so there is nothing to release.
func (hm *HashMap) Shutdown() error {
	return nil
}

View File

@@ -0,0 +1,145 @@
package hashmap
import (
"reflect"
"sync"
"testing"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
var (
	// Compile time interface checks.
	_ storage.Interface = (*HashMap)(nil)
	_ storage.Batcher   = (*HashMap)(nil)
)
// TestRecord is a database record used in tests, covering all common
// scalar field types.
type TestRecord struct { //nolint:maligned
	record.Base
	sync.Mutex
	S    string
	I    int
	I8   int8
	I16  int16
	I32  int32
	I64  int64
	UI   uint
	UI8  uint8
	UI16 uint16
	UI32 uint32
	UI64 uint64
	F32  float32
	F64  float64
	B    bool
}
// TestHashMap exercises the basic storage operations of the in-memory
// HashMap backend: put, get, query, delete, and shutdown.
func TestHashMap(t *testing.T) {
	t.Parallel()

	// Create the storage under test.
	db, err := NewHashMap("test", "")
	if err != nil {
		t.Fatal(err)
	}

	a := &TestRecord{
		S:    "banana",
		I:    42,
		I8:   42,
		I16:  42,
		I32:  42,
		I64:  42,
		UI:   42,
		UI8:  42,
		UI16: 42,
		UI32: 42,
		UI64: 42,
		F32:  42.42,
		F64:  42.42,
		B:    true,
	}
	a.SetMeta(&record.Meta{})
	a.Meta().Update()
	a.SetKey("test:A")

	// Store the record.
	if _, err := db.Put(a); err != nil {
		t.Fatal(err)
	}

	// Fetch it again and verify it is unchanged.
	a1, err := db.Get("A")
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(a, a1) {
		t.Fatalf("mismatch, got %v", a1)
	}

	// Create and store the query test records.
	for _, key := range []string{
		"test:path/to/A",
		"test:path/to/B",
		"test:path/to/C",
		"test:z",
	} {
		r := &TestRecord{}
		r.SetKey(key)
		r.CreateMeta()
		if _, err := db.Put(r); err != nil {
			t.Fatal(err)
		}
	}

	// Query for everything under test:path/to/ and count the results.
	q := query.New("test:path/to/").MustBeValid()
	it, err := db.Query(q, true, true)
	if err != nil {
		t.Fatal(err)
	}
	cnt := 0
	for range it.Next {
		cnt++
	}
	if it.Err() != nil {
		t.Fatal(it.Err())
	}
	if cnt != 3 {
		t.Fatalf("unexpected query result count: %d", cnt)
	}

	// Delete the first record.
	if err := db.Delete("A"); err != nil {
		t.Fatal(err)
	}

	// It must be gone now.
	if _, err := db.Get("A"); err == nil {
		t.Fatal("should fail")
	}

	// Shut the storage down.
	if err := db.Shutdown(); err != nil {
		t.Fatal(err)
	}
}

View File

@@ -0,0 +1,60 @@
package storage
import (
"context"
"errors"
"time"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
)
// ErrNotImplemented is returned when a function is not implemented by a storage.
var ErrNotImplemented = errors.New("not implemented")

// InjectBase is a dummy base structure to reduce boilerplate code for
// injected storage interfaces. Embed it and override only the methods the
// injected storage actually supports.
type InjectBase struct{}

// Compile time interface check.
var _ Interface = &InjectBase{}
// Get returns a database record. Not implemented by InjectBase.
func (i *InjectBase) Get(key string) (record.Record, error) {
	return nil, ErrNotImplemented
}

// Put stores a record in the database. Not implemented by InjectBase.
func (i *InjectBase) Put(m record.Record) (record.Record, error) {
	return nil, ErrNotImplemented
}

// Delete deletes a record from the database. Not implemented by InjectBase.
func (i *InjectBase) Delete(key string) error {
	return ErrNotImplemented
}

// Query returns an iterator for the supplied query. Not implemented by InjectBase.
func (i *InjectBase) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	return nil, ErrNotImplemented
}

// ReadOnly returns whether the database is read only. Injected storages
// default to read only.
func (i *InjectBase) ReadOnly() bool {
	return true
}

// Injected returns whether the database is injected. Always true for InjectBase.
func (i *InjectBase) Injected() bool {
	return true
}

// MaintainRecordStates maintains records states in the database. This is
// a no-op for injected storages.
func (i *InjectBase) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error {
	return nil
}

// Shutdown shuts down the database. This is a no-op for injected storages.
func (i *InjectBase) Shutdown() error {
	return nil
}

View File

@@ -0,0 +1,48 @@
package storage
import (
"context"
"time"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
)
// Interface defines the database storage API that every storage backend
// must implement.
type Interface interface {
	// Primary Interface
	Get(key string) (record.Record, error)
	Put(m record.Record) (record.Record, error)
	Delete(key string) error
	Query(q *query.Query, local, internal bool) (*iterator.Iterator, error)

	// Information and Control
	ReadOnly() bool
	Injected() bool
	Shutdown() error

	// Mandatory Record Maintenance
	MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error
}

// MetaHandler defines the database storage API for backends that support
// optimized fetching of only the metadata.
type MetaHandler interface {
	GetMeta(key string) (*record.Meta, error)
}

// Maintainer defines the database storage API for backends that require
// regular maintenance.
type Maintainer interface {
	Maintain(ctx context.Context) error
	MaintainThorough(ctx context.Context) error
}

// Batcher defines the database storage API for backends that support
// batch operations.
type Batcher interface {
	PutMany(shadowDelete bool) (batch chan<- record.Record, errs <-chan error)
}

// Purger defines the database storage API for backends that support the
// purge operation.
type Purger interface {
	Purge(ctx context.Context, q *query.Query, local, internal, shadowDelete bool) (int, error)
}

View File

@@ -0,0 +1,111 @@
package sinkhole
import (
"context"
"errors"
"time"
"github.com/safing/portmaster/base/database/iterator"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/database/storage"
)
// Sinkhole is a dummy storage that discards all writes and returns
// nothing on reads.
type Sinkhole struct {
	name string
}

var (
	// Compile time interface checks.
	_ storage.Interface  = &Sinkhole{}
	_ storage.Maintainer = &Sinkhole{}
	_ storage.Batcher    = &Sinkhole{}
)

func init() {
	// Registration can only fail if the name is already taken, which
	// cannot happen for this single init-time registration.
	_ = storage.Register("sinkhole", NewSinkhole)
}
// NewSinkhole creates a dummy database that discards everything written
// to it. The location argument is accepted for factory-signature
// compatibility but ignored, as nothing is ever stored.
func NewSinkhole(name, location string) (storage.Interface, error) {
	s := &Sinkhole{name: name}
	return s, nil
}
// Exists returns whether an entry with the given key exists. The Sinkhole
// never stores anything, so this is always false.
func (s *Sinkhole) Exists(key string) (bool, error) {
	return false, nil
}

// Get returns a database record. The Sinkhole holds no records, so this
// always returns storage.ErrNotFound.
func (s *Sinkhole) Get(key string) (record.Record, error) {
	return nil, storage.ErrNotFound
}

// GetMeta returns the metadata of a database record. Always returns
// storage.ErrNotFound.
func (s *Sinkhole) GetMeta(key string) (*record.Meta, error) {
	return nil, storage.ErrNotFound
}

// Put stores a record in the database. The Sinkhole discards the record
// and reports success.
func (s *Sinkhole) Put(r record.Record) (record.Record, error) {
	return r, nil
}
// PutMany returns a batch channel that discards every record sent to it.
// After the caller closes the batch channel, a nil result is delivered on
// the error channel.
func (s *Sinkhole) PutMany(shadowDelete bool) (chan<- record.Record, <-chan error) {
	batch := make(chan record.Record, 100)
	errs := make(chan error, 1)

	// Drain the batch channel until the caller closes it.
	go func() {
		for range batch { //nolint:revive // Intentionally discard everything.
		}
		errs <- nil
	}()

	return batch, errs
}
// Delete deletes a record from the database. Always a no-op success for
// the Sinkhole.
func (s *Sinkhole) Delete(key string) error {
	return nil
}

// Query returns an iterator for the supplied query. Not supported by the
// Sinkhole.
func (s *Sinkhole) Query(q *query.Query, local, internal bool) (*iterator.Iterator, error) {
	return nil, errors.New("query not implemented by sinkhole")
}

// ReadOnly returns whether the database is read only.
func (s *Sinkhole) ReadOnly() bool {
	return false
}

// Injected returns whether the database is injected.
func (s *Sinkhole) Injected() bool {
	return false
}

// Maintain runs a light maintenance operation on the database. No-op for
// the Sinkhole.
func (s *Sinkhole) Maintain(ctx context.Context) error {
	return nil
}

// MaintainThorough runs a thorough maintenance operation on the database.
// No-op for the Sinkhole.
func (s *Sinkhole) MaintainThorough(ctx context.Context) error {
	return nil
}

// MaintainRecordStates maintains records states in the database. No-op
// for the Sinkhole.
func (s *Sinkhole) MaintainRecordStates(ctx context.Context, purgeDeletedBefore time.Time, shadowDelete bool) error {
	return nil
}

// Shutdown shuts down the database. No-op for the Sinkhole.
func (s *Sinkhole) Shutdown() error {
	return nil
}

View File

@@ -0,0 +1,47 @@
package storage
import (
"errors"
"fmt"
"sync"
)
// A Factory creates a new database of its type.
type Factory func(name, location string) (Interface, error)

var (
	// storages maps registered storage type names to their factories,
	// guarded by storagesLock.
	storages     = make(map[string]Factory)
	storagesLock sync.Mutex
)
// Register registers a new storage type. It returns an error if a factory
// is already registered under the given name.
func Register(name string, factory Factory) error {
	storagesLock.Lock()
	defer storagesLock.Unlock()

	if _, ok := storages[name]; ok {
		return errors.New("factory for this type already exists")
	}
	storages[name] = factory
	return nil
}
// CreateDatabase starts a new database with the given name and storageType
// at location.
//
// Creation is not yet implemented. Fix: the stub previously returned
// nil, nil — a nil Interface with no error — which would make any caller
// that checks only the error proceed and panic on the nil value. It now
// returns an explicit error instead.
func CreateDatabase(name, storageType, location string) (Interface, error) {
	return nil, errors.New("creating a new database is not implemented")
}
// StartDatabase starts a new database with the given name and storageType
// at location, using the factory registered for that storage type.
func StartDatabase(name, storageType, location string) (Interface, error) {
	storagesLock.Lock()
	defer storagesLock.Unlock()

	factory, ok := storages[storageType]
	if !ok {
		return nil, fmt.Errorf("storage type %s not registered", storageType)
	}
	return factory(name, location)
}