Restructure modules (#1572)

* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
This commit is contained in:
Daniel Hååvi
2024-08-09 17:15:48 +02:00
committed by GitHub
parent 10a77498f4
commit 80664d1a27
647 changed files with 37690 additions and 3366 deletions

View File

@@ -1,36 +1,69 @@
package status
import (
"context"
"errors"
"fmt"
"sync"
"sync/atomic"
"github.com/safing/portbase/modules"
"github.com/safing/portbase/utils/debug"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/base/runtime"
"github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/netenv"
)
var module *modules.Module
// Status Module manages status information.
type Status struct {
mgr *mgr.Manager
instance instance
func init() {
module = modules.Register("status", nil, start, nil, "base", "config")
publishUpdate runtime.PushFunc
triggerUpdate chan struct{}
states map[string]mgr.StateUpdate
statesLock sync.Mutex
notifications map[string]map[string]*notifications.Notification
notificationsLock sync.Mutex
}
func start() error {
if err := setupRuntimeProvider(); err != nil {
// Manager returns the module manager.
// NOTE(review): presumably called by the module system to control this
// module's lifecycle — confirm against the mgr package's expectations.
func (s *Status) Manager() *mgr.Manager {
	return s.mgr
}
// Start starts the module.
func (s *Status) Start() error {
if err := s.setupRuntimeProvider(); err != nil {
return err
}
if err := module.RegisterEventHook(
netenv.ModuleName,
netenv.OnlineStatusChangedEvent,
"update online status in system status",
func(_ context.Context, _ interface{}) error {
pushSystemStatus()
return nil
s.mgr.Go("status publisher", s.statusPublisher)
s.instance.NetEnv().EventOnlineStatusChange.AddCallback("update online status in system status",
func(_ *mgr.WorkerCtx, _ netenv.OnlineStatus) (bool, error) {
s.triggerPublishStatus()
return false, nil
},
); err != nil {
return err
)
// Make an initial status query.
s.statesLock.Lock()
defer s.statesLock.Unlock()
// Add status callback within the lock so we can force the right order.
s.instance.AddStatesCallback("status update", s.handleModuleStatusUpdate)
// Get initial states.
for _, stateUpdate := range s.instance.GetStates() {
s.states[stateUpdate.Module] = stateUpdate
s.deriveNotificationsFromStateUpdate(stateUpdate)
}
return nil
}
// Stop stops the module. It is a no-op; the status publisher worker is
// presumably stopped via the module manager — confirm with the mgr package.
func (s *Status) Stop() error {
	return nil
}
@@ -43,3 +76,31 @@ func AddToDebugInfo(di *debug.Info) {
"CaptivePortal: "+netenv.GetCaptivePortal().URL,
)
}
var (
	// module holds the singleton Status instance created by New.
	module *Status
	// shimLoaded guards against creating more than one Status instance.
	shimLoaded atomic.Bool
)
// New returns a new status module.
// Only a single instance may ever be created; any further call
// returns an error.
func New(instance instance) (*Status, error) {
	// Enforce the singleton constraint atomically.
	if !shimLoaded.CompareAndSwap(false, true) {
		return nil, errors.New("only one instance allowed")
	}

	module = &Status{
		mgr:           mgr.New("Status"),
		instance:      instance,
		triggerUpdate: make(chan struct{}, 1),
		states:        make(map[string]mgr.StateUpdate),
		notifications: make(map[string]map[string]*notifications.Notification),
	}
	return module, nil
}
// instance describes the capabilities the status module needs from its
// parent instance: network environment access and module state queries
// plus state-change callbacks.
type instance interface {
	NetEnv() *netenv.NetEnv
	GetStates() []mgr.StateUpdate
	AddStatesCallback(callbackName string, callback mgr.EventCallbackFunc[mgr.StateUpdate])
}

View File

@@ -0,0 +1,74 @@
package status
import (
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
)
// deriveNotificationsFromStateUpdate synchronizes the notifications of one
// module with its reported states: every state gets exactly one notification
// (pre-created ones are adopted), and notifications whose state is no longer
// reported are deleted.
func (s *Status) deriveNotificationsFromStateUpdate(update mgr.StateUpdate) {
	s.notificationsLock.Lock()
	defer s.notificationsLock.Unlock()

	// Get or create the per-module notification registry.
	moduleNotifs := s.notifications[update.Module]
	if moduleNotifs == nil {
		moduleNotifs = make(map[string]*notifications.Notification)
		s.notifications[update.Module] = moduleNotifs
	}

	// Ensure every reported state has a notification.
	activeStateIDs := make(map[string]struct{}, len(update.States))
	for _, state := range update.States {
		activeStateIDs[state.ID] = struct{}{}

		// Skip states that already have a notification registered.
		if _, exists := moduleNotifs[state.ID]; exists {
			continue
		}

		// Adopt a pre-created notification if one matches the state ID.
		if existing := notifications.Get(state.ID); existing != nil {
			moduleNotifs[state.ID] = existing
			continue
		}

		// Otherwise, create a new notification for this state.
		notif := &notifications.Notification{
			EventID: state.ID,
			Title:   state.Name,
			Message: state.Message,
			AvailableActions: []*notifications.Action{
				{
					Text:    "Get Help",
					Type:    notifications.ActionTypeOpenURL,
					Payload: "https://safing.io/support/",
				},
			},
		}
		// Map the state severity to the notification type; only warnings
		// and errors are surfaced on the system and offer the help action.
		switch state.Type {
		case mgr.StateTypeWarning:
			notif.Type = notifications.Warning
			notif.ShowOnSystem = true
		case mgr.StateTypeError:
			notif.Type = notifications.Error
			notif.ShowOnSystem = true
		case mgr.StateTypeHint, mgr.StateTypeUndefined:
			fallthrough
		default:
			notif.Type = notifications.Info
			notif.AvailableActions = nil
		}

		moduleNotifs[state.ID] = notif
		notifications.Notify(notif)
	}

	// Delete notifications whose state is no longer reported.
	for stateID, notif := range moduleNotifs {
		if _, stillActive := activeStateIDs[stateID]; !stillActive {
			notif.Delete()
			delete(moduleNotifs, stateID)
		}
	}
}

View File

@@ -1,49 +0,0 @@
package status
import (
"github.com/safing/portbase/database/record"
"github.com/safing/portbase/runtime"
"github.com/safing/portmaster/service/netenv"
)
var pushUpdate runtime.PushFunc
// setupRuntimeProvider registers the getter that serves the system status
// at runtime:system/status and stores the returned push function for
// publishing updates later.
func setupRuntimeProvider() (err error) {
	// Each query builds a fresh status record.
	getter := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
		return []record.Record{buildSystemStatus()}, nil
	})
	pushUpdate, err = runtime.Register("system/status", getter)
	return err
}
// buildSystemStatus assembles a fresh system status record from the
// current online status and captive portal info of the netenv package.
func buildSystemStatus() *SystemStatusRecord {
	sysStatus := new(SystemStatusRecord)
	sysStatus.OnlineStatus = netenv.GetOnlineStatus()
	sysStatus.CaptivePortal = netenv.GetCaptivePortal()

	// Initialize record metadata and assign the runtime database key.
	sysStatus.CreateMeta()
	sysStatus.SetKey("runtime:system/status")
	return sysStatus
}
// pushSystemStatus pushes a new system status via
// the runtime database.
func pushSystemStatus() {
	// pushUpdate is only set after setupRuntimeProvider succeeded.
	if pushUpdate == nil {
		return
	}

	// Local renamed from "record": the previous name shadowed the
	// imported record package within this function.
	statusRecord := buildSystemStatus()
	statusRecord.Lock()
	defer statusRecord.Unlock()
	pushUpdate(statusRecord)
}

View File

@@ -1,23 +0,0 @@
package status
import (
"sync"
"github.com/safing/portbase/database/record"
"github.com/safing/portmaster/service/netenv"
)
// SystemStatusRecord describes the overall status of the Portmaster.
// It's a read-only record exposed via runtime:system/status.
type SystemStatusRecord struct {
	record.Base
	// The embedded Mutex guards concurrent access while the record is
	// being pushed (see pushSystemStatus, which locks before pushing).
	sync.Mutex

	// OnlineStatus holds the current online status as
	// seen by the netenv package.
	OnlineStatus netenv.OnlineStatus
	// CaptivePortal holds all information about the captive
	// portal of the network the portmaster is currently
	// connected to, if any.
	CaptivePortal *netenv.CaptivePortal
}

View File

@@ -1,6 +1,6 @@
package status
import "github.com/safing/portbase/config"
import "github.com/safing/portmaster/base/config"
// MigrateSecurityLevelToBoolean migrates a security level (int) option value to a boolean option value.
func MigrateSecurityLevelToBoolean(option *config.Option, value any) any {

122
service/status/status.go Normal file
View File

@@ -0,0 +1,122 @@
package status
import (
"slices"
"strings"
"sync"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/runtime"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/netenv"
)
// SystemStatusRecord describes the overall status of the Portmaster.
// It's a read-only record exposed via runtime:system/status.
type SystemStatusRecord struct {
	record.Base
	sync.Mutex

	// OnlineStatus holds the current online status as
	// seen by the netenv package.
	OnlineStatus netenv.OnlineStatus
	// CaptivePortal holds all information about the captive
	// portal of the network the portmaster is currently
	// connected to, if any.
	CaptivePortal *netenv.CaptivePortal

	// Modules holds the latest state update of every module,
	// sorted by module name (see buildSystemStatus).
	Modules []mgr.StateUpdate
	// WorstState holds the most severe state across all modules,
	// together with the name of the module that reported it.
	WorstState struct {
		Module string
		mgr.State
	}
}
// handleModuleStatusUpdate is the states callback: it records the module's
// new state update, refreshes the derived notifications and schedules an
// async publication of the overall status.
// It always returns false so the callback stays registered.
func (s *Status) handleModuleStatusUpdate(_ *mgr.WorkerCtx, update mgr.StateUpdate) (cancel bool, err error) {
	s.statesLock.Lock()
	defer s.statesLock.Unlock()

	s.states[update.Module] = update
	s.deriveNotificationsFromStateUpdate(update)
	s.triggerPublishStatus()
	return false, nil
}
// triggerPublishStatus signals the status publisher worker that the status
// changed. The send is non-blocking: triggerUpdate has capacity 1, so a
// single pending trigger is enough and additional signals are dropped.
func (s *Status) triggerPublishStatus() {
	select {
	case s.triggerUpdate <- struct{}{}:
	default:
	}
}
// statusPublisher is a worker that publishes the system status each time it
// is triggered, until the worker context is done.
func (s *Status) statusPublisher(wc *mgr.WorkerCtx) error {
	for {
		select {
		case <-s.triggerUpdate:
			s.publishSystemStatus()
		case <-wc.Done():
			return nil
		}
	}
}
// setupRuntimeProvider registers the getter that serves the system status
// at runtime:system/status and stores the returned push function, which
// publishSystemStatus later uses to push updated records.
func (s *Status) setupRuntimeProvider() (err error) {
	// Each query builds a fresh status record.
	getter := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
		return []record.Record{s.buildSystemStatus()}, nil
	})
	s.publishUpdate, err = runtime.Register("system/status", getter)
	return err
}
// buildSystemStatus builds a new system status record from the current
// netenv state and the latest module state updates, and determines the
// most severe state across all modules (WorstState).
func (s *Status) buildSystemStatus() *SystemStatusRecord {
	s.statesLock.Lock()
	defer s.statesLock.Unlock()

	status := &SystemStatusRecord{
		CaptivePortal: netenv.GetCaptivePortal(),
		OnlineStatus:  netenv.GetOnlineStatus(),
		Modules:       make([]mgr.StateUpdate, 0, len(s.states)),
	}
	for _, newStateUpdate := range s.states {
		// Deep copy the states slice so the record does not alias the
		// internally held state updates.
		newStateUpdate.States = append([]mgr.State(nil), newStateUpdate.States...)
		status.Modules = append(status.Modules, newStateUpdate)

		// Check if any state is the worst seen so far.
		// (Removed leftover debug logging at Error level here: it fired on
		// every routine status build, not only on actual status changes.)
		for _, state := range newStateUpdate.States {
			if state.Type.Severity() > status.WorstState.Type.Severity() {
				status.WorstState.State = state
				status.WorstState.Module = newStateUpdate.Module
			}
		}
	}
	// Sort by module name for deterministic output, as map iteration
	// order is random.
	slices.SortFunc(status.Modules, func(a, b mgr.StateUpdate) int {
		return strings.Compare(a.Module, b.Module)
	})

	status.CreateMeta()
	status.SetKey("runtime:system/status")
	return status
}
// publishSystemStatus pushes a new system status via
// the runtime database.
func (s *Status) publishSystemStatus() {
	// publishUpdate is only set after setupRuntimeProvider succeeded.
	if s.publishUpdate == nil {
		return
	}

	// Local renamed from "record": the previous name shadowed the
	// imported record package within this function.
	statusRecord := s.buildSystemStatus()
	statusRecord.Lock()
	defer statusRecord.Unlock()
	s.publishUpdate(statusRecord)
}