wip: migrate to mono-repo. SPN has already been moved to spn/
This commit is contained in:
68
spn/captain/api.go
Normal file
68
spn/captain/api.go
Normal file
@@ -0,0 +1,68 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/safing/portbase/api"
|
||||
"github.com/safing/portbase/database"
|
||||
"github.com/safing/portbase/database/query"
|
||||
"github.com/safing/portbase/modules"
|
||||
)
|
||||
|
||||
const (
|
||||
apiPathForSPNReInit = "spn/reinit"
|
||||
)
|
||||
|
||||
func registerAPIEndpoints() error {
|
||||
if err := api.RegisterEndpoint(api.Endpoint{
|
||||
Path: apiPathForSPNReInit,
|
||||
Write: api.PermitAdmin,
|
||||
// BelongsTo: module, // Do not attach to module, as this must run outside of the module.
|
||||
ActionFunc: handleReInit,
|
||||
Name: "Re-initialize SPN",
|
||||
Description: "Stops the SPN, resets all caches and starts it again. The SPN account and settings are not changed.",
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleReInit implements the "spn/reinit" API endpoint: it disables the SPN
// module, stops it, purges all SPN cache records from the database, then
// re-enables and restarts the module. The SPN account and settings are not
// touched. Returns a human-readable completion message or an error.
func handleReInit(ar *api.Request) (msg string, err error) {
	// Disable module and check
	changed := module.Disable()
	if !changed {
		// Re-init only makes sense while the SPN is running.
		return "", errors.New("can only re-initialize when the SPN is enabled")
	}

	// Run module manager.
	err = modules.ManageModules()
	if err != nil {
		return "", fmt.Errorf("failed to stop SPN: %w", err)
	}

	// Delete SPN cache.
	db := database.NewInterface(&database.Options{
		Local:    true,
		Internal: true,
	})
	deletedRecords, err := db.Purge(ar.Context(), query.New("cache:spn/"))
	if err != nil {
		// NOTE(review): the module stays disabled on purge failure, leaving the
		// SPN stopped — confirm this is the intended failure mode.
		return "", fmt.Errorf("failed to delete SPN cache: %w", err)
	}

	// Enable module.
	module.Enable()

	// Run module manager.
	err = modules.ManageModules()
	if err != nil {
		return "", fmt.Errorf("failed to start SPN after cache reset: %w", err)
	}

	return fmt.Sprintf(
		"Completed SPN re-initialization and deleted %d cache records in the process.",
		deletedRecords,
	), nil
}
|
||||
152
spn/captain/bootstrap.go
Normal file
152
spn/captain/bootstrap.go
Normal file
@@ -0,0 +1,152 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
|
||||
"github.com/safing/portbase/formats/dsd"
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
)
|
||||
|
||||
// BootstrapFile is used for sideloading bootstrap data.
type BootstrapFile struct {
	// Main holds the bootstrap entries for the main SPN map.
	Main BootstrapFileEntry
}

// BootstrapFileEntry is the bootstrap data structure for one map.
type BootstrapFileEntry struct {
	// Hubs lists bootstrap hubs as transport addresses with the hub ID in the
	// URL fragment (see hub.ParseBootstrapHub).
	Hubs []string
}
|
||||
|
||||
// Command line flags for bootstrapping.
var (
	// bootstrapHubFlag holds a single bootstrap hub transport address.
	bootstrapHubFlag string
	// bootstrapFileFlag holds a path to a bootstrap file.
	bootstrapFileFlag string
)

func init() {
	flag.StringVar(&bootstrapHubFlag, "bootstrap-hub", "", "transport address of hub for bootstrapping with the hub ID in the fragment")
	flag.StringVar(&bootstrapFileFlag, "bootstrap-file", "", "bootstrap file containing bootstrap hubs - will be initialized if running a public hub and it doesn't exist")
}
|
||||
|
||||
// prepBootstrapHubFlag checks the bootstrap-hub argument if it is valid.
|
||||
func prepBootstrapHubFlag() error {
|
||||
if bootstrapHubFlag != "" {
|
||||
_, _, _, err := hub.ParseBootstrapHub(bootstrapHubFlag)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processBootstrapHubFlag processes the bootstrap-hub argument.
|
||||
func processBootstrapHubFlag() error {
|
||||
if bootstrapHubFlag != "" {
|
||||
return navigator.Main.AddBootstrapHubs([]string{bootstrapHubFlag})
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processBootstrapFileFlag processes the bootstrap-file argument.
|
||||
func processBootstrapFileFlag() error {
|
||||
if bootstrapFileFlag == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := os.Stat(bootstrapFileFlag)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return createBootstrapFile(bootstrapFileFlag)
|
||||
}
|
||||
return fmt.Errorf("failed to access bootstrap hub file: %w", err)
|
||||
}
|
||||
|
||||
return loadBootstrapFile(bootstrapFileFlag)
|
||||
}
|
||||
|
||||
// bootstrapWithUpdates loads bootstrap hubs from the updates server and imports them.
|
||||
func bootstrapWithUpdates() error {
|
||||
if bootstrapFileFlag != "" {
|
||||
return errors.New("using the bootstrap-file argument disables bootstrapping via the update system")
|
||||
}
|
||||
|
||||
return updateSPNIntel(module.Ctx, nil)
|
||||
}
|
||||
|
||||
// loadBootstrapFile loads a file with bootstrap hub entries and imports them.
|
||||
func loadBootstrapFile(filename string) (err error) {
|
||||
// Load bootstrap file from disk and parse it.
|
||||
data, err := os.ReadFile(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load bootstrap file: %w", err)
|
||||
}
|
||||
bootstrapFile := &BootstrapFile{}
|
||||
_, err = dsd.Load(data, bootstrapFile)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to parse bootstrap file: %w", err)
|
||||
}
|
||||
if len(bootstrapFile.Main.Hubs) == 0 {
|
||||
return errors.New("bootstrap holds no hubs for main map")
|
||||
}
|
||||
|
||||
// Add Hubs to map.
|
||||
err = navigator.Main.AddBootstrapHubs(bootstrapFile.Main.Hubs)
|
||||
if err == nil {
|
||||
log.Infof("spn/captain: loaded bootstrap file %s", filename)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// createBootstrapFile save a bootstrap hub file with an entry of the public identity.
// It is a no-op on non-public hubs. The file is serialized as JSON and written
// world-readable so other parties can use it for bootstrapping.
func createBootstrapFile(filename string) error {
	if !conf.PublicHub() {
		log.Infof("spn/captain: skipped writing a bootstrap hub file, as this is not a public hub")
		return nil
	}

	// create bootstrap hub
	if len(publicIdentity.Hub.Info.Transports) == 0 {
		return errors.New("public identity has no transports available")
	}
	// parse first transport
	t, err := hub.ParseTransport(publicIdentity.Hub.Info.Transports[0])
	if err != nil {
		return fmt.Errorf("failed to parse transport of public identity: %w", err)
	}
	// add IP address (IPv4 preferred; IPv6 needs brackets in the address form)
	switch {
	case publicIdentity.Hub.Info.IPv4 != nil:
		t.Domain = publicIdentity.Hub.Info.IPv4.String()
	case publicIdentity.Hub.Info.IPv6 != nil:
		t.Domain = "[" + publicIdentity.Hub.Info.IPv6.String() + "]"
	default:
		return errors.New("public identity has no IP address available")
	}
	// add Hub ID
	t.Option = publicIdentity.Hub.ID
	// put together
	bs := &BootstrapFile{
		Main: BootstrapFileEntry{
			Hubs: []string{t.String()},
		},
	}

	// serialize
	fileData, err := dsd.Dump(bs, dsd.JSON)
	if err != nil {
		return err
	}

	// save to disk
	err = os.WriteFile(filename, fileData, 0o0664) //nolint:gosec // Should be able to be read by others.
	if err != nil {
		return err
	}

	log.Infof("spn/captain: created bootstrap file %s", filename)
	return nil
}
|
||||
506
spn/captain/client.go
Normal file
506
spn/captain/client.go
Normal file
@@ -0,0 +1,506 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/tevino/abool"
|
||||
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portbase/notifications"
|
||||
"github.com/safing/portmaster/service/netenv"
|
||||
"github.com/safing/portmaster/service/network/netutils"
|
||||
"github.com/safing/portmaster/spn/access"
|
||||
"github.com/safing/portmaster/spn/crew"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
var (
	// ready signifies whether the SPN client is fully connected and usable.
	// It is managed by clientManager.
	ready = abool.New()

	// spnLoginButton opens the SPN page in the UI for logging in.
	spnLoginButton = notifications.Action{
		Text:    "Login",
		Type:    notifications.ActionTypeOpenPage,
		Payload: "spn",
	}
	// spnOpenAccountPage opens the external account page in the browser.
	spnOpenAccountPage = notifications.Action{
		Text:    "Open Account Page",
		Type:    notifications.ActionTypeOpenURL,
		Payload: "https://account.safing.io",
	}
)
|
||||
|
||||
// ClientReady signifies if the SPN client is fully ready to handle connections.
// The flag is set by clientManager once all connect steps have succeeded.
func ClientReady() bool {
	return ready.IsSet()
}
|
||||
|
||||
type (
	// clientComponentFunc is one step of the client connect/maintenance
	// sequence; its result tells the caller how to proceed.
	clientComponentFunc func(ctx context.Context) clientComponentResult
	// clientComponentResult is the outcome of a clientComponentFunc.
	clientComponentResult uint8
)
|
||||
|
||||
// Possible results of a clientComponentFunc.
const (
	clientResultOk        clientComponentResult = iota // Continue and clean module status.
	clientResultRetry                                  // Go back to start of current step, don't clear module status.
	clientResultReconnect                              // Stop current connection and start from zero.
	clientResultShutdown                               // SPN Module is shutting down.
)
|
||||
|
||||
var (
	// clientNetworkChangedFlag signals changes of the network environment.
	clientNetworkChangedFlag = netenv.GetNetworkChangedFlag()

	// Timing configuration of the client manager loop.
	clientIneligibleAccountUpdateDelay     = 1 * time.Minute
	clientRetryConnectBackoffDuration      = 5 * time.Second
	clientInitialHealthCheckDelay          = 10 * time.Second
	clientHealthCheckTickDuration          = 1 * time.Minute
	clientHealthCheckTickDurationSleepMode = 5 * time.Minute
	clientHealthCheckTimeout               = 15 * time.Second

	// clientHealthCheckTrigger requests an immediate health check; buffered
	// with size 1 so triggering never blocks and extra triggers coalesce.
	clientHealthCheckTrigger = make(chan struct{}, 1)
	// lastHealthCheck records when the home hub last responded to a check.
	lastHealthCheck time.Time
)
|
||||
|
||||
// triggerClientHealthCheck requests an immediate connection health check.
// The send is non-blocking: if a trigger is already pending, this is a no-op.
func triggerClientHealthCheck() {
	select {
	case clientHealthCheckTrigger <- struct{}{}:
	default:
	}
}
|
||||
|
||||
// clientManager is the long-running worker that connects the SPN client to a
// home hub and keeps that connection healthy. It runs the connect steps until
// all succeed, then loops over the maintenance steps, starting over from
// scratch when a step requests a reconnect. It returns when ctx is canceled.
func clientManager(ctx context.Context) error {
	defer func() {
		// Reset all client state on exit.
		ready.UnSet()
		netenv.ConnectedToSPN.UnSet()
		resetSPNStatus(StatusDisabled, true)
		module.Resolve("")
		clientStopHomeHub(ctx)
	}()

	module.Hint(
		"spn:establishing-home-hub",
		"Connecting to SPN...",
		"Connecting to the SPN network is in progress.",
	)

	// TODO: When we are starting and the SPN module is faster online than the
	// nameserver, then updating the account will fail as the DNS query is
	// redirected to a closed port.
	// We also can't add the nameserver as a module dependency, as the nameserver
	// is not part of the server.
	select {
	case <-time.After(1 * time.Second):
	case <-ctx.Done():
		return nil
	}

	healthCheckTicker := module.NewSleepyTicker(clientHealthCheckTickDuration, clientHealthCheckTickDurationSleepMode)

reconnect:
	for {
		// Check if we are shutting down.
		select {
		case <-ctx.Done():
			return nil
		default:
		}

		// Reset SPN status.
		if ready.SetToIf(true, false) {
			netenv.ConnectedToSPN.UnSet()
			log.Info("spn/captain: client not ready")
		}
		resetSPNStatus(StatusConnecting, true)

		// Check everything and connect to the SPN.
		for _, clientFunc := range []clientComponentFunc{
			clientStopHomeHub,
			clientCheckNetworkReady,
			clientCheckAccountAndTokens,
			clientConnectToHomeHub,
			clientSetActiveConnectionStatus,
		} {
			switch clientFunc(ctx) {
			case clientResultOk:
				// Continue
			case clientResultRetry, clientResultReconnect:
				// Wait for a short time to not loop too quickly.
				select {
				case <-time.After(clientRetryConnectBackoffDuration):
					continue reconnect
				case <-ctx.Done():
					return nil
				}
			case clientResultShutdown:
				return nil
			}
		}

		log.Info("spn/captain: client is ready")
		ready.Set()
		netenv.ConnectedToSPN.Set()

		module.TriggerEvent(SPNConnectedEvent, nil)
		module.StartWorker("update quick setting countries", navigator.Main.UpdateConfigQuickSettings)

		// Reset last health check value, as we have just connected.
		lastHealthCheck = time.Now()

		// Back off before starting initial health checks.
		select {
		case <-time.After(clientInitialHealthCheckDelay):
		case <-ctx.Done():
			return nil
		}

		// Maintenance loop: runs until a step requests reconnect or shutdown.
		for {
			// Check health of the current SPN connection and monitor the user status.
		maintainers:
			for _, clientFunc := range []clientComponentFunc{
				clientCheckHomeHubConnection,
				clientCheckAccountAndTokens,
				clientSetActiveConnectionStatus,
			} {
				switch clientFunc(ctx) {
				case clientResultOk:
					// Continue
				case clientResultRetry:
					// Abort and wait for the next run.
					break maintainers
				case clientResultReconnect:
					continue reconnect
				case clientResultShutdown:
					return nil
				}
			}

			// Wait for signal to run maintenance again.
			select {
			case <-healthCheckTicker.Wait():
			case <-clientHealthCheckTrigger:
			case <-crew.ConnectErrors():
			case <-clientNetworkChangedFlag.Signal():
				clientNetworkChangedFlag.Refresh()
			case <-ctx.Done():
				return nil
			}
		}
	}
}
|
||||
|
||||
func clientCheckNetworkReady(ctx context.Context) clientComponentResult {
|
||||
// Check if we are online enough for connecting.
|
||||
switch netenv.GetOnlineStatus() { //nolint:exhaustive
|
||||
case netenv.StatusOffline,
|
||||
netenv.StatusLimited:
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return clientResultShutdown
|
||||
case <-time.After(1 * time.Second):
|
||||
return clientResultRetry
|
||||
}
|
||||
}
|
||||
|
||||
return clientResultOk
|
||||
}
|
||||
|
||||
// DisableAccount disables using any account related SPN functionality.
// Attempts to use the same will result in errors.
// When set, clientCheckAccountAndTokens becomes a no-op.
var DisableAccount bool
|
||||
|
||||
// clientCheckAccountAndTokens verifies that the user is logged in, is eligible
// to use the SPN and has enough access tokens. For every user-facing problem
// it attaches a notification to the module and sets the SPN status to failed.
// It is a no-op when DisableAccount is set.
func clientCheckAccountAndTokens(ctx context.Context) clientComponentResult {
	if DisableAccount {
		return clientResultOk
	}

	// Get SPN user.
	user, err := access.GetUser()
	if err != nil && !errors.Is(err, access.ErrNotLoggedIn) {
		notifications.NotifyError(
			"spn:failed-to-get-user",
			"SPN Internal Error",
			`Please restart Portmaster.`,
			// TODO: Add restart button.
			// TODO: Use special UI restart action in order to reload UI on restart.
		).AttachToModule(module)
		resetSPNStatus(StatusFailed, true)
		log.Errorf("spn/captain: client internal error: %s", err)
		return clientResultReconnect
	}

	// Check if user is logged in.
	if user == nil || !user.IsLoggedIn() {
		notifications.NotifyWarn(
			"spn:not-logged-in",
			"SPN Login Required",
			`Please log in to access the SPN.`,
			spnLoginButton,
		).AttachToModule(module)
		resetSPNStatus(StatusFailed, true)
		log.Warningf("spn/captain: enabled but not logged in")
		return clientResultReconnect
	}

	// Check if user is eligible.
	if !user.MayUseTheSPN() {
		// Update user in case there was a change.
		// Only update here if we need to - there is an update task in the access
		// module for periodic updates.
		if time.Now().Add(-clientIneligibleAccountUpdateDelay).After(time.Unix(user.Meta().Modified, 0)) {
			_, _, err := access.UpdateUser()
			if err != nil {
				notifications.NotifyError(
					"spn:failed-to-update-user",
					"SPN Account Server Error",
					fmt.Sprintf(`The status of your SPN account could not be updated: %s`, err),
				).AttachToModule(module)
				resetSPNStatus(StatusFailed, true)
				log.Errorf("spn/captain: failed to update ineligible account: %s", err)
				return clientResultReconnect
			}
		}

		// Check if user is eligible after a possible update.
		if !user.MayUseTheSPN() {
			// If package is generally valid, then the current package does not have access to the SPN.
			if user.MayUse("") {
				notifications.NotifyError(
					"spn:package-not-eligible",
					"SPN Not Included In Package",
					"Your current Portmaster Package does not include access to the SPN. Please upgrade your package on the Account Page.",
					spnOpenAccountPage,
				).AttachToModule(module)
				resetSPNStatus(StatusFailed, true)
				return clientResultReconnect
			}

			// Otherwise, include the message from the user view.
			message := "There is an issue with your Portmaster Package. Please check the Account Page."
			if user.View != nil && user.View.Message != "" {
				message = user.View.Message
			}
			notifications.NotifyError(
				"spn:subscription-inactive",
				"Portmaster Package Issue",
				"Cannot enable SPN: "+message,
				spnOpenAccountPage,
			).AttachToModule(module)
			resetSPNStatus(StatusFailed, true)
			return clientResultReconnect
		}
	}

	// Check if we have enough tokens.
	if access.ShouldRequest(access.ExpandAndConnectZones) {
		err := access.UpdateTokens()
		if err != nil {
			log.Errorf("spn/captain: failed to get tokens: %s", err)

			// There was an error updating the account.
			// Check if we have enough tokens to continue anyway.
			regular, _ := access.GetTokenAmount(access.ExpandAndConnectZones)
			if regular == 0 /* && fallback == 0 */ { // TODO: Add fallback token check when fallback was tested on servers.
				notifications.NotifyError(
					"spn:tokens-exhausted",
					"SPN Access Tokens Exhausted",
					`The Portmaster failed to get new access tokens to access the SPN. The Portmaster will automatically retry to get new access tokens.`,
				).AttachToModule(module)
				resetSPNStatus(StatusFailed, false)
			}
			return clientResultRetry
		}
	}

	return clientResultOk
}
|
||||
|
||||
func clientStopHomeHub(ctx context.Context) clientComponentResult {
|
||||
// Don't use the context in this function, as it will likely be canceled
|
||||
// already and would disrupt any context usage in here.
|
||||
|
||||
// Get crane connecting to home.
|
||||
home, _ := navigator.Main.GetHome()
|
||||
if home == nil {
|
||||
return clientResultOk
|
||||
}
|
||||
crane := docks.GetAssignedCrane(home.Hub.ID)
|
||||
if crane == nil {
|
||||
return clientResultOk
|
||||
}
|
||||
|
||||
// Stop crane and all connected terminals.
|
||||
crane.Stop(nil)
|
||||
return clientResultOk
|
||||
}
|
||||
|
||||
// clientConnectToHomeHub establishes a connection to a new home hub. On
// failure it sets the SPN status to failed, attaches a notification matching
// the failure cause to the module and requests a reconnect.
func clientConnectToHomeHub(ctx context.Context) clientComponentResult {
	err := establishHomeHub(ctx)
	if err != nil {
		log.Errorf("spn/captain: failed to establish connection to home hub: %s", err)
		resetSPNStatus(StatusFailed, true)

		// Pick the notification matching the failure cause.
		switch {
		case errors.Is(err, ErrAllHomeHubsExcluded):
			notifications.NotifyError(
				"spn:all-home-hubs-excluded",
				"All Home Nodes Excluded",
				"Your current Home Node Rules exclude all available and eligible SPN Nodes. Please change your rules to allow for at least one available and eligible Home Node.",
				notifications.Action{
					Text: "Configure",
					Type: notifications.ActionTypeOpenSetting,
					Payload: &notifications.ActionTypeOpenSettingPayload{
						Key: CfgOptionHomeHubPolicyKey,
					},
				},
			).AttachToModule(module)

		case errors.Is(err, ErrReInitSPNSuggested):
			notifications.NotifyError(
				"spn:cannot-bootstrap",
				"SPN Cannot Bootstrap",
				"The local state of the SPN network is likely outdated. Portmaster was not able to identify a server to connect to. Please re-initialize the SPN using the tools menu or the button on the notification.",
				notifications.Action{
					ID:   "re-init",
					Text: "Re-Init SPN",
					Type: notifications.ActionTypeWebhook,
					Payload: &notifications.ActionTypeWebhookPayload{
						URL:          apiPathForSPNReInit,
						ResultAction: "display",
					},
				},
			).AttachToModule(module)

		default:
			notifications.NotifyWarn(
				"spn:home-hub-failure",
				"SPN Failed to Connect",
				fmt.Sprintf("Failed to connect to a home hub: %s. The Portmaster will retry to connect automatically.", err),
			).AttachToModule(module)
		}

		return clientResultReconnect
	}

	// Log new connection.
	home, _ := navigator.Main.GetHome()
	if home != nil {
		log.Infof("spn/captain: established new home %s", home.Hub)
	}

	return clientResultOk
}
|
||||
|
||||
// clientSetActiveConnectionStatus publishes the current home hub connection
// in the shared SPN status, if it is not already set correctly, and resolves
// any pending module failure status.
func clientSetActiveConnectionStatus(ctx context.Context) clientComponentResult {
	// Get current home.
	home, homeTerminal := navigator.Main.GetHome()
	if home == nil || homeTerminal == nil {
		return clientResultReconnect
	}

	// Resolve any connection error.
	module.Resolve("")

	// Update SPN Status with connection information, if not already correctly set.
	spnStatus.Lock()
	defer spnStatus.Unlock()

	if spnStatus.Status != StatusConnected || spnStatus.HomeHubID != home.Hub.ID {
		// Fill connection status data.
		spnStatus.Status = StatusConnected
		spnStatus.HomeHubID = home.Hub.ID
		spnStatus.HomeHubName = home.Hub.Info.Name

		connectedIP, _, err := netutils.IPPortFromAddr(homeTerminal.RemoteAddr())
		if err != nil {
			// Fall back to the raw remote address when it cannot be split.
			spnStatus.ConnectedIP = homeTerminal.RemoteAddr().String()
		} else {
			spnStatus.ConnectedIP = connectedIP.String()
		}
		spnStatus.ConnectedTransport = homeTerminal.Transport().String()

		// NOTE(review): connectedIP may be nil here when IPPortFromAddr failed —
		// confirm GetLocation handles a nil IP.
		geoLoc := home.GetLocation(connectedIP)
		if geoLoc != nil {
			spnStatus.ConnectedCountry = &geoLoc.Country
		}

		now := time.Now()
		spnStatus.ConnectedSince = &now

		// Push new status.
		pushSPNStatusUpdate()
	}

	return clientResultOk
}
|
||||
|
||||
// clientCheckHomeHubConnection verifies the health of the home hub connection
// by pinging the home hub. On failure it resets failing states and requests a
// reconnect; the home hub itself is only marked as failing when the device
// does not appear to have just woken from sleep.
func clientCheckHomeHubConnection(ctx context.Context) clientComponentResult {
	// Check the status of the Home Hub.
	home, homeTerminal := navigator.Main.GetHome()
	if home == nil || homeTerminal == nil || homeTerminal.IsBeingAbandoned() {
		return clientResultReconnect
	}

	// Get crane controller for health check.
	crane := docks.GetAssignedCrane(home.Hub.ID)
	if crane == nil {
		log.Errorf("spn/captain: could not find home hub crane for health check")
		return clientResultOk
	}

	// Ping home hub.
	latency, tErr := pingHome(ctx, crane.Controller, clientHealthCheckTimeout)
	if tErr != nil {
		log.Warningf("spn/captain: failed to ping home hub: %s", tErr)

		// Prepare to reconnect to the network.

		// Reset all failing states, as these might have been caused by the failing home hub.
		navigator.Main.ResetFailingStates(ctx)

		// If the last health check is clearly too long ago, assume that the device was sleeping and do not set the home node to failing yet.
		if time.Since(lastHealthCheck) > clientHealthCheckTickDuration+
			clientHealthCheckTickDurationSleepMode+
			(clientHealthCheckTimeout*2) {
			return clientResultReconnect
		}

		// Mark the home hub itself as failing, as we want to try to connect to somewhere else.
		home.MarkAsFailingFor(5 * time.Minute)

		return clientResultReconnect
	}
	// Record the successful check for the sleep-detection heuristic above.
	lastHealthCheck = time.Now()

	log.Debugf("spn/captain: pinged home hub in %s", latency)
	return clientResultOk
}
|
||||
|
||||
// pingHome starts a ping operation on the given terminal and waits for the
// response, returning the measured round-trip latency. It returns
// terminal.ErrCanceled when ctx is canceled, terminal.ErrTimeout when the
// timeout elapses first, and the received result otherwise.
func pingHome(ctx context.Context, t terminal.Terminal, timeout time.Duration) (latency time.Duration, err *terminal.Error) {
	started := time.Now()

	// Start ping operation.
	pingOp, tErr := crew.NewPingOp(t)
	if tErr != nil {
		return 0, tErr
	}

	// Wait for response.
	select {
	case <-ctx.Done():
		return 0, terminal.ErrCanceled
	case <-time.After(timeout):
		return 0, terminal.ErrTimeout
	case result := <-pingOp.Result:
		if result.Is(terminal.ErrExplicitAck) {
			// Expected acknowledgement: report the measured latency.
			return time.Since(started), nil
		}
		if result.IsOK() {
			return 0, result.Wrap("unexpected response")
		}
		return 0, result
	}
}
|
||||
253
spn/captain/config.go
Normal file
253
spn/captain/config.go
Normal file
@@ -0,0 +1,253 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/safing/portbase/config"
|
||||
"github.com/safing/portmaster/service/profile"
|
||||
"github.com/safing/portmaster/service/profile/endpoints"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
)
|
||||
|
||||
// Configuration keys, display orders and cached accessors of the captain module.
var (
	// CfgOptionEnableSPNKey is the configuration key for the SPN module.
	CfgOptionEnableSPNKey   = "spn/enable"
	cfgOptionEnableSPNOrder = 128

	// CfgOptionHomeHubPolicyKey is the configuration key for the SPN home policy.
	CfgOptionHomeHubPolicyKey   = "spn/homePolicy"
	cfgOptionHomeHubPolicy      config.StringArrayOption
	cfgOptionHomeHubPolicyOrder = 145

	// CfgOptionDNSExitHubPolicyKey is the configuration key for the SPN DNS exit policy.
	CfgOptionDNSExitHubPolicyKey   = "spn/dnsExitPolicy"
	cfgOptionDNSExitHubPolicy      config.StringArrayOption
	cfgOptionDNSExitHubPolicyOrder = 148

	// CfgOptionUseCommunityNodesKey is the configuration key for whether to use community nodes.
	CfgOptionUseCommunityNodesKey   = "spn/useCommunityNodes"
	cfgOptionUseCommunityNodes      config.BoolOption
	cfgOptionUseCommunityNodesOrder = 149

	// NonCommunityVerifiedOwners holds a list of verified owners that are not
	// considered "community".
	NonCommunityVerifiedOwners = []string{"Safing"}

	// CfgOptionTrustNodeNodesKey is the configuration key for whether additional trusted nodes.
	CfgOptionTrustNodeNodesKey   = "spn/trustNodes"
	cfgOptionTrustNodeNodes      config.StringArrayOption
	cfgOptionTrustNodeNodesOrder = 150

	// Special Access Code.
	cfgOptionSpecialAccessCodeKey     = "spn/specialAccessCode"
	cfgOptionSpecialAccessCodeDefault = "none"
	cfgOptionSpecialAccessCode        config.StringOption //nolint:unused // Linter, you drunk?
	cfgOptionSpecialAccessCodeOrder   = 160

	// IPv6 must be global and accessible.
	cfgOptionBindToAdvertisedKey     = "spn/publicHub/bindToAdvertised"
	cfgOptionBindToAdvertised        config.BoolOption
	cfgOptionBindToAdvertisedDefault = false
	cfgOptionBindToAdvertisedOrder   = 161

	// Config options for use.
	cfgOptionRoutingAlgorithm config.StringOption
)
|
||||
|
||||
// prepConfig registers all captain configuration options and initializes the
// cached accessor functions. The bind-to-advertised option is only registered
// when running as a public hub.
func prepConfig() error {
	// Home Node Rules
	err := config.Register(&config.Option{
		Name: "Home Node Rules",
		Key:  CfgOptionHomeHubPolicyKey,
		Description: `Customize which countries should or should not be used for your Home Node. The Home Node is your entry into the SPN. You connect directly to it and all your connections are routed through it.

By default, the Portmaster tries to choose the nearest node as your Home Node in order to reduce your exposure to the open Internet.

Reconnect to the SPN in order to apply new rules.`,
		Help:            profile.SPNRulesHelp,
		Sensitive:       true,
		OptType:         config.OptTypeStringArray,
		RequiresRestart: true,
		ExpertiseLevel:  config.ExpertiseLevelExpert,
		DefaultValue:    []string{},
		Annotations: config.Annotations{
			config.CategoryAnnotation:                    "Routing",
			config.DisplayOrderAnnotation:                cfgOptionHomeHubPolicyOrder,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.QuickSettingsAnnotation:               profile.SPNRulesQuickSettings,
			endpoints.EndpointListVerdictNamesAnnotation: profile.SPNRulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionHomeHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionHomeHubPolicyKey, []string{})

	// DNS Exit Node Rules
	err = config.Register(&config.Option{
		Name: "DNS Exit Node Rules",
		Key:  CfgOptionDNSExitHubPolicyKey,
		Description: `Customize which countries should or should not be used as DNS Exit Nodes.

By default, the Portmaster will exit DNS requests directly at your Home Node in order to keep them fast and close to your location. This is important, as DNS resolution often takes your approximate location into account when deciding which optimized DNS records are returned to you. As the Portmaster encrypts your DNS requests by default, you effectively gain a two-hop security level for your DNS requests in order to protect your privacy.

This setting mainly exists for when you need to simulate your presence in another location on a lower level too. This might be necessary to defeat more intelligent geo-blocking systems.`,
		Help:            profile.SPNRulesHelp,
		Sensitive:       true,
		OptType:         config.OptTypeStringArray,
		RequiresRestart: true,
		ExpertiseLevel:  config.ExpertiseLevelExpert,
		DefaultValue:    []string{},
		Annotations: config.Annotations{
			config.CategoryAnnotation:                    "Routing",
			config.DisplayOrderAnnotation:                cfgOptionDNSExitHubPolicyOrder,
			config.DisplayHintAnnotation:                 endpoints.DisplayHintEndpointList,
			config.QuickSettingsAnnotation:               profile.SPNRulesQuickSettings,
			endpoints.EndpointListVerdictNamesAnnotation: profile.SPNRulesVerdictNames,
		},
		ValidationRegex: endpoints.ListEntryValidationRegex,
		ValidationFunc:  endpoints.ValidateEndpointListConfigOption,
	})
	if err != nil {
		return err
	}
	cfgOptionDNSExitHubPolicy = config.Concurrent.GetAsStringArray(CfgOptionDNSExitHubPolicyKey, []string{})

	// Use Community Nodes
	err = config.Register(&config.Option{
		Name:            "Use Community Nodes",
		Key:             CfgOptionUseCommunityNodesKey,
		Description:     "Use nodes (servers) not operated by Safing themselves. The use of community nodes is recommended as it diversifies the ownership of the nodes you use for your connections and further strengthens your privacy. Plain connections (eg. http, smtp, ...) will never exit via community nodes, making this setting safe to use.",
		Sensitive:       true,
		OptType:         config.OptTypeBool,
		RequiresRestart: true,
		DefaultValue:    true,
		Annotations: config.Annotations{
			config.DisplayOrderAnnotation: cfgOptionUseCommunityNodesOrder,
			config.CategoryAnnotation:     "Routing",
		},
	})
	if err != nil {
		return err
	}
	cfgOptionUseCommunityNodes = config.Concurrent.GetAsBool(CfgOptionUseCommunityNodesKey, true)

	// Trust Nodes
	err = config.Register(&config.Option{
		Name:           "Trust Nodes",
		Key:            CfgOptionTrustNodeNodesKey,
		Description:    "Specify which community nodes to additionally trust. These nodes may then also be used as a Home Node, as well as an Exit Node for unencrypted connections.",
		Help:           "You can specify nodes by their ID or their verified operator.",
		Sensitive:      true,
		OptType:        config.OptTypeStringArray,
		ExpertiseLevel: config.ExpertiseLevelExpert,
		DefaultValue:   []string{},
		Annotations: config.Annotations{
			config.DisplayOrderAnnotation: cfgOptionTrustNodeNodesOrder,
			config.CategoryAnnotation:     "Routing",
		},
	})
	if err != nil {
		return err
	}
	cfgOptionTrustNodeNodes = config.Concurrent.GetAsStringArray(CfgOptionTrustNodeNodesKey, []string{})

	// Special Access Code
	err = config.Register(&config.Option{
		Name:         "Special Access Code",
		Key:          cfgOptionSpecialAccessCodeKey,
		Description:  "Special Access Codes grant access to the SPN for testing or evaluation purposes.",
		Sensitive:    true,
		OptType:      config.OptTypeString,
		DefaultValue: cfgOptionSpecialAccessCodeDefault,
		Annotations: config.Annotations{
			config.DisplayOrderAnnotation: cfgOptionSpecialAccessCodeOrder,
			config.CategoryAnnotation:     "Advanced",
		},
	})
	if err != nil {
		return err
	}
	cfgOptionSpecialAccessCode = config.Concurrent.GetAsString(cfgOptionSpecialAccessCodeKey, "")

	// Public-hub-only options.
	if conf.PublicHub() {
		err = config.Register(&config.Option{
			Name:            "Connect From Advertised IPs Only",
			Key:             cfgOptionBindToAdvertisedKey,
			Description:     "Only connect from (bind to) the advertised IP addresses.",
			OptType:         config.OptTypeBool,
			ExpertiseLevel:  config.ExpertiseLevelExpert,
			DefaultValue:    cfgOptionBindToAdvertisedDefault,
			RequiresRestart: true,
			Annotations: config.Annotations{
				config.DisplayOrderAnnotation: cfgOptionBindToAdvertisedOrder,
			},
		})
		if err != nil {
			return err
		}
		cfgOptionBindToAdvertised = config.GetAsBool(cfgOptionBindToAdvertisedKey, cfgOptionBindToAdvertisedDefault)
	}

	// Config options for use.
	cfgOptionRoutingAlgorithm = config.Concurrent.GetAsString(profile.CfgOptionRoutingAlgorithmKey, navigator.DefaultRoutingProfileID)

	return nil
}
|
||||
|
||||
var (
|
||||
homeHubPolicy endpoints.Endpoints
|
||||
homeHubPolicyLock sync.Mutex
|
||||
homeHubPolicyConfigFlag = config.NewValidityFlag()
|
||||
)
|
||||
|
||||
func getHomeHubPolicy() (endpoints.Endpoints, error) {
|
||||
homeHubPolicyLock.Lock()
|
||||
defer homeHubPolicyLock.Unlock()
|
||||
|
||||
// Return cached value if config is still valid.
|
||||
if homeHubPolicyConfigFlag.IsValid() {
|
||||
return homeHubPolicy, nil
|
||||
}
|
||||
homeHubPolicyConfigFlag.Refresh()
|
||||
|
||||
// Parse new policy.
|
||||
policy, err := endpoints.ParseEndpoints(cfgOptionHomeHubPolicy())
|
||||
if err != nil {
|
||||
homeHubPolicy = nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save and return the new policy.
|
||||
homeHubPolicy = policy
|
||||
return homeHubPolicy, nil
|
||||
}
|
||||
|
||||
var (
|
||||
dnsExitHubPolicy endpoints.Endpoints
|
||||
dnsExitHubPolicyLock sync.Mutex
|
||||
dnsExitHubPolicyConfigFlag = config.NewValidityFlag()
|
||||
)
|
||||
|
||||
// GetDNSExitHubPolicy return the current DNS exit policy.
|
||||
func GetDNSExitHubPolicy() (endpoints.Endpoints, error) {
|
||||
dnsExitHubPolicyLock.Lock()
|
||||
defer dnsExitHubPolicyLock.Unlock()
|
||||
|
||||
// Return cached value if config is still valid.
|
||||
if dnsExitHubPolicyConfigFlag.IsValid() {
|
||||
return dnsExitHubPolicy, nil
|
||||
}
|
||||
dnsExitHubPolicyConfigFlag.Refresh()
|
||||
|
||||
// Parse new policy.
|
||||
policy, err := endpoints.ParseEndpoints(cfgOptionDNSExitHubPolicy())
|
||||
if err != nil {
|
||||
dnsExitHubPolicy = nil
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Save and return the new policy.
|
||||
dnsExitHubPolicy = policy
|
||||
return dnsExitHubPolicy, nil
|
||||
}
|
||||
105
spn/captain/establish.go
Normal file
105
spn/captain/establish.go
Normal file
@@ -0,0 +1,105 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/ships"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
// EstablishCrane establishes a crane to another Hub.
|
||||
func EstablishCrane(callerCtx context.Context, dst *hub.Hub) (*docks.Crane, error) {
|
||||
if conf.PublicHub() && dst.ID == publicIdentity.ID {
|
||||
return nil, errors.New("connecting to self")
|
||||
}
|
||||
if docks.GetAssignedCrane(dst.ID) != nil {
|
||||
return nil, fmt.Errorf("route to %s already exists", dst.ID)
|
||||
}
|
||||
|
||||
ship, err := ships.Launch(callerCtx, dst, nil, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to launch ship: %w", err)
|
||||
}
|
||||
|
||||
// On pure clients, mark all ships as public in order to show unmasked data in logs.
|
||||
if conf.Client() && !conf.PublicHub() {
|
||||
ship.MarkPublic()
|
||||
}
|
||||
|
||||
crane, err := docks.NewCrane(ship, dst, publicIdentity)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create crane: %w", err)
|
||||
}
|
||||
|
||||
err = crane.Start(callerCtx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to start crane: %w", err)
|
||||
}
|
||||
|
||||
// Start gossip op for live map updates.
|
||||
_, tErr := NewGossipOp(crane.Controller)
|
||||
if tErr != nil {
|
||||
crane.Stop(tErr)
|
||||
return nil, fmt.Errorf("failed to start gossip op: %w", tErr)
|
||||
}
|
||||
|
||||
return crane, nil
|
||||
}
|
||||
|
||||
// EstablishPublicLane establishes a crane to another Hub and publishes it.
|
||||
func EstablishPublicLane(ctx context.Context, dst *hub.Hub) (*docks.Crane, *terminal.Error) {
|
||||
// Create new context with timeout.
|
||||
// The maximum timeout is a worst case safeguard.
|
||||
// Keep in mind that multiple IPs and protocols may be tried in all configurations.
|
||||
// Some servers will be (possibly on purpose) hard to reach.
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Connect to destination and establish communication.
|
||||
crane, err := EstablishCrane(ctx, dst)
|
||||
if err != nil {
|
||||
return nil, terminal.ErrInternalError.With("failed to establish crane: %w", err)
|
||||
}
|
||||
|
||||
// Publish as Lane.
|
||||
publishOp, tErr := NewPublishOp(crane.Controller, publicIdentity)
|
||||
if tErr != nil {
|
||||
return nil, terminal.ErrInternalError.With("failed to publish: %w", err)
|
||||
}
|
||||
|
||||
// Wait for publishing to complete.
|
||||
select {
|
||||
case tErr := <-publishOp.Result():
|
||||
if !tErr.Is(terminal.ErrExplicitAck) {
|
||||
// Stop crane again, because we failed to publish it.
|
||||
defer crane.Stop(nil)
|
||||
return nil, terminal.ErrInternalError.With("failed to publish lane: %w", tErr)
|
||||
}
|
||||
|
||||
case <-crane.Controller.Ctx().Done():
|
||||
defer crane.Stop(nil)
|
||||
return nil, terminal.ErrStopping
|
||||
|
||||
case <-ctx.Done():
|
||||
defer crane.Stop(nil)
|
||||
if errors.Is(ctx.Err(), context.DeadlineExceeded) {
|
||||
return nil, terminal.ErrTimeout
|
||||
}
|
||||
return nil, terminal.ErrCanceled
|
||||
}
|
||||
|
||||
// Query all gossip msgs.
|
||||
_, tErr = NewGossipQueryOp(crane.Controller)
|
||||
if tErr != nil {
|
||||
log.Warningf("spn/captain: failed to start initial gossip query: %s", tErr)
|
||||
}
|
||||
|
||||
return crane, nil
|
||||
}
|
||||
28
spn/captain/exceptions.go
Normal file
28
spn/captain/exceptions.go
Normal file
@@ -0,0 +1,28 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
exceptionLock sync.Mutex
|
||||
exceptIPv4 net.IP
|
||||
exceptIPv6 net.IP
|
||||
)
|
||||
|
||||
func setExceptions(ipv4, ipv6 net.IP) {
|
||||
exceptionLock.Lock()
|
||||
defer exceptionLock.Unlock()
|
||||
|
||||
exceptIPv4 = ipv4
|
||||
exceptIPv6 = ipv6
|
||||
}
|
||||
|
||||
// IsExcepted checks if the given IP is currently excepted from the SPN.
|
||||
func IsExcepted(ip net.IP) bool {
|
||||
exceptionLock.Lock()
|
||||
defer exceptionLock.Unlock()
|
||||
|
||||
return ip.Equal(exceptIPv4) || ip.Equal(exceptIPv6)
|
||||
}
|
||||
38
spn/captain/gossip.go
Normal file
38
spn/captain/gossip.go
Normal file
@@ -0,0 +1,38 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
gossipOps = make(map[string]*GossipOp)
|
||||
gossipOpsLock sync.RWMutex
|
||||
)
|
||||
|
||||
func registerGossipOp(craneID string, op *GossipOp) {
|
||||
gossipOpsLock.Lock()
|
||||
defer gossipOpsLock.Unlock()
|
||||
|
||||
gossipOps[craneID] = op
|
||||
}
|
||||
|
||||
func deleteGossipOp(craneID string) {
|
||||
gossipOpsLock.Lock()
|
||||
defer gossipOpsLock.Unlock()
|
||||
|
||||
delete(gossipOps, craneID)
|
||||
}
|
||||
|
||||
func gossipRelayMsg(receivedFrom string, msgType GossipMsgType, data []byte) {
|
||||
gossipOpsLock.RLock()
|
||||
defer gossipOpsLock.RUnlock()
|
||||
|
||||
for craneID, gossipOp := range gossipOps {
|
||||
// Don't return same msg back to sender.
|
||||
if craneID == receivedFrom {
|
||||
continue
|
||||
}
|
||||
|
||||
gossipOp.sendMsg(msgType, data)
|
||||
}
|
||||
}
|
||||
47
spn/captain/hooks.go
Normal file
47
spn/captain/hooks.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/safing/portmaster/service/updates"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
)
|
||||
|
||||
// startDockHooks subscribes to crane updates from the docks module.
func startDockHooks() {
	docks.RegisterCraneUpdateHook(handleCraneUpdate)
}

// stopDockHooks unsubscribes from crane updates.
func stopDockHooks() {
	docks.ResetCraneUpdateHook()
}

// handleCraneUpdate reacts to a changed crane state: on clients it may
// trigger a connection health check, on public hubs a Hub status update.
func handleCraneUpdate(crane *docks.Crane) {
	if crane == nil {
		return
	}

	if conf.Client() && crane.Controller != nil && crane.Controller.Abandoning.IsSet() {
		// Check connection to home hub.
		triggerClientHealthCheck()
	}

	if conf.PublicHub() && crane.Public() {
		// Update Hub status.
		updateConnectionStatus()
	}
}

// updateConnectionStatus schedules a delayed Hub status update and, if no
// public crane is active anymore, triggers a pending restart.
func updateConnectionStatus() {
	// Delay updating status for a better chance to combine multiple changes.
	statusUpdateTask.Schedule(time.Now().Add(maintainStatusUpdateDelay))

	// Check if we lost all connections and trigger a pending restart if we did.
	for _, crane := range docks.GetAllAssignedCranes() {
		if crane.Public() && !crane.Stopped() {
			// There is at least one public and active crane, so don't restart now.
			return
		}
	}
	updates.TriggerRestartIfPending()
}
|
||||
108
spn/captain/intel.go
Normal file
108
spn/captain/intel.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/safing/portbase/config"
|
||||
"github.com/safing/portbase/updater"
|
||||
"github.com/safing/portmaster/service/updates"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
"github.com/safing/portmaster/spn/ships"
|
||||
)
|
||||
|
||||
var (
	// intelResource is the currently loaded intel update file; nil until the
	// first successful load (and after resetSPNIntel).
	intelResource *updater.File
	// intelResourcePath is the path of the SPN intel file in the updates system.
	intelResourcePath = "intel/spn/main-intel.yaml"
	// intelResourceMapName is the map this intel file applies to.
	intelResourceMapName = "main"
	// intelResourceUpdateLock guards intelResource.
	intelResourceUpdateLock sync.Mutex
)
|
||||
|
||||
func registerIntelUpdateHook() error {
|
||||
if err := module.RegisterEventHook(
|
||||
updates.ModuleName,
|
||||
updates.ResourceUpdateEvent,
|
||||
"update SPN intel",
|
||||
updateSPNIntel,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := module.RegisterEventHook(
|
||||
"config",
|
||||
config.ChangeEvent,
|
||||
"update SPN intel",
|
||||
updateSPNIntel,
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateSPNIntel loads the SPN intel file from the updates system and
// applies it to the navigator. It is registered as an event hook for both
// resource updates and config changes; the ignored second parameter is the
// event payload required by the hook signature.
func updateSPNIntel(ctx context.Context, _ interface{}) (err error) {
	intelResourceUpdateLock.Lock()
	defer intelResourceUpdateLock.Unlock()

	// Only update SPN intel when using the matching map.
	if conf.MainMapName != intelResourceMapName {
		return fmt.Errorf("intel resource not for map %q", conf.MainMapName)
	}

	// Check if there is something to do.
	if intelResource != nil && !intelResource.UpgradeAvailable() {
		return nil
	}

	// Get intel file and load it from disk.
	// Note: this assigns the package-level intelResource (guarded by the lock).
	intelResource, err = updates.GetFile(intelResourcePath)
	if err != nil {
		return fmt.Errorf("failed to get SPN intel update: %w", err)
	}
	intelData, err := os.ReadFile(intelResource.Path())
	if err != nil {
		return fmt.Errorf("failed to load SPN intel update: %w", err)
	}

	// Parse and apply intel data.
	intel, err := hub.ParseIntel(intelData)
	if err != nil {
		return fmt.Errorf("failed to parse SPN intel update: %w", err)
	}

	setVirtualNetworkConfig(intel.VirtualNetworks)
	return navigator.Main.UpdateIntel(intel, cfgOptionTrustNodeNodes())
}
|
||||
|
||||
// resetSPNIntel drops the cached intel resource so that the intel file is
// freshly loaded on the next run of updateSPNIntel (e.g. after a restart).
func resetSPNIntel() {
	intelResourceUpdateLock.Lock()
	defer intelResourceUpdateLock.Unlock()

	intelResource = nil
}
|
||||
|
||||
func setVirtualNetworkConfig(configs []*hub.VirtualNetworkConfig) {
|
||||
// Do nothing if not public Hub.
|
||||
if !conf.PublicHub() {
|
||||
return
|
||||
}
|
||||
// Reset if there are no virtual networks configured.
|
||||
if len(configs) == 0 {
|
||||
ships.SetVirtualNetworkConfig(nil)
|
||||
}
|
||||
|
||||
// Check if we are in a virtual network.
|
||||
for _, config := range configs {
|
||||
if _, ok := config.Mapping[publicIdentity.Hub.ID]; ok {
|
||||
ships.SetVirtualNetworkConfig(config)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If not, reset - we might have been in one before.
|
||||
ships.SetVirtualNetworkConfig(nil)
|
||||
}
|
||||
219
spn/captain/module.go
Normal file
219
spn/captain/module.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/api"
|
||||
"github.com/safing/portbase/config"
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portbase/modules"
|
||||
"github.com/safing/portbase/modules/subsystems"
|
||||
"github.com/safing/portbase/rng"
|
||||
"github.com/safing/portmaster/service/network/netutils"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/crew"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
"github.com/safing/portmaster/spn/patrol"
|
||||
"github.com/safing/portmaster/spn/ships"
|
||||
_ "github.com/safing/portmaster/spn/sluice"
|
||||
)
|
||||
|
||||
// controlledFailureExitCode is used on unrecoverable startup failures to
// signal that the module should not be retried.
const controlledFailureExitCode = 24

// module is the captain module instance, set in init.
var module *modules.Module

// SPNConnectedEvent is the name of the event that is fired when the SPN has connected and is ready.
const SPNConnectedEvent = "spn connect"
|
||||
|
||||
// init registers the captain module with its dependencies, the SPN connected
// event, and the SPN subsystem including its main on/off config option.
func init() {
	module = modules.Register("captain", prep, start, stop, "base", "terminal", "cabin", "ships", "docks", "crew", "navigator", "sluice", "patrol", "netenv")
	module.RegisterEvent(SPNConnectedEvent, false)
	subsystems.Register(
		"spn",
		"SPN",
		"Safing Privacy Network",
		module,
		"config:spn/",
		&config.Option{
			Name:         "SPN Module",
			Key:          CfgOptionEnableSPNKey,
			Description:  "Start the Safing Privacy Network module. If turned off, the SPN is fully disabled on this device.",
			OptType:      config.OptTypeBool,
			DefaultValue: false,
			Annotations: config.Annotations{
				config.DisplayOrderAnnotation: cfgOptionEnableSPNOrder,
				config.CategoryAnnotation:     "General",
			},
		},
	)
}
|
||||
|
||||
func prep() error {
|
||||
// Check if we can parse the bootstrap hub flag.
|
||||
if err := prepBootstrapHubFlag(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Register SPN status provider.
|
||||
if err := registerSPNStatusProvider(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Register API endpoints.
|
||||
if err := registerAPIEndpoints(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if conf.PublicHub() {
|
||||
// Register API authenticator.
|
||||
if err := api.SetAuthenticator(apiAuthenticator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := module.RegisterEventHook(
|
||||
"patrol",
|
||||
patrol.ChangeSignalEventName,
|
||||
"trigger hub status maintenance",
|
||||
func(_ context.Context, _ any) error {
|
||||
TriggerHubStatusMaintenance()
|
||||
return nil
|
||||
},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return prepConfig()
|
||||
}
|
||||
|
||||
// start brings up the captain module. The sequence is order-sensitive:
// masking and intel first, then (on public hubs) identity and piers, then
// dock hooks, bootstrapping, and finally the network optimizer (hubs) or
// the client manager (clients).
func start() error {
	// Enable masking of sensitive data in logs with fresh random bytes.
	maskingBytes, err := rng.Bytes(16)
	if err != nil {
		return fmt.Errorf("failed to get random bytes for masking: %w", err)
	}
	ships.EnableMasking(maskingBytes)

	// Initialize intel.
	if err := registerIntelUpdateHook(); err != nil {
		return err
	}
	// A failed intel update is not fatal at startup - it is retried via hooks.
	if err := updateSPNIntel(module.Ctx, nil); err != nil {
		log.Errorf("spn/captain: failed to update SPN intel: %s", err)
	}

	// Initialize identity and piers.
	if conf.PublicHub() {
		// Load identity.
		if err := loadPublicIdentity(); err != nil {
			// We cannot recover from this, set controlled failure (do not retry).
			modules.SetExitStatusCode(controlledFailureExitCode)

			return err
		}

		// Check if any networks are configured.
		if !conf.HubHasIPv4() && !conf.HubHasIPv6() {
			// We cannot recover from this, set controlled failure (do not retry).
			modules.SetExitStatusCode(controlledFailureExitCode)

			return errors.New("no IP addresses for Hub configured (or detected)")
		}

		// Start management of identity and piers.
		if err := prepPublicIdentityMgmt(); err != nil {
			return err
		}
		// Set ID to display on http info page.
		ships.DisplayHubID = publicIdentity.ID
		// Start listeners.
		if err := startPiers(); err != nil {
			return err
		}

		// Enable connect operation.
		crew.EnableConnecting(publicIdentity.Hub)
	}

	// Subscribe to updates of cranes.
	startDockHooks()

	// bootstrapping
	if err := processBootstrapHubFlag(); err != nil {
		return err
	}
	if err := processBootstrapFileFlag(); err != nil {
		return err
	}

	// network optimizer
	if conf.PublicHub() {
		// Repeats every minute; first run delayed to let startup settle.
		module.NewTask("optimize network", optimizeNetwork).
			Repeat(1 * time.Minute).
			Schedule(time.Now().Add(15 * time.Second))
	}

	// client + home hub manager
	if conf.Client() {
		module.StartServiceWorker("client manager", 0, clientManager)

		// Reset failing hubs when the network changes while not connected.
		if err := module.RegisterEventHook(
			"netenv",
			"network changed",
			"reset failing hubs",
			func(_ context.Context, _ interface{}) error {
				if ready.IsNotSet() {
					navigator.Main.ResetFailingStates(module.Ctx)
				}
				return nil
			},
		); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// stop tears down the captain module: it drops cached intel, detaches the
// crane update hook and, on public hubs, announces shutdown and stops piers.
func stop() error {
	// Reset intel resource so that it is loaded again when starting.
	resetSPNIntel()

	// Unregister crane update hook.
	stopDockHooks()

	// Send shutdown status message.
	if conf.PublicHub() {
		publishShutdownStatus()
		stopPiers()
	}

	return nil
}
|
||||
|
||||
// apiAuthenticator grants User permissions for local API requests.
|
||||
func apiAuthenticator(r *http.Request, s *http.Server) (*api.AuthToken, error) {
|
||||
// Get remote IP.
|
||||
host, _, err := net.SplitHostPort(r.RemoteAddr)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to split host/port: %w", err)
|
||||
}
|
||||
remoteIP := net.ParseIP(host)
|
||||
if remoteIP == nil {
|
||||
return nil, fmt.Errorf("failed to parse remote address %s", host)
|
||||
}
|
||||
|
||||
if !netutils.GetIPScope(remoteIP).IsLocalhost() {
|
||||
return nil, api.ErrAPIAccessDeniedMessage
|
||||
}
|
||||
|
||||
return &api.AuthToken{
|
||||
Read: api.PermitUser,
|
||||
Write: api.PermitUser,
|
||||
}, nil
|
||||
}
|
||||
306
spn/captain/navigation.go
Normal file
306
spn/captain/navigation.go
Normal file
@@ -0,0 +1,306 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portbase/modules"
|
||||
"github.com/safing/portmaster/service/intel"
|
||||
"github.com/safing/portmaster/service/netenv"
|
||||
"github.com/safing/portmaster/service/profile/endpoints"
|
||||
"github.com/safing/portmaster/spn/access"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
// stopCraneAfterBeingUnsuggestedFor is how long a crane may remain
// unsuggested by the network optimizer before it is retired.
const stopCraneAfterBeingUnsuggestedFor = 6 * time.Hour

var (
	// ErrAllHomeHubsExcluded is returned when all available home hubs were excluded.
	ErrAllHomeHubsExcluded = errors.New("all home hubs are excluded")

	// ErrReInitSPNSuggested is returned when no home hub can be found, even without rules.
	ErrReInitSPNSuggested = errors.New("SPN re-init suggested")
)
|
||||
|
||||
// establishHomeHub locates the device, selects home hub candidates near it
// according to policy and routing profile, and connects to the first one
// that works. On an empty map it bootstraps and retries once via goto.
func establishHomeHub(ctx context.Context) error {
	// Get own IP.
	locations, ok := netenv.GetInternetLocation()
	if !ok || len(locations.All) == 0 {
		return errors.New("failed to locate own device")
	}
	log.Debugf(
		"spn/captain: looking for new home hub near %s and %s",
		locations.BestV4(),
		locations.BestV6(),
	)

	// Get own entity.
	// Checking the entity against the entry policies is somewhat hit and miss
	// anyway, as the device location is an approximation.
	var myEntity *intel.Entity
	if dl := locations.BestV4(); dl != nil && dl.IP != nil {
		myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
		myEntity.FetchData(ctx)
	} else if dl := locations.BestV6(); dl != nil && dl.IP != nil {
		myEntity = (&intel.Entity{IP: dl.IP}).Init(0)
		myEntity.FetchData(ctx)
	}

	// Get home hub policy for selecting the home hub.
	homePolicy, err := getHomeHubPolicy()
	if err != nil {
		return err
	}

	// Build navigation options for searching for a home hub.
	opts := &navigator.Options{
		Home: &navigator.HomeHubOptions{
			HubPolicies:        []endpoints.Endpoints{homePolicy},
			CheckHubPolicyWith: myEntity,
		},
	}

	// Add requirement to only use Safing nodes when not using community nodes.
	if !cfgOptionUseCommunityNodes() {
		opts.Home.RequireVerifiedOwners = NonCommunityVerifiedOwners
	}

	// Require a trusted home node when the routing profile requires less than two hops.
	routingProfile := navigator.GetRoutingProfile(cfgOptionRoutingAlgorithm())
	if routingProfile.MinHops < 2 {
		opts.Home.Regard = opts.Home.Regard.Add(navigator.StateTrusted)
	}

	// Find nearby hubs.
	// NOTE(review): BestV4()/BestV6() may be nil here; LocationOrNil()
	// presumably handles a nil receiver - confirm in netenv.
findCandidates:
	candidates, err := navigator.Main.FindNearestHubs(
		locations.BestV4().LocationOrNil(),
		locations.BestV6().LocationOrNil(),
		opts, navigator.HomeHub,
	)
	if err != nil {
		switch {
		case errors.Is(err, navigator.ErrEmptyMap):
			// bootstrap to the network!
			err := bootstrapWithUpdates()
			if err != nil {
				return err
			}
			goto findCandidates

		case errors.Is(err, navigator.ErrAllPinsDisregarded):
			// With a custom policy, the user can fix it; without one, a
			// re-initialization of the SPN is the suggested remedy.
			if len(homePolicy) > 0 {
				return ErrAllHomeHubsExcluded
			}
			return ErrReInitSPNSuggested

		default:
			return fmt.Errorf("failed to find nearby hubs: %w", err)
		}
	}

	// Try connecting to a hub.
	var tries int
	var candidate *hub.Hub
	for tries, candidate = range candidates {
		err = connectToHomeHub(ctx, candidate)
		if err != nil {
			// Check if context is canceled.
			if ctx.Err() != nil {
				return ctx.Err()
			}
			// Check if the SPN protocol is stopping again.
			if errors.Is(err, terminal.ErrStopping) {
				return err
			}
			log.Warningf("spn/captain: failed to connect to %s as new home: %s", candidate, err)
		} else {
			log.Infof("spn/captain: established connection to %s as new home with %d failed tries", candidate, tries)
			return nil
		}
	}
	// All candidates failed: report the last error, or that there were none.
	if err != nil {
		return fmt.Errorf("failed to connect to a new home hub - tried %d hubs: %w", tries+1, err)
	}
	return fmt.Errorf("no home hub candidates available")
}
|
||||
|
||||
func connectToHomeHub(ctx context.Context, dst *hub.Hub) error {
|
||||
// Create new context with timeout.
|
||||
// The maximum timeout is a worst case safeguard.
|
||||
// Keep in mind that multiple IPs and protocols may be tried in all configurations.
|
||||
// Some servers will be (possibly on purpose) hard to reach.
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// Set and clean up exceptions.
|
||||
setExceptions(dst.Info.IPv4, dst.Info.IPv6)
|
||||
defer setExceptions(nil, nil)
|
||||
|
||||
// Connect to hub.
|
||||
crane, err := EstablishCrane(ctx, dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Cleanup connection in case of failure.
|
||||
var success bool
|
||||
defer func() {
|
||||
if !success {
|
||||
crane.Stop(nil)
|
||||
}
|
||||
}()
|
||||
|
||||
// Query all gossip msgs on first connection.
|
||||
gossipQuery, tErr := NewGossipQueryOp(crane.Controller)
|
||||
if tErr != nil {
|
||||
log.Warningf("spn/captain: failed to start initial gossip query: %s", tErr)
|
||||
}
|
||||
// Wait for gossip query to complete.
|
||||
select {
|
||||
case <-gossipQuery.ctx.Done():
|
||||
case <-ctx.Done():
|
||||
return context.Canceled
|
||||
}
|
||||
|
||||
// Create communication terminal.
|
||||
homeTerminal, initData, tErr := docks.NewLocalCraneTerminal(crane, nil, terminal.DefaultHomeHubTerminalOpts())
|
||||
if tErr != nil {
|
||||
return tErr.Wrap("failed to create home terminal")
|
||||
}
|
||||
tErr = crane.EstablishNewTerminal(homeTerminal, initData)
|
||||
if tErr != nil {
|
||||
return tErr.Wrap("failed to connect home terminal")
|
||||
}
|
||||
|
||||
if !DisableAccount {
|
||||
// Authenticate to home hub.
|
||||
authOp, tErr := access.AuthorizeToTerminal(homeTerminal)
|
||||
if tErr != nil {
|
||||
return tErr.Wrap("failed to authorize")
|
||||
}
|
||||
select {
|
||||
case tErr := <-authOp.Result:
|
||||
if !tErr.Is(terminal.ErrExplicitAck) {
|
||||
return tErr.Wrap("failed to authenticate to")
|
||||
}
|
||||
case <-time.After(3 * time.Second):
|
||||
return terminal.ErrTimeout.With("waiting for auth to complete")
|
||||
case <-ctx.Done():
|
||||
return terminal.ErrStopping
|
||||
}
|
||||
}
|
||||
|
||||
// Set new home on map.
|
||||
ok := navigator.Main.SetHome(dst.ID, homeTerminal)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to set home hub on map")
|
||||
}
|
||||
|
||||
// Assign crane to home hub in order to query it later.
|
||||
docks.AssignCrane(crane.ConnectedHub.ID, crane)
|
||||
|
||||
success = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// optimizeNetwork runs one optimization pass over the Hub network: it asks
// the navigator for suggested connections, establishes missing lanes (up to
// the allowed maximum), refreshes suggestion timestamps on existing cranes,
// and retires cranes that have been unsuggested for too long.
// It is scheduled as a repeating module task; the task parameter is unused.
func optimizeNetwork(ctx context.Context, task *modules.Task) error {
	// Only public hubs with a loaded identity optimize the network.
	if publicIdentity == nil {
		return nil
	}

optimize:
	result, err := navigator.Main.Optimize(nil)
	if err != nil {
		if errors.Is(err, navigator.ErrEmptyMap) {
			// bootstrap to the network!
			err := bootstrapWithUpdates()
			if err != nil {
				return err
			}
			goto optimize
		}

		return err
	}

	// Create any new connections.
	var createdConnections int
	var attemptedConnections int
	for _, connectTo := range result.SuggestedConnections {
		// Skip duplicates.
		if connectTo.Duplicate {
			continue
		}

		// Check if connection already exists.
		crane := docks.GetAssignedCrane(connectTo.Hub.ID)
		if crane != nil {
			// Update last suggested timestamp.
			crane.NetState.UpdateLastSuggestedAt()
			// Continue crane if stopping.
			if crane.AbortStopping() {
				log.Infof("spn/captain: optimization aborted retiring of %s, removed stopping mark", crane)
				crane.NotifyUpdate()
			}

			// Create new connections if we have connects left.
		} else if createdConnections < result.MaxConnect {
			attemptedConnections++

			crane, tErr := EstablishPublicLane(ctx, connectTo.Hub)
			if !tErr.IsOK() {
				log.Warningf("spn/captain: failed to establish lane to %s: %s", connectTo.Hub, tErr)
			} else {
				createdConnections++
				crane.NetState.UpdateLastSuggestedAt()

				log.Infof("spn/captain: established lane to %s", connectTo.Hub)
			}
		}
	}

	// Log optimization result.
	if attemptedConnections > 0 {
		log.Infof(
			"spn/captain: created %d/%d new connections for %s optimization",
			createdConnections,
			attemptedConnections,
			result.Purpose)
	} else {
		log.Infof(
			"spn/captain: checked %d connections for %s optimization",
			len(result.SuggestedConnections),
			result.Purpose,
		)
	}

	// Retire cranes if unsuggested for a while.
	if result.StopOthers {
		for _, crane := range docks.GetAllAssignedCranes() {
			switch {
			case crane.Stopped():
				// Crane already stopped.
			case crane.IsStopping():
				// Crane is stopping, forcibly stop if mine and suggested.
				if crane.IsMine() && crane.NetState.StopSuggested() {
					crane.Stop(nil)
				}
			case crane.IsMine() && crane.NetState.StoppingSuggested():
				// Mark as stopping if mine and suggested.
				crane.MarkStopping()
			case crane.NetState.RequestStoppingSuggested(stopCraneAfterBeingUnsuggestedFor):
				// Mark as stopping requested.
				crane.MarkStoppingRequested()
			}
		}
	}

	return nil
}
|
||||
156
spn/captain/op_gossip.go
Normal file
156
spn/captain/op_gossip.go
Normal file
@@ -0,0 +1,156 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/container"
|
||||
"github.com/safing/portbase/formats/varint"
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
// GossipOpType is the type ID of the gossip operation.
const GossipOpType string = "gossip"

// GossipMsgType is the gossip message type.
type GossipMsgType uint8

// Gossip Message Types.
const (
	GossipHubAnnouncementMsg GossipMsgType = 1 // carries a hub announcement
	GossipHubStatusMsg       GossipMsgType = 2 // carries a hub status update
)

// String returns a human-readable name of the gossip message type.
func (msgType GossipMsgType) String() string {
	switch msgType {
	case GossipHubAnnouncementMsg:
		return "hub announcement"
	case GossipHubStatusMsg:
		return "hub status"
	default:
		return "unknown gossip msg"
	}
}
|
||||
|
||||
// GossipOp is used to gossip Hub messages.
type GossipOp struct {
	terminal.OperationBase

	// craneID identifies the crane this op runs on; used for logging and to
	// avoid relaying messages back to their sender.
	craneID string
}

// Type returns the type ID.
func (op *GossipOp) Type() string {
	return GossipOpType
}
||||
|
||||
// init registers the gossip operation factory with the terminal package.
// Gossip ops may only be started on crane controller terminals.
func init() {
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     GossipOpType,
		Requires: terminal.IsCraneController,
		Start:    runGossipOp,
	})
}
|
||||
|
||||
// NewGossipOp start a new gossip operation.
|
||||
func NewGossipOp(controller *docks.CraneControllerTerminal) (*GossipOp, *terminal.Error) {
|
||||
// Create and init.
|
||||
op := &GossipOp{
|
||||
craneID: controller.Crane.ID,
|
||||
}
|
||||
err := controller.StartOperation(op, nil, 1*time.Minute)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
op.InitOperationBase(controller, op.ID())
|
||||
|
||||
// Register and return.
|
||||
registerGossipOp(controller.Crane.ID, op)
|
||||
return op, nil
|
||||
}
|
||||
|
||||
func runGossipOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
|
||||
// Check if we are run by a controller.
|
||||
controller, ok := t.(*docks.CraneControllerTerminal)
|
||||
if !ok {
|
||||
return nil, terminal.ErrIncorrectUsage.With("gossip op may only be started by a crane controller terminal, but was started by %T", t)
|
||||
}
|
||||
|
||||
// Create, init, register and return.
|
||||
op := &GossipOp{
|
||||
craneID: controller.Crane.ID,
|
||||
}
|
||||
op.InitOperationBase(t, opID)
|
||||
registerGossipOp(controller.Crane.ID, op)
|
||||
return op, nil
|
||||
}
|
||||
|
||||
// sendMsg packs the given data as a gossip message of the given type and
// sends it with high priority. Send errors are only logged — gossip
// forwarding is best-effort.
func (op *GossipOp) sendMsg(msgType GossipMsgType, data []byte) {
	// Create message.
	msg := op.NewEmptyMsg()
	msg.Data = container.New(
		varint.Pack8(uint8(msgType)),
		data,
	)
	msg.Unit.MakeHighPriority()

	// Send.
	err := op.Send(msg, 1*time.Second)
	if err != nil {
		log.Debugf("spn/captain: failed to forward %s via %s: %s", msgType, op.craneID, err)
	}
}
|
||||
|
||||
// Deliver delivers a message to the operation.
// It parses the gossip message type, imports and verifies the contained Hub
// announcement or status, and relays the raw data onward when the import
// reports it should be forwarded.
func (op *GossipOp) Deliver(msg *terminal.Msg) *terminal.Error {
	defer msg.Finish()

	gossipMsgTypeN, err := msg.Data.GetNextN8()
	if err != nil {
		return terminal.ErrMalformedData.With("failed to parse gossip message type")
	}
	gossipMsgType := GossipMsgType(gossipMsgTypeN)

	// Prepare data: the remaining payload is either an announcement or a
	// status, depending on the message type.
	data := msg.Data.CompileData()
	var announcementData, statusData []byte
	switch gossipMsgType {
	case GossipHubAnnouncementMsg:
		announcementData = data
	case GossipHubStatusMsg:
		statusData = data
	default:
		log.Warningf("spn/captain: received unknown gossip message type from %s: %d", op.craneID, gossipMsgType)
		return nil
	}

	// Import and verify.
	h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
	if tErr != nil {
		if tErr.Is(hub.ErrOldData) {
			// Old data is expected in gossip; log quietly.
			log.Debugf("spn/captain: ignoring old %s from %s", gossipMsgType, op.craneID)
		} else {
			log.Warningf("spn/captain: failed to import %s from %s: %s", gossipMsgType, op.craneID, tErr)
		}
	} else if forward {
		// Only log if we received something to save/forward.
		log.Infof("spn/captain: received %s for %s", gossipMsgType, h)
	}

	// Relay data, using our crane ID as the source so it is not echoed back.
	if forward {
		gossipRelayMsg(op.craneID, gossipMsgType, data)
	}
	return nil
}
|
||||
|
||||
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *GossipOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
	// Unregister so no further gossip is relayed via this crane.
	deleteGossipOp(op.craneID)
	return err
}
|
||||
195
spn/captain/op_gossip_query.go
Normal file
195
spn/captain/op_gossip_query.go
Normal file
@@ -0,0 +1,195 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/container"
|
||||
"github.com/safing/portbase/formats/varint"
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
// GossipQueryOpType is the type ID of the gossip query operation.
const GossipQueryOpType string = "gossip/query"
|
||||
|
||||
// GossipQueryOp is used to query gossip messages.
type GossipQueryOp struct {
	terminal.OperationBase

	// t is the terminal this operation runs on.
	t terminal.Terminal
	// client is set when this end initiated the query.
	client bool
	// importCnt counts successfully imported entries; only used and
	// reported on the client side.
	importCnt int

	// ctx controls the server-side send loop; cancelCtx is called in
	// HandleStop to end it.
	ctx       context.Context
	cancelCtx context.CancelFunc
}
|
||||
|
||||
// Type returns the type ID of the gossip query operation.
func (op *GossipQueryOp) Type() string {
	return GossipQueryOpType
}
|
||||
|
||||
// init registers the gossip query operation factory so that incoming query
// operations can be started on crane controller terminals.
func init() {
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     GossipQueryOpType,
		Requires: terminal.IsCraneController,
		Start:    runGossipQueryOp,
	})
}
|
||||
|
||||
// NewGossipQueryOp starts a new gossip query operation.
// The local end is marked as client; received entries are imported in
// Deliver and counted in importCnt.
func NewGossipQueryOp(t terminal.Terminal) (*GossipQueryOp, *terminal.Error) {
	// Create and init.
	op := &GossipQueryOp{
		t:      t,
		client: true,
	}
	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
	err := t.StartOperation(op, nil, 1*time.Minute)
	if err != nil {
		return nil, err
	}
	return op, nil
}
|
||||
|
||||
// runGossipQueryOp starts the server side of a gossip query: a worker
// streams all known hub announcements and statuses back to the requester.
func runGossipQueryOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Create, init, register and return.
	op := &GossipQueryOp{t: t}
	op.ctx, op.cancelCtx = context.WithCancel(t.Ctx())
	op.InitOperationBase(t, opID)

	module.StartWorker("gossip query handler", op.handler)

	return op, nil
}
|
||||
|
||||
func (op *GossipQueryOp) handler(_ context.Context) error {
|
||||
tErr := op.sendMsgs(hub.MsgTypeAnnouncement)
|
||||
if tErr != nil {
|
||||
op.Stop(op, tErr)
|
||||
return nil // Clean worker exit.
|
||||
}
|
||||
|
||||
tErr = op.sendMsgs(hub.MsgTypeStatus)
|
||||
if tErr != nil {
|
||||
op.Stop(op, tErr)
|
||||
return nil // Clean worker exit.
|
||||
}
|
||||
|
||||
op.Stop(op, nil)
|
||||
return nil // Clean worker exit.
|
||||
}
|
||||
|
||||
// sendMsgs iterates over all stored raw gossip messages of the given type
// and sends each one to the requester, prefixed with the matching gossip
// message type. It returns when the iterator is exhausted, a send fails, or
// the operation context is canceled.
func (op *GossipQueryOp) sendMsgs(msgType hub.MsgType) *terminal.Error {
	it, err := hub.QueryRawGossipMsgs(conf.MainMapName, msgType)
	if err != nil {
		return terminal.ErrInternalError.With("failed to query: %w", err)
	}
	defer it.Cancel()

iterating:
	for {
		select {
		case r := <-it.Next:
			// Check if we are done.
			if r == nil {
				return nil
			}

			// Ensure we're handling a hub msg.
			hubMsg, err := hub.EnsureHubMsg(r)
			if err != nil {
				log.Warningf("spn/captain: failed to load hub msg: %s", err)
				continue iterating
			}

			// Create gossip msg.
			var c *container.Container
			switch hubMsg.Type {
			case hub.MsgTypeAnnouncement:
				c = container.New(
					varint.Pack8(uint8(GossipHubAnnouncementMsg)),
					hubMsg.Data,
				)
			case hub.MsgTypeStatus:
				c = container.New(
					varint.Pack8(uint8(GossipHubStatusMsg)),
					hubMsg.Data,
				)
			default:
				// Unknown types are skipped; c stays nil and nothing is sent.
				log.Warningf("spn/captain: unknown hub msg for gossip query at %q: %s", hubMsg.Key(), hubMsg.Type)
			}

			// Send msg.
			if c != nil {
				msg := op.NewEmptyMsg()
				msg.Unit.MakeHighPriority()
				msg.Data = c
				tErr := op.Send(msg, 1*time.Second)
				if tErr != nil {
					return tErr.Wrap("failed to send msg")
				}
			}

		case <-op.ctx.Done():
			return terminal.ErrStopping
		}
	}
}
|
||||
|
||||
// Deliver delivers the message to the operation.
|
||||
func (op *GossipQueryOp) Deliver(msg *terminal.Msg) *terminal.Error {
|
||||
defer msg.Finish()
|
||||
|
||||
gossipMsgTypeN, err := msg.Data.GetNextN8()
|
||||
if err != nil {
|
||||
return terminal.ErrMalformedData.With("failed to parse gossip message type")
|
||||
}
|
||||
gossipMsgType := GossipMsgType(gossipMsgTypeN)
|
||||
|
||||
// Prepare data.
|
||||
data := msg.Data.CompileData()
|
||||
var announcementData, statusData []byte
|
||||
switch gossipMsgType {
|
||||
case GossipHubAnnouncementMsg:
|
||||
announcementData = data
|
||||
case GossipHubStatusMsg:
|
||||
statusData = data
|
||||
default:
|
||||
log.Warningf("spn/captain: received unknown gossip message type from gossip query: %d", gossipMsgType)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Import and verify.
|
||||
h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
|
||||
if tErr != nil {
|
||||
log.Warningf("spn/captain: failed to import %s from gossip query: %s", gossipMsgType, tErr)
|
||||
} else {
|
||||
log.Infof("spn/captain: received %s for %s from gossip query", gossipMsgType, h)
|
||||
op.importCnt++
|
||||
}
|
||||
|
||||
// Relay data.
|
||||
if forward {
|
||||
// TODO: Find better way to get craneID.
|
||||
craneID := strings.SplitN(op.t.FmtID(), "#", 2)[0]
|
||||
gossipRelayMsg(craneID, gossipMsgType, data)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
func (op *GossipQueryOp) HandleStop(err *terminal.Error) (errorToSend *terminal.Error) {
	if op.client {
		log.Infof("spn/captain: gossip query imported %d entries", op.importCnt)
	}
	// Cancel the context to end a possibly running server-side send loop.
	op.cancelCtx()
	return err
}
|
||||
183
spn/captain/op_publish.go
Normal file
183
spn/captain/op_publish.go
Normal file
@@ -0,0 +1,183 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/container"
|
||||
"github.com/safing/portmaster/spn/cabin"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/terminal"
|
||||
)
|
||||
|
||||
// PublishOpType is the type ID of the publish operation.
const PublishOpType string = "publish"
|
||||
|
||||
// PublishOp is used to publish a connection.
type PublishOp struct {
	terminal.OperationBase
	controller *docks.CraneControllerTerminal

	// identity is only set on the client (publishing) side.
	identity *cabin.Identity
	// requestingHub and verification are only set on the server
	// (verifying) side.
	requestingHub *hub.Hub
	verification  *cabin.Verification
	// result receives the final operation error; buffered with capacity 1
	// so HandleStop never blocks on it.
	result chan *terminal.Error
}
|
||||
|
||||
// Type returns the type ID of the publish operation.
func (op *PublishOp) Type() string {
	return PublishOpType
}
|
||||
|
||||
// init registers the publish operation factory so that incoming publish
// operations can be started on crane controller terminals.
func init() {
	terminal.RegisterOpType(terminal.OperationFactory{
		Type:     PublishOpType,
		Requires: terminal.IsCraneController,
		Start:    runPublishOp,
	})
}
|
||||
|
||||
// NewPublishOp starts a new publish operation on the given crane controller,
// sending this Hub's announcement and status to the other side for
// verification.
func NewPublishOp(controller *docks.CraneControllerTerminal, identity *cabin.Identity) (*PublishOp, *terminal.Error) {
	// Create and init.
	op := &PublishOp{
		controller: controller,
		identity:   identity,
		result:     make(chan *terminal.Error, 1),
	}
	msg := container.New()

	// Add Hub Announcement.
	announcementData, err := identity.ExportAnnouncement()
	if err != nil {
		return nil, terminal.ErrInternalError.With("failed to export announcement: %w", err)
	}
	msg.AppendAsBlock(announcementData)

	// Add Hub Status.
	statusData, err := identity.ExportStatus()
	if err != nil {
		return nil, terminal.ErrInternalError.With("failed to export status: %w", err)
	}
	msg.AppendAsBlock(statusData)

	tErr := controller.StartOperation(op, msg, 10*time.Second)
	if tErr != nil {
		return nil, tErr
	}
	return op, nil
}
|
||||
|
||||
// runPublishOp starts the server side of a publish operation: it imports and
// verifies the announced Hub info, relays it to other connected Hubs, and
// replies with a verification request the client must sign.
func runPublishOp(t terminal.Terminal, opID uint32, data *container.Container) (terminal.Operation, *terminal.Error) {
	// Check if we are run by a controller.
	controller, ok := t.(*docks.CraneControllerTerminal)
	if !ok {
		return nil, terminal.ErrIncorrectUsage.With("publish op may only be started by a crane controller terminal, but was started by %T", t)
	}

	// Parse and import Announcement and Status.
	announcementData, err := data.GetNextBlock()
	if err != nil {
		return nil, terminal.ErrMalformedData.With("failed to get announcement: %w", err)
	}
	statusData, err := data.GetNextBlock()
	if err != nil {
		return nil, terminal.ErrMalformedData.With("failed to get status: %w", err)
	}
	h, forward, tErr := docks.ImportAndVerifyHubInfo(module.Ctx, "", announcementData, statusData, conf.MainMapName, conf.MainMapScope)
	if tErr != nil {
		return nil, tErr.Wrap("failed to import and verify hub")
	}
	// Update reference in case it was changed by the import.
	controller.Crane.ConnectedHub = h

	// Relay data.
	if forward {
		gossipRelayMsg(controller.Crane.ID, GossipHubAnnouncementMsg, announcementData)
		gossipRelayMsg(controller.Crane.ID, GossipHubStatusMsg, statusData)
	}

	// Create verification request.
	v, request, err := cabin.CreateVerificationRequest(PublishOpType, "", "")
	if err != nil {
		return nil, terminal.ErrInternalError.With("failed to create verification request: %w", err)
	}

	// Create operation.
	op := &PublishOp{
		controller:    controller,
		requestingHub: h,
		verification:  v,
		result:        make(chan *terminal.Error, 1),
	}
	op.InitOperationBase(controller, opID)

	// Reply with verification request.
	tErr = op.Send(op.NewMsg(request), 10*time.Second)
	if tErr != nil {
		return nil, tErr.Wrap("failed to send verification request")
	}

	return op, nil
}
|
||||
|
||||
// Deliver delivers a message to the operation.
|
||||
func (op *PublishOp) Deliver(msg *terminal.Msg) *terminal.Error {
|
||||
defer msg.Finish()
|
||||
|
||||
if op.identity != nil {
|
||||
// Client
|
||||
|
||||
// Sign the received verification request.
|
||||
response, err := op.identity.SignVerificationRequest(msg.Data.CompileData(), PublishOpType, "", "")
|
||||
if err != nil {
|
||||
return terminal.ErrPermissionDenied.With("signing verification request failed: %w", err)
|
||||
}
|
||||
|
||||
return op.Send(op.NewMsg(response), 10*time.Second)
|
||||
} else if op.requestingHub != nil {
|
||||
// Server
|
||||
|
||||
// Verify the signed request.
|
||||
err := op.verification.Verify(msg.Data.CompileData(), op.requestingHub)
|
||||
if err != nil {
|
||||
return terminal.ErrPermissionDenied.With("checking verification request failed: %w", err)
|
||||
}
|
||||
return terminal.ErrExplicitAck
|
||||
}
|
||||
|
||||
return terminal.ErrInternalError.With("invalid operation state")
|
||||
}
|
||||
|
||||
// Result returns the result (end error) of the operation.
// The channel receives at most one value, sent from HandleStop.
func (op *PublishOp) Result() <-chan *terminal.Error {
	return op.result
}
|
||||
|
||||
// HandleStop gives the operation the ability to cleanly shut down.
// The returned error is the error to send to the other side.
// Should never be called directly. Call Stop() instead.
// On an explicit ack (successful verification) the crane is published.
func (op *PublishOp) HandleStop(tErr *terminal.Error) (errorToSend *terminal.Error) {
	if tErr.Is(terminal.ErrExplicitAck) {
		// TODO: Check for concurrent access.
		if op.controller.Crane.ConnectedHub == nil {
			op.controller.Crane.ConnectedHub = op.requestingHub
		}

		// Publish crane, abort if it fails.
		err := op.controller.Crane.Publish()
		if err != nil {
			tErr = terminal.ErrInternalError.With("failed to publish crane: %w", err)
			op.controller.Crane.Stop(tErr)
		} else {
			op.controller.Crane.NotifyUpdate()
		}
	}

	// Report the result without blocking if nobody is listening.
	select {
	case op.result <- tErr:
	default:
	}
	return tErr
}
|
||||
131
spn/captain/piers.go
Normal file
131
spn/captain/piers.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portmaster/service/intel"
|
||||
"github.com/safing/portmaster/service/network/netutils"
|
||||
"github.com/safing/portmaster/service/profile/endpoints"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/ships"
|
||||
)
|
||||
|
||||
var (
	// dockingRequests receives new ships from all piers; buffered to absorb
	// bursts of incoming connections.
	dockingRequests = make(chan ships.Ship, 100)
	// piers holds all established listeners so they can be abolished on stop.
	piers []ships.Pier
)
|
||||
|
||||
// startPiers establishes a pier (listener) for every transport advertised by
// the public identity and starts the worker that handles docking requests.
func startPiers() error {
	// Get and check transports.
	transports := publicIdentity.Hub.Info.Transports
	if len(transports) == 0 {
		return errors.New("no transports defined")
	}

	piers = make([]ships.Pier, 0, len(transports))
	for _, t := range transports {
		// Parse transport.
		transport, err := hub.ParseTransport(t)
		if err != nil {
			return fmt.Errorf("cannot build pier for invalid transport %q: %w", t, err)
		}

		// Establish pier / listener.
		pier, err := ships.EstablishPier(transport, dockingRequests)
		if err != nil {
			return fmt.Errorf("failed to establish pier for transport %q: %w", t, err)
		}

		piers = append(piers, pier)
		log.Infof("spn/captain: pier for transport %q built", t)
	}

	// Start worker to handle docking requests.
	module.StartServiceWorker("docking request handler", 0, dockingRequestHandler)

	return nil
}
|
||||
|
||||
func stopPiers() {
|
||||
for _, pier := range piers {
|
||||
pier.Abolish()
|
||||
}
|
||||
}
|
||||
|
||||
// dockingRequestHandler receives ships from all piers, checks their docking
// permission against the entry policy, and hands permitted ships over to
// crane commissioning. It runs until the context is canceled.
func dockingRequestHandler(ctx context.Context) error {
	// Sink all waiting ships when this worker ends.
	// But don't be destructive so the service worker could recover.
	defer func() {
		for {
			select {
			case ship := <-dockingRequests:
				if ship != nil {
					ship.Sink()
				}
			default:
				return
			}
		}
	}()

	for {
		select {
		case <-ctx.Done():
			return nil
		case ship := <-dockingRequests:
			// Ignore nil ships.
			if ship == nil {
				continue
			}

			if err := checkDockingPermission(ctx, ship); err != nil {
				log.Warningf("spn/captain: denied ship from %s to dock at pier %s: %s", ship.RemoteAddr(), ship.Transport().String(), err)
			} else {
				handleDockingRequest(ship)
			}
		}
	}
}
|
||||
|
||||
// checkDockingPermission checks the ship's remote address against this Hub's
// entry policy. A non-nil error means docking must be denied.
func checkDockingPermission(ctx context.Context, ship ships.Ship) error {
	remoteIP, remotePort, err := netutils.IPPortFromAddr(ship.RemoteAddr())
	if err != nil {
		return fmt.Errorf("failed to parse remote IP: %w", err)
	}

	// Create entity.
	entity := (&intel.Entity{
		IP:       remoteIP,
		Protocol: uint8(netutils.ProtocolFromNetwork(ship.RemoteAddr().Network())),
		Port:     remotePort,
	}).Init(ship.Transport().Port)
	entity.FetchData(ctx)

	// Check against policy. Only an explicit deny blocks docking.
	result, reason := publicIdentity.Hub.GetInfo().EntryPolicy().Match(ctx, entity)
	if result == endpoints.Denied {
		return fmt.Errorf("entry policy violated: %s", reason)
	}

	return nil
}
|
||||
|
||||
func handleDockingRequest(ship ships.Ship) {
|
||||
log.Infof("spn/captain: pemitting %s to dock", ship)
|
||||
|
||||
crane, err := docks.NewCrane(ship, nil, publicIdentity)
|
||||
if err != nil {
|
||||
log.Warningf("spn/captain: failed to commission crane for %s: %s", ship, err)
|
||||
return
|
||||
}
|
||||
|
||||
module.StartWorker("start crane", func(ctx context.Context) error {
|
||||
_ = crane.Start(ctx)
|
||||
// Crane handles errors internally.
|
||||
return nil
|
||||
})
|
||||
}
|
||||
247
spn/captain/public.go
Normal file
247
spn/captain/public.go
Normal file
@@ -0,0 +1,247 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/database"
|
||||
"github.com/safing/portbase/log"
|
||||
"github.com/safing/portbase/metrics"
|
||||
"github.com/safing/portbase/modules"
|
||||
"github.com/safing/portmaster/spn/cabin"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/docks"
|
||||
"github.com/safing/portmaster/spn/hub"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
"github.com/safing/portmaster/spn/patrol"
|
||||
)
|
||||
|
||||
const (
	// maintainStatusInterval is how often the status maintenance task repeats.
	maintainStatusInterval = 15 * time.Minute
	// maintainStatusUpdateDelay presumably delays triggered status updates;
	// not referenced in this file excerpt — confirm usage at call sites.
	maintainStatusUpdateDelay = 5 * time.Second
)
|
||||
|
||||
var (
	// publicIdentity is this Hub's public identity, loaded or created by
	// loadPublicIdentity.
	publicIdentity *cabin.Identity
	// publicIdentityKey is the database key the identity is stored under.
	publicIdentityKey = "core:spn/public/identity"

	// Maintenance tasks, created in prepPublicIdentityMgmt.
	publicIdentityUpdateTask *modules.Task
	statusUpdateTask         *modules.Task
)
|
||||
|
||||
// loadPublicIdentity loads the public Hub identity from the database,
// creating a new one if none exists, then configures available networks and
// registers the identity as home hub on the navigator map.
func loadPublicIdentity() (err error) {
	var changed bool

	publicIdentity, changed, err = cabin.LoadIdentity(publicIdentityKey)
	switch {
	case err == nil:
		// load was successful
		log.Infof("spn/captain: loaded public hub identity %s", publicIdentity.Hub.ID)
	case errors.Is(err, database.ErrNotFound):
		// does not exist, create new
		publicIdentity, err = cabin.CreateIdentity(module.Ctx, conf.MainMapName)
		if err != nil {
			return fmt.Errorf("failed to create new identity: %w", err)
		}
		publicIdentity.SetKey(publicIdentityKey)
		changed = true

		log.Infof("spn/captain: created new public hub identity %s", publicIdentity.ID)
	default:
		// loading error, abort
		return fmt.Errorf("failed to load public identity: %w", err)
	}

	// Save to database if the identity changed.
	if changed {
		err = publicIdentity.Save()
		if err != nil {
			return fmt.Errorf("failed to save new/updated identity to database: %w", err)
		}
	}

	// Set available networks.
	conf.SetHubNetworks(
		publicIdentity.Hub.Info.IPv4 != nil,
		publicIdentity.Hub.Info.IPv6 != nil,
	)
	if cfgOptionBindToAdvertised() {
		conf.SetBindAddr(publicIdentity.Hub.Info.IPv4, publicIdentity.Hub.Info.IPv6)
	}

	// Set Home Hub before updating the hub on the map, as this would trigger a
	// recalculation without a Home Hub.
	ok := navigator.Main.SetHome(publicIdentity.ID, nil)
	// Always update the navigator in any case in order to sync the reference to
	// the active struct of the identity.
	navigator.Main.UpdateHub(publicIdentity.Hub)
	// Setting the Home Hub will have failed if the identity was only just
	// created - try again if it failed.
	if !ok {
		ok = navigator.Main.SetHome(publicIdentity.ID, nil)
		if !ok {
			return errors.New("failed to set self as home hub")
		}
	}

	return nil
}
|
||||
|
||||
// prepPublicIdentityMgmt creates the identity and status maintenance tasks
// and hooks identity updates to configuration changes.
func prepPublicIdentityMgmt() error {
	publicIdentityUpdateTask = module.NewTask(
		"maintain public identity",
		maintainPublicIdentity,
	)

	statusUpdateTask = module.NewTask(
		"maintain public status",
		maintainPublicStatus,
	).Repeat(maintainStatusInterval)

	return module.RegisterEventHook(
		"config",
		"config change",
		"update public identity from config",
		func(_ context.Context, _ interface{}) error {
			// Trigger an identity update in 5 minutes, so that several
			// consecutive config changes are batched into one update.
			publicIdentityUpdateTask.Schedule(time.Now().Add(5 * time.Minute))
			return nil
		},
	)
}
|
||||
|
||||
// TriggerHubStatusMaintenance queues the Hub status update task to be executed.
// It is a no-op before prepPublicIdentityMgmt has created the task.
func TriggerHubStatusMaintenance() {
	if statusUpdateTask != nil {
		statusUpdateTask.Queue()
	}
}
|
||||
|
||||
// maintainPublicIdentity refreshes the Hub announcement and, if it changed,
// updates the navigator map and gossips the new announcement to all
// connected Hubs.
func maintainPublicIdentity(ctx context.Context, task *modules.Task) error {
	changed, err := publicIdentity.MaintainAnnouncement(nil, false)
	if err != nil {
		return fmt.Errorf("failed to maintain announcement: %w", err)
	}

	if !changed {
		return nil
	}

	// Update on map.
	navigator.Main.UpdateHub(publicIdentity.Hub)
	log.Debug("spn/captain: updated own hub on map after announcement change")

	// export announcement
	announcementData, err := publicIdentity.ExportAnnouncement()
	if err != nil {
		return fmt.Errorf("failed to export announcement: %w", err)
	}

	// forward to other connected Hubs
	gossipRelayMsg("", GossipHubAnnouncementMsg, announcementData)

	return nil
}
|
||||
|
||||
// maintainPublicStatus collects the current lanes, system load and flags,
// refreshes the Hub status with them and, if it changed, updates the
// navigator map and gossips the new status to all connected Hubs.
func maintainPublicStatus(ctx context.Context, task *modules.Task) error {
	// Get current lanes.
	cranes := docks.GetAllAssignedCranes()
	lanes := make([]*hub.Lane, 0, len(cranes))
	for _, crane := range cranes {
		// Ignore private, stopped or stopping cranes.
		if !crane.Public() || crane.Stopped() || crane.IsStopping() {
			continue
		}

		// Get measurements.
		measurements := crane.ConnectedHub.GetMeasurements()
		latency, _ := measurements.GetLatency()
		capacity, _ := measurements.GetCapacity()

		// Add crane lane.
		lanes = append(lanes, &hub.Lane{
			ID:       crane.ConnectedHub.ID,
			Latency:  latency,
			Capacity: capacity,
		})
	}
	// Sort Lanes for comparing.
	hub.SortLanes(lanes)

	// Get system load and convert to fixed steps.
	var load int
	loadAvg, ok := metrics.LoadAvg15()
	switch {
	case !ok:
		load = -1
	case loadAvg >= 1:
		load = 100
	case loadAvg >= 0.95:
		load = 95
	case loadAvg >= 0.8:
		load = 80
	default:
		load = 0
	}
	if loadAvg >= 0.8 {
		log.Warningf("spn/captain: publishing 15m system load average of %.2f as %d", loadAvg, load)
	}

	// Set flags.
	var flags []string
	if !patrol.HTTPSConnectivityConfirmed() {
		flags = append(flags, hub.FlagNetError)
	}
	// Sort flags for comparing.
	sort.Strings(flags)

	// Run maintenance with the new data.
	changed, err := publicIdentity.MaintainStatus(lanes, &load, flags, false)
	if err != nil {
		return fmt.Errorf("failed to maintain status: %w", err)
	}

	if !changed {
		return nil
	}

	// Update on map.
	navigator.Main.UpdateHub(publicIdentity.Hub)
	log.Debug("spn/captain: updated own hub on map after status change")

	// export status
	statusData, err := publicIdentity.ExportStatus()
	if err != nil {
		return fmt.Errorf("failed to export status: %w", err)
	}

	// forward to other connected Hubs
	gossipRelayMsg("", GossipHubStatusMsg, statusData)

	log.Infof(
		"spn/captain: updated status with load %d and current lanes: %v",
		publicIdentity.Hub.Status.Load,
		publicIdentity.Hub.Status.Lanes,
	)
	return nil
}
|
||||
|
||||
// publishShutdownStatus gossips an offline status to all connected Hubs and
// waits briefly so the broadcast can propagate before shutdown continues.
func publishShutdownStatus() {
	// Create offline status.
	offlineStatusData, err := publicIdentity.MakeOfflineStatus()
	if err != nil {
		log.Errorf("spn/captain: failed to create offline status: %s", err)
		return
	}

	// Forward to other connected Hubs.
	gossipRelayMsg("", GossipHubStatusMsg, offlineStatusData)

	// Leave some time for the message to broadcast.
	time.Sleep(2 * time.Second)

	log.Infof("spn/captain: broadcasted offline status")
}
|
||||
154
spn/captain/status.go
Normal file
154
spn/captain/status.go
Normal file
@@ -0,0 +1,154 @@
|
||||
package captain
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/safing/portbase/config"
|
||||
"github.com/safing/portbase/database/record"
|
||||
"github.com/safing/portbase/runtime"
|
||||
"github.com/safing/portbase/utils/debug"
|
||||
"github.com/safing/portmaster/service/intel/geoip"
|
||||
"github.com/safing/portmaster/spn/conf"
|
||||
"github.com/safing/portmaster/spn/navigator"
|
||||
)
|
||||
|
||||
// SPNStatus holds SPN status information.
// It is published as a runtime database record; the embedded mutex guards
// all fields.
type SPNStatus struct {
	record.Base
	sync.Mutex

	Status             SPNStatusName
	HomeHubID          string
	HomeHubName        string
	ConnectedIP        string
	ConnectedTransport string
	ConnectedCountry   *geoip.CountryInfo
	ConnectedSince     *time.Time
}
|
||||
|
||||
// SPNStatusName is a SPN status.
type SPNStatusName string
|
||||
|
||||
// Possible SPN status values.
const (
	StatusFailed     SPNStatusName = "failed"
	StatusDisabled   SPNStatusName = "disabled"
	StatusConnecting SPNStatusName = "connecting"
	StatusConnected  SPNStatusName = "connected"
)
|
||||
|
||||
var (
	// spnStatus is the single shared status record; guarded by its own mutex.
	spnStatus = &SPNStatus{
		Status: StatusDisabled,
	}
	// spnStatusPushFunc pushes status updates to runtime subscribers.
	spnStatusPushFunc runtime.PushFunc
)
|
||||
|
||||
// registerSPNStatusProvider exposes the SPN status record under
// "runtime:spn/status" and stores the push function for later updates.
func registerSPNStatusProvider() (err error) {
	spnStatus.SetKey("runtime:spn/status")
	spnStatus.UpdateMeta()
	spnStatusPushFunc, err = runtime.Register("spn/status", runtime.ProvideRecord(spnStatus))
	return
}
|
||||
|
||||
// resetSPNStatus clears all connection details and sets the given status
// name. Unless overrideEvenIfConnected is set, an established connection is
// left untouched. The update is pushed to runtime subscribers.
func resetSPNStatus(statusName SPNStatusName, overrideEvenIfConnected bool) {
	// Lock for updating values.
	spnStatus.Lock()
	defer spnStatus.Unlock()

	// Ignore when connected and not overriding.
	if !overrideEvenIfConnected && spnStatus.Status == StatusConnected {
		return
	}

	// Reset status.
	spnStatus.Status = statusName
	spnStatus.HomeHubID = ""
	spnStatus.HomeHubName = ""
	spnStatus.ConnectedIP = ""
	spnStatus.ConnectedTransport = ""
	spnStatus.ConnectedCountry = nil
	spnStatus.ConnectedSince = nil

	// Push new status.
	pushSPNStatusUpdate()
}
|
||||
|
||||
// pushSPNStatusUpdate pushes an update of spnStatus, which must be locked.
func pushSPNStatusUpdate() {
	spnStatus.UpdateMeta()
	spnStatusPushFunc(spnStatus)
}
|
||||
|
||||
// GetSPNStatus returns the current SPN status.
// A field-by-field copy is returned so callers get a consistent snapshot
// without sharing the record's embedded mutex and metadata.
func GetSPNStatus() *SPNStatus {
	spnStatus.Lock()
	defer spnStatus.Unlock()

	return &SPNStatus{
		Status:             spnStatus.Status,
		HomeHubID:          spnStatus.HomeHubID,
		HomeHubName:        spnStatus.HomeHubName,
		ConnectedIP:        spnStatus.ConnectedIP,
		ConnectedTransport: spnStatus.ConnectedTransport,
		ConnectedCountry:   spnStatus.ConnectedCountry,
		ConnectedSince:     spnStatus.ConnectedSince,
	}
}
|
||||
|
||||
// AddToDebugInfo adds the SPN status to the given debug.Info.
// It collects the connection state, configuration flags and navigator map
// statistics into one code section.
func AddToDebugInfo(di *debug.Info) {
	spnStatus.Lock()
	defer spnStatus.Unlock()

	// Check if SPN module is enabled.
	var moduleStatus string
	spnEnabled := config.GetAsBool(CfgOptionEnableSPNKey, false)
	if spnEnabled() {
		moduleStatus = "enabled"
	} else {
		moduleStatus = "disabled"
	}

	// Collect status data.
	lines := make([]string, 0, 20)
	lines = append(lines, fmt.Sprintf("HomeHubID: %v", spnStatus.HomeHubID))
	lines = append(lines, fmt.Sprintf("HomeHubName: %v", spnStatus.HomeHubName))
	lines = append(lines, fmt.Sprintf("HomeHubIP: %v", spnStatus.ConnectedIP))
	lines = append(lines, fmt.Sprintf("Transport: %v", spnStatus.ConnectedTransport))
	if spnStatus.ConnectedSince != nil {
		lines = append(lines, fmt.Sprintf("Connected: %v ago", time.Since(*spnStatus.ConnectedSince).Round(time.Minute)))
	}
	lines = append(lines, "---")
	lines = append(lines, fmt.Sprintf("Client: %v", conf.Client()))
	lines = append(lines, fmt.Sprintf("PublicHub: %v", conf.PublicHub()))
	lines = append(lines, fmt.Sprintf("HubHasIPv4: %v", conf.HubHasIPv4()))
	lines = append(lines, fmt.Sprintf("HubHasIPv6: %v", conf.HubHasIPv6()))

	// Collect status data of map.
	if navigator.Main != nil {
		lines = append(lines, "---")
		mainMapStats := navigator.Main.Stats()
		lines = append(lines, fmt.Sprintf("Map %s:", navigator.Main.Name))
		lines = append(lines, fmt.Sprintf("Active Terminals: %d Hubs", mainMapStats.ActiveTerminals))
		// Collect hub states.
		mapStateSummary := make([]string, 0, len(mainMapStats.States))
		for state, cnt := range mainMapStats.States {
			if cnt > 0 {
				mapStateSummary = append(mapStateSummary, fmt.Sprintf("State %s: %d Hubs", state, cnt))
			}
		}
		// Map iteration order is random; sort for deterministic output.
		sort.Strings(mapStateSummary)
		lines = append(lines, mapStateSummary...)
	}

	// Add all data as section.
	di.AddSection(
		fmt.Sprintf("SPN: %s (module %s)", spnStatus.Status, moduleStatus),
		debug.UseCodeSection|debug.AddContentLineBreaks,
		lines...,
	)
}
|
||||
Reference in New Issue
Block a user