diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index d89171fc..cfb4fe3b 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -1,4 +1,4 @@ -name: Release +name: Release v2.X on: push: @@ -36,6 +36,8 @@ jobs: if-no-files-found: error installer-linux: + #JOB DISABLED FOR NOW + if: false name: Installer linux runs-on: ubuntu-latest needs: release-prep @@ -63,6 +65,8 @@ jobs: if-no-files-found: error installer-windows: + #JOB DISABLED FOR NOW + if: false name: Installer windows runs-on: windows-latest needs: release-prep diff --git a/.gitignore b/.gitignore index 95ec974e..46bbc5ef 100644 --- a/.gitignore +++ b/.gitignore @@ -59,4 +59,8 @@ windows_core_dll/x64/ windows_core_dll/portmaster-core/x64/ #Tauri-generated files -desktop/tauri/src-tauri/gen/ \ No newline at end of file +desktop/tauri/src-tauri/gen/ + +#Binaries used for installer gereneration for Windows +desktop/tauri/src-tauri/binary/ +desktop/tauri/src-tauri/intel/ diff --git a/Earthfile b/Earthfile index ad460c0f..1041d7d8 100644 --- a/Earthfile +++ b/Earthfile @@ -603,6 +603,10 @@ installer-linux: SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/bundle/deb/*.deb" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/" SAVE ARTIFACT --if-exists --keep-ts "target/${target}/release/bundle/rpm/*.rpm" AS LOCAL "${outputDir}/${GO_ARCH_STRING}/" +all-artifacts: + BUILD +release-prep + BUILD +installer-linux + kext-build: FROM ${rust_builder_image} diff --git a/base/utils/call_limiter2.go b/base/utils/call_limiter2.go new file mode 100644 index 00000000..30d1b1ef --- /dev/null +++ b/base/utils/call_limiter2.go @@ -0,0 +1,74 @@ +package utils + +import ( + "sync" + "sync/atomic" + "time" +) + +// CallLimiter2 bundles concurrent calls and optionally limits how fast a function is called. 
+type CallLimiter2 struct { + pause time.Duration + + slot atomic.Int64 + slotWait sync.RWMutex + + executing atomic.Bool + lastExec time.Time +} + +// NewCallLimiter2 returns a new call limiter. +// Set minPause to zero to disable the minimum pause between calls. +func NewCallLimiter2(minPause time.Duration) *CallLimiter2 { + return &CallLimiter2{ + pause: minPause, + } +} + +// Do executes the given function. +// All concurrent calls to Do are bundled and return when f() finishes. +// Waits until the minimum pause is over before executing f() again. +func (l *CallLimiter2) Do(f func()) { + // Get ticket number. + slot := l.slot.Load() + + // Check if we can execute. + if l.executing.CompareAndSwap(false, true) { + // Make others wait. + l.slotWait.Lock() + defer l.slotWait.Unlock() + + // Execute and return. + l.waitAndExec(f) + return + } + + // Wait for slot to end and check if slot is done. + for l.slot.Load() == slot { + time.Sleep(100 * time.Microsecond) + l.slotWait.RLock() + l.slotWait.RUnlock() //nolint:staticcheck + } +} + +func (l *CallLimiter2) waitAndExec(f func()) { + defer func() { + // Update last exec time. + l.lastExec = time.Now().UTC() + // Enable next execution first. + l.executing.Store(false) + // Move to next slot aftewards to prevent wait loops. + l.slot.Add(1) + }() + + // Wait for the minimum duration between executions. + if l.pause > 0 { + sinceLastExec := time.Since(l.lastExec) + if sinceLastExec < l.pause { + time.Sleep(l.pause - sinceLastExec) + } + } + + // Execute. 
+ f() +} diff --git a/base/utils/call_limiter_test.go b/base/utils/call_limiter_test.go index 3144644e..2343673a 100644 --- a/base/utils/call_limiter_test.go +++ b/base/utils/call_limiter_test.go @@ -13,7 +13,7 @@ func TestCallLimiter(t *testing.T) { t.Parallel() pause := 10 * time.Millisecond - oa := NewCallLimiter(pause) + oa := NewCallLimiter2(pause) executed := abool.New() var testWg sync.WaitGroup @@ -41,14 +41,14 @@ func TestCallLimiter(t *testing.T) { executed.UnSet() // reset check } - // Wait for pause to reset. - time.Sleep(pause) + // Wait for 2x pause to reset. + time.Sleep(2 * pause) // Continuous use with re-execution. // Choose values so that about 10 executions are expected var execs uint32 - testWg.Add(200) - for range 200 { + testWg.Add(100) + for range 100 { go func() { oa.Do(func() { atomic.AddUint32(&execs, 1) @@ -69,8 +69,8 @@ func TestCallLimiter(t *testing.T) { t.Errorf("unexpected high exec count: %d", execs) } - // Wait for pause to reset. - time.Sleep(pause) + // Wait for 2x pause to reset. + time.Sleep(2 * pause) // Check if the limiter correctly handles panics. 
testWg.Add(100) diff --git a/cmds/cmdbase/update.go b/cmds/cmdbase/update.go index 65c20a83..6a04c654 100644 --- a/cmds/cmdbase/update.go +++ b/cmds/cmdbase/update.go @@ -9,6 +9,7 @@ import ( "github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/notifications" "github.com/safing/portmaster/service" + "github.com/safing/portmaster/service/ui" "github.com/safing/portmaster/service/updates" ) @@ -71,3 +72,4 @@ type updateDummyInstance struct{} func (udi *updateDummyInstance) Restart() {} func (udi *updateDummyInstance) Shutdown() {} func (udi *updateDummyInstance) Notifications() *notifications.Notifications { return nil } +func (udi *updateDummyInstance) UI() *ui.UI { return nil } diff --git a/cmds/updatemgr/scan.go b/cmds/updatemgr/scan.go index 9ef29f15..e5458ee6 100644 --- a/cmds/updatemgr/scan.go +++ b/cmds/updatemgr/scan.go @@ -4,13 +4,14 @@ import ( "encoding/json" "fmt" + "github.com/safing/portmaster/service/configure" "github.com/safing/portmaster/service/updates" "github.com/spf13/cobra" ) var ( scanConfig = updates.IndexScanConfig{ - Name: "Portmaster Binaries", + Name: configure.DefaultBinaryIndexName, PrimaryArtifact: "linux_amd64/portmaster-core", BaseURL: "https://updates.safing.io/", IgnoreFiles: []string{ diff --git a/desktop/angular/package.json b/desktop/angular/package.json index bcd51b0c..f627b5b1 100644 --- a/desktop/angular/package.json +++ b/desktop/angular/package.json @@ -1,6 +1,6 @@ { "name": "portmaster", - "version": "2.0.1", + "version": "2.0.2", "scripts": { "ng": "ng", "start": "npm install && npm run build-libs:dev && ng serve --proxy-config ./proxy.json", diff --git a/desktop/tauri/rust-dark-light/src/utils/rgb.rs b/desktop/tauri/rust-dark-light/src/utils/rgb.rs index d43f62b9..271c0aef 100644 --- a/desktop/tauri/rust-dark-light/src/utils/rgb.rs +++ b/desktop/tauri/rust-dark-light/src/utils/rgb.rs @@ -1,6 +1,7 @@ use std::str::FromStr; /// Struct representing an RGB color +#[allow(dead_code)] // Suppress warnings 
for unused fields in this struct only pub(crate) struct Rgb(pub(crate) u32, pub(crate) u32, pub(crate) u32); impl FromStr for Rgb { diff --git a/desktop/tauri/src-tauri/Cargo.toml b/desktop/tauri/src-tauri/Cargo.toml index d79c8ddc..0f71fd9f 100644 --- a/desktop/tauri/src-tauri/Cargo.toml +++ b/desktop/tauri/src-tauri/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "portmaster" -version = "2.0.0" +version = "2.0.1" description = "Portmaster UI" authors = ["Safing"] license = "" diff --git a/desktop/tauri/src-tauri/src/main.rs b/desktop/tauri/src-tauri/src/main.rs index b4fcd405..0773b130 100644 --- a/desktop/tauri/src-tauri/src/main.rs +++ b/desktop/tauri/src-tauri/src/main.rs @@ -1,7 +1,7 @@ // Prevents additional console window on Windows in release, DO NOT REMOVE!! #![cfg_attr(not(debug_assertions), windows_subsystem = "windows")] -use std::{env, path::Path, time::Duration}; +use std::{env, time::Duration}; use tauri::{AppHandle, Emitter, Listener, Manager, RunEvent, WindowEvent}; @@ -25,6 +25,7 @@ use portmaster::PortmasterExt; use tauri_plugin_log::RotationStrategy; use traymenu::setup_tray_menu; use window::{close_splash_window, create_main_window, hide_splash_window}; +use tauri_plugin_window_state::StateFlags; #[macro_use] extern crate lazy_static; @@ -140,7 +141,7 @@ fn main() { // TODO(vladimir): Permission for logs/app2 folder are not guaranteed. Use the default location for now. #[cfg(target_os = "windows")] - let log_target = if let Some(data_dir) = cli_args.data { + let log_target = if let Some(_) = cli_args.data { tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::LogDir { file_name: None }) } else { tauri_plugin_log::Target::new(tauri_plugin_log::TargetKind::Stdout) @@ -174,7 +175,12 @@ fn main() { // OS Version and Architecture support .plugin(tauri_plugin_os::init()) // Initialize save windows state plugin. 
- .plugin(tauri_plugin_window_state::Builder::default().build()) + .plugin(tauri_plugin_window_state::Builder::default() + // Don't save visibility state, so it will not interfere with "--background" command line argument + .with_state_flags(StateFlags::all() & !StateFlags::VISIBLE) + // Don't save splash window state + .with_denylist(&["splash",]) + .build()) // Single instance guard .plugin(tauri_plugin_single_instance::init(|app, argv, cwd| { // Send info to already dunning instance. diff --git a/desktop/tauri/src-tauri/src/portmaster/notifications.rs b/desktop/tauri/src-tauri/src/portmaster/notifications.rs index 7128eda0..f1bc9dff 100644 --- a/desktop/tauri/src-tauri/src/portmaster/notifications.rs +++ b/desktop/tauri/src-tauri/src/portmaster/notifications.rs @@ -2,7 +2,6 @@ use crate::portapi::client::*; use crate::portapi::message::*; use crate::portapi::models::notification::*; use crate::portapi::types::*; -use log::debug; use log::error; use serde_json::json; use tauri::async_runtime; diff --git a/desktop/tauri/src-tauri/src/traymenu.rs b/desktop/tauri/src-tauri/src/traymenu.rs index 777b9d23..197bdabc 100644 --- a/desktop/tauri/src-tauri/src/traymenu.rs +++ b/desktop/tauri/src-tauri/src/traymenu.rs @@ -4,15 +4,12 @@ use std::sync::RwLock; use std::{collections::HashMap, sync::atomic::Ordering}; use log::{debug, error}; -use tauri::menu::{Menu, MenuItemKind}; -use tauri::tray::{MouseButton, MouseButtonState}; use tauri::{ image::Image, - menu::{MenuBuilder, MenuItemBuilder, PredefinedMenuItem, SubmenuBuilder}, - tray::{TrayIcon, TrayIconBuilder}, - Wry, + menu::{Menu, MenuBuilder, MenuItemBuilder, PredefinedMenuItem, SubmenuBuilder}, + tray::{MouseButton, MouseButtonState, TrayIcon, TrayIconBuilder}, + Manager, Wry, }; -use tauri::{Manager, Runtime}; use tauri_plugin_window_state::{AppHandleExt, StateFlags}; use crate::config; @@ -33,6 +30,7 @@ use crate::{ use tauri_plugin_dialog::{DialogExt, MessageDialogButtons}; pub type AppIcon = TrayIcon; +pub type 
ContextMenu = Menu; static SPN_STATE: AtomicBool = AtomicBool::new(false); @@ -46,12 +44,20 @@ enum IconColor { static CURRENT_ICON_COLOR: RwLock = RwLock::new(IconColor::Red); pub static USER_THEME: RwLock = RwLock::new(dark_light::Mode::Default); - -static SPN_STATUS_KEY: &str = "spn_status"; -static SPN_BUTTON_KEY: &str = "spn_toggle"; -static GLOBAL_STATUS_KEY: &str = "global_status"; +const OPEN_KEY: &str = "open"; +const EXIT_UI_KEY: &str = "exit_ui"; +const SPN_STATUS_KEY: &str = "spn_status"; +const SPN_BUTTON_KEY: &str = "spn_toggle"; +const GLOBAL_STATUS_KEY: &str = "global_status"; +const SHUTDOWN_KEY: &str = "shutdown"; +const SYSTEM_THEME_KEY: &str = "system_theme"; +const LIGHT_THEME_KEY: &str = "light_theme"; +const DARK_THEME_KEY: &str = "dark_theme"; +const RELOAD_KEY: &str = "reload"; +const FORCE_SHOW_KEY: &str = "force-show"; const PM_TRAY_ICON_ID: &str = "pm_icon"; +const PM_TRAY_MENU_ID: &str = "pm_tray_menu"; // Icons @@ -115,51 +121,57 @@ fn get_icon(icon: IconColor) -> &'static [u8] { } } -pub fn setup_tray_menu( - app: &mut tauri::App, -) -> core::result::Result> { - // Tray menu - load_theme(app.handle()); - let open_btn = MenuItemBuilder::with_id("open", "Open App").build(app)?; - let exit_ui_btn = MenuItemBuilder::with_id("exit_ui", "Exit UI").build(app)?; - let shutdown_btn = MenuItemBuilder::with_id("shutdown", "Shut Down Portmaster").build(app)?; +fn build_tray_menu( + app: &tauri::AppHandle, + status: &str, + spn_status_text: &str, +) -> core::result::Result> { + load_theme(app); - let global_status = MenuItemBuilder::with_id("global_status", "Status: Secured") + let open_btn = MenuItemBuilder::with_id(OPEN_KEY, "Open App").build(app)?; + let exit_ui_btn = MenuItemBuilder::with_id(EXIT_UI_KEY, "Exit UI").build(app)?; + let shutdown_btn = MenuItemBuilder::with_id(SHUTDOWN_KEY, "Shut Down Portmaster").build(app)?; + + let global_status = MenuItemBuilder::with_id(GLOBAL_STATUS_KEY, format!("Status: {}", status)) .enabled(false) 
.build(app) .unwrap(); // Setup SPN status - let spn_status = MenuItemBuilder::with_id(SPN_STATUS_KEY, "SPN: Disabled") + let spn_status = MenuItemBuilder::with_id(SPN_STATUS_KEY, format!("SPN: {}", spn_status_text)) .enabled(false) .build(app) .unwrap(); // Setup SPN button - let spn_button = MenuItemBuilder::with_id(SPN_BUTTON_KEY, "Enable SPN") + let spn_button_text = match spn_status_text { + "disabled" => "Enable SPN", + _ => "Disable SPN", + }; + let spn_button = MenuItemBuilder::with_id(SPN_BUTTON_KEY, spn_button_text) .build(app) .unwrap(); - let system_theme = MenuItemBuilder::with_id("system_theme", "System") + let system_theme = MenuItemBuilder::with_id(SYSTEM_THEME_KEY, "System") .build(app) .unwrap(); - let light_theme = MenuItemBuilder::with_id("light_theme", "Light") + let light_theme = MenuItemBuilder::with_id(LIGHT_THEME_KEY, "Light") .build(app) .unwrap(); - let dark_theme = MenuItemBuilder::with_id("dark_theme", "Dark") + let dark_theme = MenuItemBuilder::with_id(DARK_THEME_KEY, "Dark") .build(app) .unwrap(); let theme_menu = SubmenuBuilder::new(app, "Icon Theme") .items(&[&system_theme, &light_theme, &dark_theme]) .build()?; - let force_show_window = MenuItemBuilder::with_id("force-show", "Force Show UI").build(app)?; - let reload_btn = MenuItemBuilder::with_id("reload", "Reload User Interface").build(app)?; + let force_show_window = MenuItemBuilder::with_id(FORCE_SHOW_KEY, "Force Show UI").build(app)?; + let reload_btn = MenuItemBuilder::with_id(RELOAD_KEY, "Reload User Interface").build(app)?; let developer_menu = SubmenuBuilder::new(app, "Developer") .items(&[&reload_btn, &force_show_window]) .build()?; - let menu = MenuBuilder::new(app) + let menu = MenuBuilder::with_id(app, PM_TRAY_MENU_ID) .items(&[ &open_btn, &PredefinedMenuItem::separator(app)?, @@ -176,11 +188,19 @@ pub fn setup_tray_menu( ]) .build()?; + return Ok(menu); +} + +pub fn setup_tray_menu( + app: &mut tauri::App, +) -> core::result::Result> { + let menu = 
build_tray_menu(app.handle(), "Secured", "disabled")?; + let icon = TrayIconBuilder::with_id(PM_TRAY_ICON_ID) .icon(Image::from_bytes(get_red_icon()).unwrap()) .menu(&menu) .on_menu_event(move |app, event| match event.id().as_ref() { - "exit_ui" => { + EXIT_UI_KEY => { let handle = app.clone(); app.dialog() .message("This does not stop the Portmaster system service") @@ -196,15 +216,15 @@ pub fn setup_tray_menu( } }); } - "open" => { + OPEN_KEY => { let _ = open_window(app); } - "reload" => { + RELOAD_KEY => { if let Ok(mut win) = open_window(app) { may_navigate_to_ui(&mut win, true); } } - "force-show" => { + FORCE_SHOW_KEY => { match create_main_window(app) { Ok(mut win) => { may_navigate_to_ui(&mut win, true); @@ -217,19 +237,19 @@ pub fn setup_tray_menu( } }; } - "spn_toggle" => { + SPN_BUTTON_KEY => { if SPN_STATE.load(Ordering::Acquire) { app.portmaster().set_spn_enabled(false); } else { app.portmaster().set_spn_enabled(true); } } - "shutdown" => { + SHUTDOWN_KEY => { app.portmaster().trigger_shutdown(); } - "system_theme" => update_icon_theme(app, dark_light::Mode::Default), - "dark_theme" => update_icon_theme(app, dark_light::Mode::Dark), - "light_theme" => update_icon_theme(app, dark_light::Mode::Light), + SYSTEM_THEME_KEY => update_icon_theme(app, dark_light::Mode::Default), + DARK_THEME_KEY => update_icon_theme(app, dark_light::Mode::Dark), + LIGHT_THEME_KEY => update_icon_theme(app, dark_light::Mode::Light), other => { error!("unknown menu event id: {}", other); } @@ -251,15 +271,11 @@ pub fn setup_tray_menu( } }) .build(app)?; + Ok(icon) } -pub fn update_icon( - icon: AppIcon, - menu: Option>, - subsystems: HashMap, - spn_status: String, -) { +pub fn update_icon(icon: AppIcon, subsystems: HashMap, spn_status: String) { // iterate over the subsystems and check if there's a module failure let failure = subsystems.values().map(|s| &s.module_status).fold( (subsystem::FAILURE_NONE, "".to_string()), @@ -273,14 +289,10 @@ pub fn update_icon( }, ); - if let 
Some(menu) = menu { - if let Some(MenuItemKind::MenuItem(global_status)) = menu.get(GLOBAL_STATUS_KEY) { - if failure.0 == subsystem::FAILURE_NONE { - _ = global_status.set_text("Status: Secured"); - } else { - _ = global_status.set_text(format!("Status: {}", failure.1)); - } - } + let mut status = "Secured".to_owned(); + + if failure.0 != subsystem::FAILURE_NONE { + status = failure.1; } let icon_color = match failure.0 { @@ -291,6 +303,13 @@ pub fn update_icon( _ => IconColor::Green, }, }; + + if let Ok(menu) = build_tray_menu(icon.app_handle(), status.as_ref(), spn_status.as_str()) { + if let Err(err) = icon.set_menu(Some(menu)) { + error!("failed to set menu on tray icon: {}", err.to_string()); + } + } + update_icon_color(&icon, icon_color); } @@ -391,8 +410,7 @@ pub async fn tray_handler(cli: PortAPI, app: tauri::AppHandle) { match payload.parse::() { Ok(n) => { subsystems.insert(n.id.clone(), n); - - update_icon(icon.clone(), app.menu(), subsystems.clone(), spn_status.clone()); + update_icon(icon.clone(), subsystems.clone(), spn_status.clone()); }, Err(err) => match err { ParseError::Json(err) => { @@ -423,8 +441,7 @@ pub async fn tray_handler(cli: PortAPI, app: tauri::AppHandle) { Ok(value) => { debug!("SPN status update: {}", value.status); spn_status.clone_from(&value.status); - - update_icon(icon.clone(), app.menu(), subsystems.clone(), spn_status.clone()); + update_icon(icon.clone(), subsystems.clone(), spn_status.clone()); }, Err(err) => match err { ParseError::Json(err) => { @@ -453,9 +470,7 @@ pub async fn tray_handler(cli: PortAPI, app: tauri::AppHandle) { if let Some((_, payload)) = res { match payload.parse::() { Ok(value) => { - if let Some(menu) = app.menu() { - update_spn_ui_state(menu, value.value.unwrap_or(false)); - } + SPN_STATE.store(value.value.unwrap_or(false), Ordering::Release); }, Err(err) => match err { ParseError::Json(err) => { @@ -487,9 +502,6 @@ pub async fn tray_handler(cli: PortAPI, app: tauri::AppHandle) { } } } - if let 
Some(menu) = app.menu() { - update_spn_ui_state(menu, false); - } update_icon_color(&icon, IconColor::Red); } @@ -554,22 +566,4 @@ fn save_theme(app: &tauri::AppHandle, mode: dark_light::Mode) { } Err(err) => error!("failed to load config file: {}", err), } - if let Some(menu) = app.menu() { - update_spn_ui_state(menu, false); - } -} - -fn update_spn_ui_state(menu: Menu, enabled: bool) { - if let (Some(MenuItemKind::MenuItem(spn_status)), Some(MenuItemKind::MenuItem(spn_btn))) = - (menu.get(SPN_STATUS_KEY), menu.get(SPN_BUTTON_KEY)) - { - if enabled { - _ = spn_status.set_text("SPN: Connected"); - _ = spn_btn.set_text("Disable SPN"); - } else { - _ = spn_status.set_text("SPN: Disabled"); - _ = spn_btn.set_text("Enable SPN"); - } - SPN_STATE.store(enabled, Ordering::Release); - } } diff --git a/desktop/tauri/src-tauri/templates/nsis/install_hooks.nsh b/desktop/tauri/src-tauri/templates/nsis/install_hooks.nsh index fb8fccda..7b3a1657 100644 --- a/desktop/tauri/src-tauri/templates/nsis/install_hooks.nsh +++ b/desktop/tauri/src-tauri/templates/nsis/install_hooks.nsh @@ -81,6 +81,15 @@ var dataDir SimpleSC::SetServiceDescription "PortmasterCore" "Portmaster Application Firewall - Core Service" + ; + ; Auto start the UI + ; + DetailPrint "Creating registry entry for autostart" + WriteRegStr HKLM "SOFTWARE\Microsoft\Windows\CurrentVersion\Run" "Portmaster" '"$INSTDIR\portmaster.exe" --with-prompts --with-notifications --background' + + ; + ; MIGRATION FROM PMv1 TO PMv2 + ; StrCpy $oldInstallationDir "$COMMONPROGRAMDATA\Safing\Portmaster" StrCpy $dataDir "$COMMONPROGRAMDATA\Portmaster" @@ -168,6 +177,10 @@ var dataDir Delete /REBOOTOK "$INSTDIR\assets.zip" RMDir /r /REBOOTOK "$INSTDIR" + ; remove the registry entry for the autostart + DetailPrint "Removing registry entry for autostart" + DeleteRegValue HKLM "SOFTWARE\Microsoft\Windows\CurrentVersion\Run" "Portmaster" + ; delete data files Delete /REBOOTOK "$COMMONPROGRAMDATA\Portmaster\databases\history.db" RMDir /r /REBOOTOK 
"$COMMONPROGRAMDATA\Portmaster\databases\cache" @@ -178,6 +191,9 @@ var dataDir RMDir /r /REBOOTOK "$COMMONPROGRAMDATA\Portmaster\exec" RMDir /r /REBOOTOK "$COMMONPROGRAMDATA\Portmaster\logs" + ; Remove PMv1 migration flag + Delete /REBOOTOK "$COMMONPROGRAMDATA\Safing\Portmaster\migrated.txt" + ${If} $DeleteAppDataCheckboxState = 1 DetailPrint "Deleting the application data..." RMDir /r /REBOOTOK "$COMMONPROGRAMDATA\Portmaster" diff --git a/packaging/linux/portmaster.desktop b/packaging/linux/portmaster.desktop index c21458b0..8654ac1b 100644 --- a/packaging/linux/portmaster.desktop +++ b/packaging/linux/portmaster.desktop @@ -3,6 +3,7 @@ Name=Portmaster GenericName=Application Firewall Exec={{exec}} --data=/opt/safing/portmaster --with-prompts --with-notifications Icon={{icon}} +StartupWMClass=portmaster Terminal=false Type=Application Categories=System diff --git a/packaging/linux/portmaster.service b/packaging/linux/portmaster.service index 5a56ceb3..cd25c262 100644 --- a/packaging/linux/portmaster.service +++ b/packaging/linux/portmaster.service @@ -23,6 +23,7 @@ Environment=LOGLEVEL=info Environment=PORTMASTER_ARGS= EnvironmentFile=-/etc/default/portmaster ProtectSystem=true +ReadWritePaths=/usr/lib/portmaster RestrictAddressFamilies=AF_UNIX AF_NETLINK AF_INET AF_INET6 RestrictNamespaces=yes ProtectHome=read-only diff --git a/packaging/linux/postinst b/packaging/linux/postinst index c99f8b4c..a54ee8e5 100644 --- a/packaging/linux/postinst +++ b/packaging/linux/postinst @@ -28,6 +28,9 @@ if [ -d "$OLD_INSTALLATION_DIR" ]; then echo "[ ] V1 migration: Removing V1 shortcuts" rm /etc/xdg/autostart/portmaster_notifier.desktop rm /usr/share/applications/portmaster_notifier.desktop + # app V1 shortcut + # NOTE: new V2 shortcut registered as "Portmaster.desktop" (first letter uppercase), so we can distinguish between V1 and V2 shortcuts. 
+ rm /usr/share/applications/portmaster.desktop # Remove V1 files (except configuration) # (keeping V1 configuration for a smooth downgrade, if needed) diff --git a/packaging/windows/generate_windows_installers.ps1 b/packaging/windows/generate_windows_installers.ps1 index 5e638296..31c5ed40 100644 --- a/packaging/windows/generate_windows_installers.ps1 +++ b/packaging/windows/generate_windows_installers.ps1 @@ -50,15 +50,19 @@ #------------------------------------------------------------------------------ # # Optional arguments: -# -i, --interactive: Can prompt for user input (e.g. when a file is not found in the primary folder but found in the alternate folder) -# -v, --version: Explicitly set the version to use for the installer file name +# -i: (interactive) Can prompt for user input (e.g. when a file is not found in the primary folder but found in the alternate folder) +# -v: (version) Explicitly set the version to use for the installer file name +# -e: (erase) Just erase work directories #------------------------------------------------------------------------------ param ( [Alias('i')] [switch]$interactive, [Alias('v')] - [string]$version + [string]$version, + + [Alias('e')] + [switch]$erase ) # Save the current directory @@ -185,7 +189,18 @@ try { $destinationDir = "desktop/tauri/src-tauri" $binaryDir = "$destinationDir/binary" #portmaster\desktop\tauri\src-tauri\binary $intelDir = "$destinationDir/intel" #portmaster\desktop\tauri\src-tauri\intel - $targetDir = "$destinationDir/target/release" #portmaster\desktop\tauri\src-tauri\target\release + $targetBase= "$destinationDir/target" #portmaster\desktop\tauri\src-tauri\target + $targetDir = "$targetBase/release" #portmaster\desktop\tauri\src-tauri\target\release + + # Erasing work directories + Write-Output "[+] Erasing work directories: '$binaryDir', '$intelDir', '$targetBase'" + Remove-Item -Recurse -Force -Path $binaryDir -ErrorAction SilentlyContinue + Remove-Item -Recurse -Force -Path $intelDir 
-ErrorAction SilentlyContinue + Remove-Item -Recurse -Force -Path $targetBase -ErrorAction SilentlyContinue + if ($erase) { + Write-Output "[ ] Done" + exit 0 + } # Copying BINARY FILES Write-Output "`n[+] Copying binary files:" diff --git a/service/config.go b/service/config.go index 39ad2d03..28448888 100644 --- a/service/config.go +++ b/service/config.go @@ -92,7 +92,8 @@ func (sc *ServiceConfig) Init() error { return nil } -func getCurrentBinaryFolder() (string, error) { +// returns the absolute path of the currently running executable +func getCurrentBinaryPath() (string, error) { // Get the path of the currently running executable exePath, err := os.Executable() if err != nil { @@ -105,6 +106,16 @@ func getCurrentBinaryFolder() (string, error) { return "", fmt.Errorf("failed to get absolute path: %w", err) } + return absPath, nil +} + +func getCurrentBinaryFolder() (string, error) { + // Get the absolute path of the currently running executable + absPath, err := getCurrentBinaryPath() + if err != nil { + return "", err + } + // Get the directory of the executable installDir := filepath.Dir(absPath) @@ -115,12 +126,12 @@ func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateCo switch runtime.GOOS { case "windows": binaryUpdateConfig = &updates.Config{ - Name: "binaries", + Name: configure.DefaultBinaryIndexName, Directory: svcCfg.BinDir, DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"), PurgeDirectory: filepath.Join(svcCfg.BinDir, "upgrade_obsolete_binaries"), - Ignore: []string{"databases", "intel", "config.json"}, - IndexURLs: svcCfg.BinariesIndexURLs, // May be changed by config during instance startup. + Ignore: []string{"uninstall.exe"}, // "databases", "intel" and "config.json" not needed here since they are not in the bin dir. + IndexURLs: svcCfg.BinariesIndexURLs, // May be changed by config during instance startup. 
IndexFile: "index.json", Verify: svcCfg.VerifyBinaryUpdates, AutoCheck: true, // May be changed by config during instance startup. @@ -130,7 +141,7 @@ func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateCo Notify: true, } intelUpdateConfig = &updates.Config{ - Name: "intel", + Name: configure.DefaultIntelIndexName, Directory: filepath.Join(svcCfg.DataDir, "intel"), DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"), PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"), @@ -146,11 +157,11 @@ func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateCo case "linux": binaryUpdateConfig = &updates.Config{ - Name: "binaries", + Name: configure.DefaultBinaryIndexName, Directory: svcCfg.BinDir, DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_binaries"), PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_binaries"), - Ignore: []string{"databases", "intel", "config.json"}, + Ignore: []string{}, // "databases", "intel" and "config.json" not needed here since they are not in the bin dir. IndexURLs: svcCfg.BinariesIndexURLs, // May be changed by config during instance startup. IndexFile: "index.json", Verify: svcCfg.VerifyBinaryUpdates, @@ -160,8 +171,23 @@ func MakeUpdateConfigs(svcCfg *ServiceConfig) (binaryUpdateConfig, intelUpdateCo NeedsRestart: true, Notify: true, } + if binPath, err := getCurrentBinaryPath(); err == nil { + binaryUpdateConfig.PostUpgradeCommands = []updates.UpdateCommandConfig{ + // Restore SELinux context for the new core binary after upgrade + // (`restorecon /usr/lib/portmaster/portmaster-core`) + { + Command: "restorecon", + Args: []string{binPath}, + TriggerArtifactFName: binPath, + FailOnError: false, // Ignore error: 'restorecon' may not be available on a non-SELinux systems. 
+ }, + } + } else { + return nil, nil, fmt.Errorf("failed to get current binary path: %w", err) + } + intelUpdateConfig = &updates.Config{ - Name: "intel", + Name: configure.DefaultIntelIndexName, Directory: filepath.Join(svcCfg.DataDir, "intel"), DownloadDirectory: filepath.Join(svcCfg.DataDir, "download_intel"), PurgeDirectory: filepath.Join(svcCfg.DataDir, "upgrade_obsolete_intel"), diff --git a/service/configure/updates.go b/service/configure/updates.go index 3fec4afc..0625ff26 100644 --- a/service/configure/updates.go +++ b/service/configure/updates.go @@ -5,6 +5,9 @@ import ( ) var ( + DefaultBinaryIndexName = "Portmaster Binaries" + DefaultIntelIndexName = "intel" + DefaultStableBinaryIndexURLs = []string{ "https://updates.safing.io/stable.v3.json", } diff --git a/service/instance.go b/service/instance.go index c71f6388..5b148507 100644 --- a/service/instance.go +++ b/service/instance.go @@ -438,6 +438,15 @@ func (i *Instance) BinaryUpdates() *updates.Updater { return i.binaryUpdates } +// GetBinaryUpdateFile returns the file path of a binary update file. +func (i *Instance) GetBinaryUpdateFile(name string) (path string, err error) { + file, err := i.binaryUpdates.GetFile(name) + if err != nil { + return "", err + } + return file.Path(), nil +} + // IntelUpdates returns the updates module. func (i *Instance) IntelUpdates() *updates.Updater { return i.intelUpdates diff --git a/service/network/proc/pids_by_user.go b/service/network/proc/pids_by_user.go index 7fd451e7..8ea43dc4 100644 --- a/service/network/proc/pids_by_user.go +++ b/service/network/proc/pids_by_user.go @@ -19,7 +19,7 @@ var ( // pidsByUserLock is also used for locking the socketInfo.PID on all socket.*Info structs. pidsByUser = make(map[int][]int) pidsByUserLock sync.RWMutex - fetchPidsByUser = utils.NewCallLimiter(10 * time.Millisecond) + fetchPidsByUser = utils.NewCallLimiter2(10 * time.Millisecond) ) // getPidsByUser returns the cached PIDs for the given UID. 
diff --git a/service/network/state/tcp.go b/service/network/state/tcp.go index 5d08b054..e3883847 100644 --- a/service/network/state/tcp.go +++ b/service/network/state/tcp.go @@ -25,7 +25,7 @@ type tcpTable struct { // lastUpdateAt stores the time when the tables where last updated as unix nanoseconds. lastUpdateAt atomic.Int64 - fetchLimiter *utils.CallLimiter + fetchLimiter *utils.CallLimiter2 fetchTable func() (connections []*socket.ConnectionInfo, listeners []*socket.BindInfo, err error) dualStack *tcpTable @@ -34,13 +34,13 @@ type tcpTable struct { var ( tcp6Table = &tcpTable{ version: 6, - fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates), + fetchLimiter: utils.NewCallLimiter2(minDurationBetweenTableUpdates), fetchTable: getTCP6Table, } tcp4Table = &tcpTable{ version: 4, - fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates), + fetchLimiter: utils.NewCallLimiter2(minDurationBetweenTableUpdates), fetchTable: getTCP4Table, } ) diff --git a/service/network/state/udp.go b/service/network/state/udp.go index 1c534b7f..0dcecc84 100644 --- a/service/network/state/udp.go +++ b/service/network/state/udp.go @@ -24,7 +24,7 @@ type udpTable struct { // lastUpdateAt stores the time when the tables where last updated as unix nanoseconds. 
lastUpdateAt atomic.Int64 - fetchLimiter *utils.CallLimiter + fetchLimiter *utils.CallLimiter2 fetchTable func() (binds []*socket.BindInfo, err error) states map[string]map[string]*udpState @@ -52,14 +52,14 @@ const ( var ( udp6Table = &udpTable{ version: 6, - fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates), + fetchLimiter: utils.NewCallLimiter2(minDurationBetweenTableUpdates), fetchTable: getUDP6Table, states: make(map[string]map[string]*udpState), } udp4Table = &udpTable{ version: 4, - fetchLimiter: utils.NewCallLimiter(minDurationBetweenTableUpdates), + fetchLimiter: utils.NewCallLimiter2(minDurationBetweenTableUpdates), fetchTable: getUDP4Table, states: make(map[string]map[string]*udpState), } diff --git a/service/ui/api.go b/service/ui/api.go index de7e7758..d74d814b 100644 --- a/service/ui/api.go +++ b/service/ui/api.go @@ -2,35 +2,21 @@ package ui import ( "github.com/safing/portmaster/base/api" - "github.com/safing/portmaster/base/log" ) -func registerAPIEndpoints() error { +func (ui *UI) registerAPIEndpoints() error { return api.RegisterEndpoint(api.Endpoint{ Path: "ui/reload", Write: api.PermitUser, - ActionFunc: reloadUI, + ActionFunc: ui.reloadUI, Name: "Reload UI Assets", Description: "Removes all assets from the cache and reloads the current (possibly updated) version from disk when requested.", }) } -func reloadUI(_ *api.Request) (msg string, err error) { - appsLock.Lock() - defer appsLock.Unlock() - +func (ui *UI) reloadUI(_ *api.Request) (msg string, err error) { // Close all archives. - for id, archiveFS := range apps { - err := archiveFS.Close() - if err != nil { - log.Warningf("ui: failed to close archive %s: %s", id, err) - } - } - - // Reset index. 
- for key := range apps { - delete(apps, key) - } + ui.CloseArchives() return "all ui archives successfully reloaded", nil } diff --git a/service/ui/module.go b/service/ui/module.go index a058c508..946cb1e2 100644 --- a/service/ui/module.go +++ b/service/ui/module.go @@ -1,27 +1,55 @@ package ui import ( - "errors" "os" "path/filepath" + "sync" "sync/atomic" "github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/utils" "github.com/safing/portmaster/service/mgr" - "github.com/safing/portmaster/service/updates" + "github.com/spkg/zipfs" ) -func prep() error { - if err := registerAPIEndpoints(); err != nil { - return err - } +// UI serves the user interface files. +type UI struct { + mgr *mgr.Manager + instance instance - return registerRoutes() + archives map[string]*zipfs.FileSystem + archivesLock sync.RWMutex + + upgradeLock atomic.Bool } -func start() error { +// New returns a new UI module. +func New(instance instance) (*UI, error) { + m := mgr.New("UI") + ui := &UI{ + mgr: m, + instance: instance, + + archives: make(map[string]*zipfs.FileSystem), + } + + if err := ui.registerAPIEndpoints(); err != nil { + return nil, err + } + if err := ui.registerRoutes(); err != nil { + return nil, err + } + + return ui, nil +} + +func (ui *UI) Manager() *mgr.Manager { + return ui.mgr +} + +// Start starts the module. +func (ui *UI) Start() error { // Create a dummy directory to which processes change their working directory // to. Currently this includes the App and the Notifier. The aim is protect // all other directories and increase compatibility should any process want @@ -30,7 +58,7 @@ func start() error { // may seem dangerous, but proper permission on the parent directory provide // (some) protection. // Processes must _never_ read from this directory. 
- execDir := filepath.Join(module.instance.DataDir(), "exec") + execDir := filepath.Join(ui.instance.DataDir(), "exec") err := os.MkdirAll(execDir, 0o0777) //nolint:gosec // This is intentional. if err != nil { log.Warningf("ui: failed to create safe exec dir: %s", err) @@ -45,52 +73,67 @@ func start() error { return nil } -// UI serves the user interface files. -type UI struct { - mgr *mgr.Manager - - instance instance -} - -func (ui *UI) Manager() *mgr.Manager { - return ui.mgr -} - -// Start starts the module. -func (ui *UI) Start() error { - return start() -} - // Stop stops the module. func (ui *UI) Stop() error { return nil } -var ( - shimLoaded atomic.Bool - module *UI -) +func (ui *UI) getArchive(name string) (archive *zipfs.FileSystem, ok bool) { + ui.archivesLock.RLock() + defer ui.archivesLock.RUnlock() -// New returns a new UI module. -func New(instance instance) (*UI, error) { - if !shimLoaded.CompareAndSwap(false, true) { - return nil, errors.New("only one instance allowed") - } - m := mgr.New("UI") - module = &UI{ - mgr: m, - instance: instance, + archive, ok = ui.archives[name] + return +} + +func (ui *UI) setArchive(name string, archive *zipfs.FileSystem) { + ui.archivesLock.Lock() + defer ui.archivesLock.Unlock() + + ui.archives[name] = archive +} + +// CloseArchives closes all open archives. +func (ui *UI) CloseArchives() { + if ui == nil { + return } - if err := prep(); err != nil { - return nil, err + ui.archivesLock.Lock() + defer ui.archivesLock.Unlock() + + // Close archives. + for _, archive := range ui.archives { + if err := archive.Close(); err != nil { + ui.mgr.Warn("failed to close ui archive", "err", err) + } } - return module, nil + // Reset map. + clear(ui.archives) +} + +// EnableUpgradeLock enables the upgrade lock and closes all open archives. +func (ui *UI) EnableUpgradeLock() { + if ui == nil { + return + } + + ui.upgradeLock.Store(true) + ui.CloseArchives() +} + +// DisableUpgradeLock disables the upgrade lock. 
+func (ui *UI) DisableUpgradeLock() { + if ui == nil { + return + } + + ui.upgradeLock.Store(false) } type instance interface { DataDir() string API() *api.API - BinaryUpdates() *updates.Updater + GetBinaryUpdateFile(name string) (path string, err error) } diff --git a/service/ui/serve.go b/service/ui/serve.go index 68203552..24bda7aa 100644 --- a/service/ui/serve.go +++ b/service/ui/serve.go @@ -9,26 +9,19 @@ import ( "net/url" "path/filepath" "strings" - "sync" "github.com/spkg/zipfs" "github.com/safing/portmaster/base/api" "github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/utils" - "github.com/safing/portmaster/service/updates" ) -var ( - apps = make(map[string]*zipfs.FileSystem) - appsLock sync.RWMutex -) - -func registerRoutes() error { +func (ui *UI) registerRoutes() error { // Server assets. api.RegisterHandler( "/assets/{resPath:[a-zA-Z0-9/\\._-]+}", - &archiveServer{defaultModuleName: "assets"}, + &archiveServer{ui: ui, defaultModuleName: "assets"}, ) // Add slash to plain module namespaces. @@ -38,7 +31,7 @@ func registerRoutes() error { ) // Serve modules. - srv := &archiveServer{} + srv := &archiveServer{ui: ui} api.RegisterHandler("/ui/modules/{moduleName:[a-z]+}/", srv) api.RegisterHandler("/ui/modules/{moduleName:[a-z]+}/{resPath:[a-zA-Z0-9/\\._-]+}", srv) @@ -52,6 +45,7 @@ func registerRoutes() error { } type archiveServer struct { + ui *UI defaultModuleName string } @@ -82,39 +76,35 @@ func (bs *archiveServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { resPath = "index.html" } - appsLock.RLock() - archiveFS, ok := apps[moduleName] - appsLock.RUnlock() + archiveFS, ok := bs.ui.getArchive(moduleName) if ok { ServeFileFromArchive(w, r, moduleName, archiveFS, resPath) return } + // Check if the upgrade lock is enabled. 
+ if bs.ui.upgradeLock.Load() { + http.Error(w, "Resources locked, upgrade in progress.", http.StatusLocked) + return + } + // get file from update system - zipFile, err := module.instance.BinaryUpdates().GetFile(fmt.Sprintf("%s.zip", moduleName)) + zipFile, err := bs.ui.instance.GetBinaryUpdateFile(fmt.Sprintf("%s.zip", moduleName)) if err != nil { - if errors.Is(err, updates.ErrNotFound) { - log.Tracef("ui: requested module %s does not exist", moduleName) - http.Error(w, err.Error(), http.StatusNotFound) - } else { - log.Tracef("ui: error loading module %s: %s", moduleName, err) - http.Error(w, err.Error(), http.StatusInternalServerError) - } + log.Tracef("ui: error loading module %s: %s", moduleName, err) + http.Error(w, err.Error(), http.StatusInternalServerError) return } // Open archive from disk. - archiveFS, err = zipfs.New(zipFile.Path()) + archiveFS, err = zipfs.New(zipFile) if err != nil { log.Tracef("ui: error prepping module %s: %s", moduleName, err) http.Error(w, err.Error(), http.StatusInternalServerError) return } - appsLock.Lock() - apps[moduleName] = archiveFS - appsLock.Unlock() - + bs.ui.setArchive(moduleName, archiveFS) ServeFileFromArchive(w, r, moduleName, archiveFS, resPath) } diff --git a/service/updates/index.go b/service/updates/index.go index d98d92d7..13285f9d 100644 --- a/service/updates/index.go +++ b/service/updates/index.go @@ -118,6 +118,14 @@ type Index struct { Artifacts []*Artifact `json:"Artifacts"` versionNum *semver.Version + + // isLocallyGenerated indicates whether the index was generated from a local directory. + // + // When true: + // - The `Published` field represents the generation time, not a formal release date. + // This timestamp should be ignored when checking for online updates. + // - Downgrades from this locally generated version to an online index should be prevented. + isLocallyGenerated bool } // LoadIndex loads and parses an index from the given filename. 
@@ -235,6 +243,15 @@ func (index *Index) ShouldUpgradeTo(newIndex *Index) error { case index.Name != newIndex.Name: return errors.New("new index name does not match") + case index.isLocallyGenerated: + if newIndex.versionNum.GreaterThan(index.versionNum) { + // Upgrade! (from a locally generated index to an online index) + return nil + } else { + // "Do nothing". + return ErrSameIndex + } + case index.Published.After(newIndex.Published): return errors.New("new index is older (time)") diff --git a/service/updates/index_scan.go b/service/updates/index_scan.go index 1dc1a7f6..3ac52f5f 100644 --- a/service/updates/index_scan.go +++ b/service/updates/index_scan.go @@ -234,10 +234,11 @@ func GenerateIndexFromDir(sourceDir string, cfg IndexScanConfig) (*Index, error) // Create base index. index := &Index{ - Name: cfg.Name, - Version: cfg.Version, - Published: time.Now(), - versionNum: indexVersion, + Name: cfg.Name, + Version: cfg.Version, + Published: time.Now(), + versionNum: indexVersion, + isLocallyGenerated: true, } if index.Version == "" && cfg.PrimaryArtifact != "" { pv, ok := artifacts[cfg.PrimaryArtifact] diff --git a/service/updates/module.go b/service/updates/module.go index b57830b2..8a1b61fc 100644 --- a/service/updates/module.go +++ b/service/updates/module.go @@ -18,7 +18,9 @@ import ( "github.com/safing/portmaster/base/log" "github.com/safing/portmaster/base/notifications" "github.com/safing/portmaster/base/utils" + "github.com/safing/portmaster/service/configure" "github.com/safing/portmaster/service/mgr" + "github.com/safing/portmaster/service/ui" ) const ( @@ -49,6 +51,22 @@ var ( ErrActionRequired = errors.New("action required") ) +// UpdateCommandConfig defines the configuration for a shell command +// that is executed when an update is applied +type UpdateCommandConfig struct { + // Shell command to execute + Command string + // Arguments to pass to the command + Args []string + // Execute triggers: if not empty, the command will be executed only if 
the specified file was updated + // if empty, the command will be executed always + TriggerArtifactFName string + // FailOnError defines whether the upgrade should fail if the command fails + // true - upgrade will fail if the command fails + // false - upgrade will continue even if the command fails + FailOnError bool +} + // Config holds the configuration for the updates module. type Config struct { // Name of the updater. @@ -86,6 +104,9 @@ type Config struct { // Notify defines whether the user shall be informed about events via notifications. // If enabled, disables automatic restart after upgrade. Notify bool + + // List of shell commands to run after the upgrade (if any). + PostUpgradeCommands []UpdateCommandConfig } // Check looks for obvious configuration errors. @@ -201,6 +222,7 @@ func New(instance instance, name string, cfg Config) (*Updater, error) { module.corruptedInstallation = fmt.Errorf("invalid index: %w", err) } index, err = GenerateIndexFromDir(cfg.Directory, IndexScanConfig{ + Name: configure.DefaultBinaryIndexName, Version: info.VersionNumber(), }) if err == nil && index.init(currentPlatform) == nil { @@ -402,7 +424,7 @@ func (u *Updater) updateAndUpgrade(w *mgr.WorkerCtx, indexURLs []string, ignoreV Type: notifications.ActionTypeWebhook, Payload: notifications.ActionTypeWebhookPayload{ Method: "POST", - URL: "updates/apply", + URL: "core/restart", }, }, }, @@ -643,4 +665,5 @@ type instance interface { Restart() Shutdown() Notifications() *notifications.Notifications + UI() *ui.UI } diff --git a/service/updates/upgrade.go b/service/updates/upgrade.go index 9d18de98..89da6c8f 100644 --- a/service/updates/upgrade.go +++ b/service/updates/upgrade.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "os" + "os/exec" "path/filepath" "slices" "strings" @@ -24,8 +25,20 @@ func (u *Updater) upgrade(downloader *Downloader, ignoreVersion bool) error { } } + // If we are running in a UI instance, we need to unload the UI assets. + if u.instance != nil { + 
u.instance.UI().EnableUpgradeLock() + defer u.instance.UI().DisableUpgradeLock() + } + // Execute the upgrade. upgradeError := u.upgradeMoveFiles(downloader) + if upgradeError == nil { + // Files upgraded successfully. + // Applying post-upgrade tasks, if any. + upgradeError = u.applyPostUpgradeCommands() + } + if upgradeError == nil { return nil } @@ -73,6 +86,10 @@ func (u *Updater) upgradeMoveFiles(downloader *Downloader) error { if slices.Contains(u.cfg.Ignore, file.Name()) { continue } + // ignore PurgeDirectory itself + if strings.EqualFold(u.cfg.PurgeDirectory, filepath.Join(u.cfg.Directory, file.Name())) { + continue + } // Otherwise, move file to purge dir. src := filepath.Join(u.cfg.Directory, file.Name()) @@ -199,3 +216,41 @@ func (u *Updater) deleteUnfinishedFiles(dir string) error { return nil } + +func (u *Updater) applyPostUpgradeCommands() error { + // At this point, we assume that the upgrade was successful and all files are in place. + // We need to execute the post-upgrade commands, if any. + + if len(u.cfg.PostUpgradeCommands) == 0 { + return nil + } + + // collect full paths to files that were upgraded, required to check the trigger. + upgradedFiles := make(map[string]struct{}) + for _, artifact := range u.index.Artifacts { + upgradedFiles[filepath.Join(u.cfg.Directory, artifact.Filename)] = struct{}{} + } + + // Execute post-upgrade commands. + for _, puCmd := range u.cfg.PostUpgradeCommands { + + // Check trigger to ensure that we need to run this command. 
+ if len(puCmd.TriggerArtifactFName) > 0 { + if _, ok := upgradedFiles[puCmd.TriggerArtifactFName]; !ok { + continue + } + } + + log.Debugf("updates/%s: executing post-upgrade command: '%s %s'", u.cfg.Name, puCmd.Command, strings.Join(puCmd.Args, " ")) + output, err := exec.Command(puCmd.Command, puCmd.Args...).CombinedOutput() + if err != nil { + if puCmd.FailOnError { + return fmt.Errorf("post-upgrade command '%s %s' failed: %w, output: %s", puCmd.Command, strings.Join(puCmd.Args, " "), err, string(output)) + } + + log.Warningf("updates/%s: post-upgrade command '%s %s' failed, but ignored. Error: %s", u.cfg.Name, puCmd.Command, strings.Join(puCmd.Args, " "), err) + } + } + + return nil +}