[WIP] New updater first working prototype

This commit is contained in:
Vladimir Stoilov
2024-08-16 16:05:01 +03:00
parent abf444630b
commit 9bae1afd73
46 changed files with 4107 additions and 4149 deletions

View File

@@ -7,7 +7,6 @@ import (
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/service/intel/geoip"
"github.com/safing/portmaster/service/netenv"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/spn/access"
"github.com/safing/portmaster/spn/access/account"
"github.com/safing/portmaster/spn/captain"
@@ -18,18 +17,19 @@ var portmasterStarted = time.Now()
func collectData() interface{} {
data := make(map[string]interface{})
// TODO(vladimir)
// Get data about versions.
versions := updates.GetSimpleVersions()
data["Updates"] = versions
data["Version"] = versions.Build.Version
numericVersion, err := MakeNumericVersion(versions.Build.Version)
if err != nil {
data["NumericVersion"] = &DataError{
Error: err,
}
} else {
data["NumericVersion"] = numericVersion
}
// versions := updates.GetSimpleVersions()
// data["Updates"] = versions
// data["Version"] = versions.Build.Version
// numericVersion, err := MakeNumericVersion(versions.Build.Version)
// if err != nil {
// data["NumericVersion"] = &DataError{
// Error: err,
// }
// } else {
// data["NumericVersion"] = numericVersion
// }
// Get data about install.
installInfo, err := GetInstallInfo()

View File

@@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
type Broadcasts struct {
@@ -91,4 +92,6 @@ func New(instance instance) (*Broadcasts, error) {
return module, nil
}
type instance interface{}
type instance interface {
Updates() *updates.Updates
}

View File

@@ -18,7 +18,6 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
const (
@@ -68,7 +67,7 @@ type BroadcastNotification struct {
func broadcastNotify(ctx *mgr.WorkerCtx) error {
// Get broadcast notifications file, load it from disk and parse it.
broadcastsResource, err := updates.GetFile(broadcastsResourcePath)
broadcastsResource, err := module.instance.Updates().GetFile(broadcastsResourcePath)
if err != nil {
return fmt.Errorf("failed to get broadcast notifications update: %w", err)
}

View File

@@ -149,7 +149,7 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
updates.AddToDebugInfo(di)
// TODO(vladimir): updates.AddToDebugInfo(di)
compat.AddToDebugInfo(di)
module.instance.AddWorkerInfoToDebugInfo(di)
di.AddGoroutineStack()

View File

@@ -14,15 +14,14 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
const (
baseListFilePath = "intel/lists/base.dsdl"
intermediateListFilePath = "intel/lists/intermediate.dsdl"
urgentListFilePath = "intel/lists/urgent.dsdl"
listIndexFilePath = "intel/lists/index.dsd"
baseListFilePath = "base.dsdl"
intermediateListFilePath = "intermediate.dsdl"
urgentListFilePath = "urgent.dsdl"
listIndexFilePath = "index.dsd"
)
// default bloomfilter element sizes (estimated).
@@ -40,9 +39,9 @@ var (
filterListLock sync.RWMutex
// Updater files for tracking upgrades.
baseFile *updater.File
intermediateFile *updater.File
urgentFile *updater.File
baseFile *registry.File
intermediateFile *registry.File
urgentFile *registry.File
filterListsLoaded chan struct{}
)
@@ -56,11 +55,10 @@ var cache = database.NewInterface(&database.Options{
// getFileFunc is the function used to get a file from
// the updater. It's basically updates.GetFile and used
// for unit testing.
type getFileFunc func(string) (*updater.File, error)
// getFile points to updates.GetFile but may be set to
// something different during unit testing.
var getFile getFileFunc = updates.GetFile
// var getFile getFileFunc = registry.GetFile
func init() {
filterListsLoaded = make(chan struct{})
@@ -79,7 +77,7 @@ func isLoaded() bool {
// processListFile opens the latest version of file and decodes it's DSDL
// content. It calls processEntry for each decoded filterlists entry.
func processListFile(ctx context.Context, filter *scopedBloom, file *updater.File) error {
func processListFile(ctx context.Context, filter *scopedBloom, file *registry.File) error {
f, err := os.Open(file.Path())
if err != nil {
return err

View File

@@ -4,14 +4,12 @@ import (
"errors"
"fmt"
"os"
"strings"
"sync"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
"github.com/safing/structures/dsd"
)
@@ -164,7 +162,7 @@ func getListIndexFromCache() (*ListIndexFile, error) {
var (
// listIndexUpdate must only be used by updateListIndex.
listIndexUpdate *updater.File
listIndexUpdate *registry.File
listIndexUpdateLock sync.Mutex
)
@@ -177,24 +175,24 @@ func updateListIndex() error {
case listIndexUpdate == nil:
// This is the first time this function is run, get updater file for index.
var err error
listIndexUpdate, err = updates.GetFile(listIndexFilePath)
listIndexUpdate, err = module.instance.Updates().GetFile(listIndexFilePath)
if err != nil {
return err
}
// Check if the version in the cache is current.
index, err := getListIndexFromCache()
_, err = getListIndexFromCache()
switch {
case errors.Is(err, database.ErrNotFound):
log.Info("filterlists: index not in cache, starting update")
case err != nil:
log.Warningf("filterlists: failed to load index from cache, starting update: %s", err)
case !listIndexUpdate.EqualsVersion(strings.TrimPrefix(index.Version, "v")):
log.Infof(
"filterlists: index from cache is outdated, starting update (%s != %s)",
strings.TrimPrefix(index.Version, "v"),
listIndexUpdate.Version(),
)
// case !listIndexUpdate.EqualsVersion(strings.TrimPrefix(index.Version, "v")):
// log.Infof(
// "filterlists: index from cache is outdated, starting update (%s != %s)",
// strings.TrimPrefix(index.Version, "v"),
// listIndexUpdate.Version(),
// )
default:
// List is in cache and current, there is nothing to do.
log.Debug("filterlists: index is up to date")
@@ -204,8 +202,8 @@ func updateListIndex() error {
return nil
}
case listIndexUpdate.UpgradeAvailable():
log.Info("filterlists: index update available, starting update")
// case listIndexUpdate.UpgradeAvailable():
// log.Info("filterlists: index update available, starting update")
default:
// Index is loaded and no update is available, there is nothing to do.
return nil

View File

@@ -13,8 +13,8 @@ import (
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/database/query"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/registry"
)
var updateInProgress = abool.New()
@@ -174,51 +174,51 @@ func removeAllObsoleteFilterEntries(wc *mgr.WorkerCtx) error {
// getUpgradableFiles returns a slice of filterlists files
// that should be updated. The files MUST be updated and
// processed in the returned order!
func getUpgradableFiles() ([]*updater.File, error) {
var updateOrder []*updater.File
func getUpgradableFiles() ([]*registry.File, error) {
var updateOrder []*registry.File
cacheDBInUse := isLoaded()
// cacheDBInUse := isLoaded()
if baseFile == nil || baseFile.UpgradeAvailable() || !cacheDBInUse {
var err error
baseFile, err = getFile(baseListFilePath)
if err != nil {
return nil, err
}
log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version())
updateOrder = append(updateOrder, baseFile)
}
// if baseFile == nil || !cacheDBInUse { // TODO(vladimir): || baseFile.UpgradeAvailable()
// var err error
// baseFile, err = module.instance.Updates().GetFile(baseListFilePath)
// if err != nil {
// return nil, err
// }
// log.Tracef("intel/filterlists: base file needs update, selected version %s", baseFile.Version())
// updateOrder = append(updateOrder, baseFile)
// }
if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse {
var err error
intermediateFile, err = getFile(intermediateListFilePath)
if err != nil && !errors.Is(err, updater.ErrNotFound) {
return nil, err
}
// if intermediateFile == nil || intermediateFile.UpgradeAvailable() || !cacheDBInUse {
// var err error
// intermediateFile, err = getFile(intermediateListFilePath)
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
// return nil, err
// }
if err == nil {
log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version())
updateOrder = append(updateOrder, intermediateFile)
}
}
// if err == nil {
// log.Tracef("intel/filterlists: intermediate file needs update, selected version %s", intermediateFile.Version())
// updateOrder = append(updateOrder, intermediateFile)
// }
// }
if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse {
var err error
urgentFile, err = getFile(urgentListFilePath)
if err != nil && !errors.Is(err, updater.ErrNotFound) {
return nil, err
}
// if urgentFile == nil || urgentFile.UpgradeAvailable() || !cacheDBInUse {
// var err error
// urgentFile, err = getFile(urgentListFilePath)
// if err != nil && !errors.Is(err, updater.ErrNotFound) {
// return nil, err
// }
if err == nil {
log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
updateOrder = append(updateOrder, urgentFile)
}
}
// if err == nil {
// log.Tracef("intel/filterlists: urgent file needs update, selected version %s", urgentFile.Version())
// updateOrder = append(updateOrder, urgentFile)
// }
// }
return resolveUpdateOrder(updateOrder)
}
func resolveUpdateOrder(updateOrder []*updater.File) ([]*updater.File, error) {
func resolveUpdateOrder(updateOrder []*registry.File) ([]*registry.File, error) {
// sort the update order by ascending version
sort.Sort(byAscVersion(updateOrder))
log.Tracef("intel/filterlists: order of updates: %v", updateOrder)
@@ -258,7 +258,7 @@ func resolveUpdateOrder(updateOrder []*updater.File) ([]*updater.File, error) {
return updateOrder[startAtIdx:], nil
}
type byAscVersion []*updater.File
type byAscVersion []*registry.File
func (fs byAscVersion) Len() int { return len(fs) }

View File

@@ -8,9 +8,8 @@ import (
maxminddb "github.com/oschwald/maxminddb-golang"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
var worker *updateWorker
@@ -22,13 +21,13 @@ func init() {
}
const (
v4MMDBResource = "intel/geoip/geoipv4.mmdb.gz"
v6MMDBResource = "intel/geoip/geoipv6.mmdb.gz"
v4MMDBResource = "geoipv4.mmdb"
v6MMDBResource = "geoipv6.mmdb"
)
type geoIPDB struct {
*maxminddb.Reader
file *updater.File
file *registry.File
}
// updateBroadcaster stores a geoIPDB and provides synchronized
@@ -47,7 +46,7 @@ func (ub *updateBroadcaster) NeedsUpdate() bool {
ub.rw.RLock()
defer ub.rw.RUnlock()
return ub.db == nil || ub.db.file.UpgradeAvailable()
return ub.db == nil // TODO(vladimir) is this needed: || ub.db.file.UpgradeAvailable()
}
// ReplaceDatabase replaces (or initially sets) the mmdb database.
@@ -181,12 +180,12 @@ func (upd *updateWorker) run(ctx *mgr.WorkerCtx) error {
func getGeoIPDB(resource string) (*geoIPDB, error) {
log.Debugf("geoip: opening database %s", resource)
file, unpackedPath, err := openAndUnpack(resource)
file, err := open(resource)
if err != nil {
return nil, err
}
reader, err := maxminddb.Open(unpackedPath)
reader, err := maxminddb.Open(file.Path())
if err != nil {
return nil, fmt.Errorf("failed to open: %w", err)
}
@@ -198,16 +197,16 @@ func getGeoIPDB(resource string) (*geoIPDB, error) {
}, nil
}
func openAndUnpack(resource string) (*updater.File, string, error) {
f, err := updates.GetFile(resource)
func open(resource string) (*registry.File, error) {
f, err := module.instance.Updates().GetFile(resource)
if err != nil {
return nil, "", fmt.Errorf("getting file: %w", err)
return nil, fmt.Errorf("getting file: %w", err)
}
unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
if err != nil {
return nil, "", fmt.Errorf("unpacking file: %w", err)
}
// unpacked, err := f.Unpack(".gz", updater.UnpackGZIP)
// if err != nil {
// return nil, "", fmt.Errorf("unpacking file: %w", err)
// }
return f, unpacked, nil
return f, nil
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
// Event Names.
@@ -105,4 +106,6 @@ func New(instance instance) (*NetEnv, error) {
return module, nil
}
type instance interface{}
type instance interface {
Updates() *updates.Updates
}

View File

@@ -17,7 +17,6 @@ import (
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/network/netutils"
"github.com/safing/portmaster/service/updates"
)
// OnlineStatus represent a state of connectivity to the Internet.
@@ -221,7 +220,7 @@ func updateOnlineStatus(status OnlineStatus, portalURL *url.URL, comment string)
// Trigger update check when coming (semi) online.
if Online() {
_ = updates.TriggerUpdate(false, false)
module.instance.Updates().EventResourcesUpdated.Submit(struct{}{})
}
}
}

View File

@@ -16,7 +16,6 @@ import (
"github.com/safing/portmaster/service/process"
"github.com/safing/portmaster/service/resolver"
"github.com/safing/portmaster/service/status"
"github.com/safing/portmaster/service/updates"
)
func registerAPIEndpoints() error {
@@ -94,7 +93,7 @@ func debugInfo(ar *api.Request) (data []byte, err error) {
config.AddToDebugInfo(di)
// Detailed information.
updates.AddToDebugInfo(di)
// TODO(vladimir): updates.AddToDebugInfo(di)
// compat.AddToDebugInfo(di) // TODO: Cannot use due to interception import requirement which we don't want for SPN Hubs.
di.AddGoroutineStack()

View File

@@ -8,6 +8,7 @@ import (
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates"
)
func prep() error {
@@ -56,7 +57,10 @@ func (ui *UI) Stop() error {
return nil
}
var shimLoaded atomic.Bool
var (
shimLoaded atomic.Bool
module *UI
)
// New returns a new UI module.
func New(instance instance) (*UI, error) {
@@ -64,7 +68,7 @@ func New(instance instance) (*UI, error) {
return nil, errors.New("only one instance allowed")
}
m := mgr.New("UI")
module := &UI{
module = &UI{
mgr: m,
instance: instance,
}
@@ -78,4 +82,5 @@ func New(instance instance) (*UI, error) {
type instance interface {
API() *api.API
Updates() *updates.Updates
}

View File

@@ -15,9 +15,8 @@ import (
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils"
"github.com/safing/portmaster/service/updates"
"github.com/safing/portmaster/service/updates/registry"
)
var (
@@ -92,9 +91,9 @@ func (bs *archiveServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
// get file from update system
zipFile, err := updates.GetFile(fmt.Sprintf("ui/modules/%s.zip", moduleName))
zipFile, err := module.instance.Updates().GetFile(fmt.Sprintf("%s.zip", moduleName))
if err != nil {
if errors.Is(err, updater.ErrNotFound) {
if errors.Is(err, registry.ErrNotFound) {
log.Tracef("ui: requested module %s does not exist", moduleName)
http.Error(w, err.Error(), http.StatusNotFound)
} else {

View File

@@ -1,161 +1,161 @@
package updates
import (
"bytes"
"io"
"net/http"
"os"
"path/filepath"
"strings"
// "bytes"
// "io"
// "net/http"
// "os"
// "path/filepath"
// "strings"
"github.com/ghodss/yaml"
// "github.com/ghodss/yaml"
"github.com/safing/portmaster/base/api"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils"
// "github.com/safing/portmaster/base/api"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
)
const (
apiPathCheckForUpdates = "updates/check"
)
func registerAPIEndpoints() error {
if err := api.RegisterEndpoint(api.Endpoint{
Name: "Check for Updates",
Description: "Checks if new versions are available. If automatic updates are enabled, they are also downloaded and applied.",
Parameters: []api.Parameter{{
Method: http.MethodPost,
Field: "download",
Value: "",
Description: "Force downloading and applying of all updates, regardless of auto-update settings.",
}},
Path: apiPathCheckForUpdates,
Write: api.PermitUser,
ActionFunc: func(r *api.Request) (msg string, err error) {
// Check if we should also download regardless of settings.
downloadAll := r.URL.Query().Has("download")
// func registerAPIEndpoints() error {
// if err := api.RegisterEndpoint(api.Endpoint{
// Name: "Check for Updates",
// Description: "Checks if new versions are available. If automatic updates are enabled, they are also downloaded and applied.",
// Parameters: []api.Parameter{{
// Method: http.MethodPost,
// Field: "download",
// Value: "",
// Description: "Force downloading and applying of all updates, regardless of auto-update settings.",
// }},
// Path: apiPathCheckForUpdates,
// Write: api.PermitUser,
// ActionFunc: func(r *api.Request) (msg string, err error) {
// // Check if we should also download regardless of settings.
// downloadAll := r.URL.Query().Has("download")
// Trigger update task.
err = TriggerUpdate(true, downloadAll)
if err != nil {
return "", err
}
// // Trigger update task.
// err = TriggerUpdate(true, downloadAll)
// if err != nil {
// return "", err
// }
// Report how we triggered.
if downloadAll {
return "downloading all updates...", nil
}
return "checking for updates...", nil
},
}); err != nil {
return err
}
// // Report how we triggered.
// if downloadAll {
// return "downloading all updates...", nil
// }
// return "checking for updates...", nil
// },
// }); err != nil {
// return err
// }
if err := api.RegisterEndpoint(api.Endpoint{
Name: "Get Resource",
Description: "Returns the requested resource from the udpate system",
Path: `updates/get/{identifier:[A-Za-z0-9/\.\-_]{1,255}}`,
Read: api.PermitUser,
ReadMethod: http.MethodGet,
HandlerFunc: func(w http.ResponseWriter, r *http.Request) {
// Get identifier from URL.
var identifier string
if ar := api.GetAPIRequest(r); ar != nil {
identifier = ar.URLVars["identifier"]
}
if identifier == "" {
http.Error(w, "no resource speicified", http.StatusBadRequest)
return
}
// if err := api.RegisterEndpoint(api.Endpoint{
// Name: "Get Resource",
// Description: "Returns the requested resource from the udpate system",
// Path: `updates/get/{identifier:[A-Za-z0-9/\.\-_]{1,255}}`,
// Read: api.PermitUser,
// ReadMethod: http.MethodGet,
// HandlerFunc: func(w http.ResponseWriter, r *http.Request) {
// // Get identifier from URL.
// var identifier string
// if ar := api.GetAPIRequest(r); ar != nil {
// identifier = ar.URLVars["identifier"]
// }
// if identifier == "" {
// http.Error(w, "no resource speicified", http.StatusBadRequest)
// return
// }
// Get resource.
resource, err := registry.GetFile(identifier)
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
return
}
// // Get resource.
// resource, err := registry.GetFile(identifier)
// if err != nil {
// http.Error(w, err.Error(), http.StatusNotFound)
// return
// }
// Open file for reading.
file, err := os.Open(resource.Path())
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
defer file.Close() //nolint:errcheck,gosec
// // Open file for reading.
// file, err := os.Open(resource.Path())
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// defer file.Close() //nolint:errcheck,gosec
// Assign file to reader
var reader io.Reader = file
// // Assign file to reader
// var reader io.Reader = file
// Add version to header.
w.Header().Set("Resource-Version", resource.Version())
// // Add version to header.
// w.Header().Set("Resource-Version", resource.Version())
// Set Content-Type.
contentType, _ := utils.MimeTypeByExtension(filepath.Ext(resource.Path()))
w.Header().Set("Content-Type", contentType)
// // Set Content-Type.
// contentType, _ := utils.MimeTypeByExtension(filepath.Ext(resource.Path()))
// w.Header().Set("Content-Type", contentType)
// Check if the content type may be returned.
accept := r.Header.Get("Accept")
if accept != "" {
mimeTypes := strings.Split(accept, ",")
// First, clean mime types.
for i, mimeType := range mimeTypes {
mimeType = strings.TrimSpace(mimeType)
mimeType, _, _ = strings.Cut(mimeType, ";")
mimeTypes[i] = mimeType
}
// Second, check if we may return anything.
var acceptsAny bool
for _, mimeType := range mimeTypes {
switch mimeType {
case "*", "*/*":
acceptsAny = true
}
}
// Third, check if we can convert.
if !acceptsAny {
var converted bool
sourceType, _, _ := strings.Cut(contentType, ";")
findConvertiblePair:
for _, mimeType := range mimeTypes {
switch {
case sourceType == "application/yaml" && mimeType == "application/json":
yamlData, err := io.ReadAll(reader)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonData, err := yaml.YAMLToJSON(yamlData)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reader = bytes.NewReader(jsonData)
converted = true
break findConvertiblePair
}
}
// // Check if the content type may be returned.
// accept := r.Header.Get("Accept")
// if accept != "" {
// mimeTypes := strings.Split(accept, ",")
// // First, clean mime types.
// for i, mimeType := range mimeTypes {
// mimeType = strings.TrimSpace(mimeType)
// mimeType, _, _ = strings.Cut(mimeType, ";")
// mimeTypes[i] = mimeType
// }
// // Second, check if we may return anything.
// var acceptsAny bool
// for _, mimeType := range mimeTypes {
// switch mimeType {
// case "*", "*/*":
// acceptsAny = true
// }
// }
// // Third, check if we can convert.
// if !acceptsAny {
// var converted bool
// sourceType, _, _ := strings.Cut(contentType, ";")
// findConvertiblePair:
// for _, mimeType := range mimeTypes {
// switch {
// case sourceType == "application/yaml" && mimeType == "application/json":
// yamlData, err := io.ReadAll(reader)
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// jsonData, err := yaml.YAMLToJSON(yamlData)
// if err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// return
// }
// reader = bytes.NewReader(jsonData)
// converted = true
// break findConvertiblePair
// }
// }
// If we could not convert to acceptable format, return an error.
if !converted {
http.Error(w, "conversion to requested format not supported", http.StatusNotAcceptable)
return
}
}
}
// // If we could not convert to acceptable format, return an error.
// if !converted {
// http.Error(w, "conversion to requested format not supported", http.StatusNotAcceptable)
// return
// }
// }
// }
// Write file.
w.WriteHeader(http.StatusOK)
if r.Method != http.MethodHead {
_, err = io.Copy(w, reader)
if err != nil {
log.Errorf("updates: failed to serve resource file: %s", err)
return
}
}
},
}); err != nil {
return err
}
// // Write file.
// w.WriteHeader(http.StatusOK)
// if r.Method != http.MethodHead {
// _, err = io.Copy(w, reader)
// if err != nil {
// log.Errorf("updates: failed to serve resource file: %s", err)
// return
// }
// }
// },
// }); err != nil {
// return err
// }
return nil
}
// return nil
// }

View File

@@ -4,9 +4,9 @@ import (
"github.com/tevino/abool"
"github.com/safing/portmaster/base/config"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/service/mgr"
// "github.com/safing/portmaster/service/updates/helper"
)
const cfgDevModeKey = "core/devMode"
@@ -27,152 +27,152 @@ var (
forceDownload = abool.New()
)
func registerConfig() error {
err := config.Register(&config.Option{
Name: "Release Channel",
Key: helper.ReleaseChannelKey,
Description: `Use "Stable" for the best experience. The "Beta" channel will have the newest features and fixes, but may also break and cause interruption. Use others only temporarily and when instructed.`,
OptType: config.OptTypeString,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: true,
DefaultValue: helper.ReleaseChannelStable,
PossibleValues: []config.PossibleValue{
{
Name: "Stable",
Description: "Production releases.",
Value: helper.ReleaseChannelStable,
},
{
Name: "Beta",
Description: "Production releases for testing new features that may break and cause interruption.",
Value: helper.ReleaseChannelBeta,
},
{
Name: "Support",
Description: "Support releases or version changes for troubleshooting. Only use temporarily and when instructed.",
Value: helper.ReleaseChannelSupport,
},
{
Name: "Staging",
Description: "Dangerous development releases for testing random things and experimenting. Only use temporarily and when instructed.",
Value: helper.ReleaseChannelStaging,
},
},
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -4,
config.DisplayHintAnnotation: config.DisplayHintOneOf,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// func registerConfig() error {
// err := config.Register(&config.Option{
// Name: "Release Channel",
// Key: helper.ReleaseChannelKey,
// Description: `Use "Stable" for the best experience. The "Beta" channel will have the newest features and fixes, but may also break and cause interruption. Use others only temporarily and when instructed.`,
// OptType: config.OptTypeString,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: true,
// DefaultValue: helper.ReleaseChannelStable,
// PossibleValues: []config.PossibleValue{
// {
// Name: "Stable",
// Description: "Production releases.",
// Value: helper.ReleaseChannelStable,
// },
// {
// Name: "Beta",
// Description: "Production releases for testing new features that may break and cause interruption.",
// Value: helper.ReleaseChannelBeta,
// },
// {
// Name: "Support",
// Description: "Support releases or version changes for troubleshooting. Only use temporarily and when instructed.",
// Value: helper.ReleaseChannelSupport,
// },
// {
// Name: "Staging",
// Description: "Dangerous development releases for testing random things and experimenting. Only use temporarily and when instructed.",
// Value: helper.ReleaseChannelStaging,
// },
// },
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -4,
// config.DisplayHintAnnotation: config.DisplayHintOneOf,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
err = config.Register(&config.Option{
Name: "Automatic Software Updates",
Key: enableSoftwareUpdatesKey,
Description: "Automatically check for and download software updates. This does not include intelligence data updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -12,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// err = config.Register(&config.Option{
// Name: "Automatic Software Updates",
// Key: enableSoftwareUpdatesKey,
// Description: "Automatically check for and download software updates. This does not include intelligence data updates.",
// OptType: config.OptTypeBool,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: false,
// DefaultValue: true,
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -12,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
err = config.Register(&config.Option{
Name: "Automatic Intelligence Data Updates",
Key: enableIntelUpdatesKey,
Description: "Automatically check for and download intelligence data updates. This includes filter lists, geo-ip data, and more. Does not include software updates.",
OptType: config.OptTypeBool,
ExpertiseLevel: config.ExpertiseLevelExpert,
ReleaseLevel: config.ReleaseLevelStable,
RequiresRestart: false,
DefaultValue: true,
Annotations: config.Annotations{
config.DisplayOrderAnnotation: -11,
config.CategoryAnnotation: "Updates",
},
})
if err != nil {
return err
}
// err = config.Register(&config.Option{
// Name: "Automatic Intelligence Data Updates",
// Key: enableIntelUpdatesKey,
// Description: "Automatically check for and download intelligence data updates. This includes filter lists, geo-ip data, and more. Does not include software updates.",
// OptType: config.OptTypeBool,
// ExpertiseLevel: config.ExpertiseLevelExpert,
// ReleaseLevel: config.ReleaseLevelStable,
// RequiresRestart: false,
// DefaultValue: true,
// Annotations: config.Annotations{
// config.DisplayOrderAnnotation: -11,
// config.CategoryAnnotation: "Updates",
// },
// })
// if err != nil {
// return err
// }
return nil
}
// return nil
// }
func initConfig() {
releaseChannel = config.Concurrent.GetAsString(helper.ReleaseChannelKey, helper.ReleaseChannelStable)
initialReleaseChannel = releaseChannel()
previousReleaseChannel = releaseChannel()
// func initConfig() {
// releaseChannel = config.Concurrent.GetAsString(helper.ReleaseChannelKey, helper.ReleaseChannelStable)
// initialReleaseChannel = releaseChannel()
// previousReleaseChannel = releaseChannel()
enableSoftwareUpdates = config.Concurrent.GetAsBool(enableSoftwareUpdatesKey, true)
enableIntelUpdates = config.Concurrent.GetAsBool(enableIntelUpdatesKey, true)
softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
intelUpdatesCurrentlyEnabled = enableIntelUpdates()
// enableSoftwareUpdates = config.Concurrent.GetAsBool(enableSoftwareUpdatesKey, true)
// enableIntelUpdates = config.Concurrent.GetAsBool(enableIntelUpdatesKey, true)
// softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
// intelUpdatesCurrentlyEnabled = enableIntelUpdates()
devMode = config.Concurrent.GetAsBool(cfgDevModeKey, false)
previousDevMode = devMode()
}
// devMode = config.Concurrent.GetAsBool(cfgDevModeKey, false)
// previousDevMode = devMode()
// }
func updateRegistryConfig(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
changed := false
// func updateRegistryConfig(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// changed := false
if enableSoftwareUpdates() != softwareUpdatesCurrentlyEnabled {
softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
changed = true
}
// if enableSoftwareUpdates() != softwareUpdatesCurrentlyEnabled {
// softwareUpdatesCurrentlyEnabled = enableSoftwareUpdates()
// changed = true
// }
if enableIntelUpdates() != intelUpdatesCurrentlyEnabled {
intelUpdatesCurrentlyEnabled = enableIntelUpdates()
changed = true
}
// if enableIntelUpdates() != intelUpdatesCurrentlyEnabled {
// intelUpdatesCurrentlyEnabled = enableIntelUpdates()
// changed = true
// }
if devMode() != previousDevMode {
registry.SetDevMode(devMode())
previousDevMode = devMode()
changed = true
}
// if devMode() != previousDevMode {
// registry.SetDevMode(devMode())
// previousDevMode = devMode()
// changed = true
// }
if releaseChannel() != previousReleaseChannel {
previousReleaseChannel = releaseChannel()
changed = true
}
// if releaseChannel() != previousReleaseChannel {
// previousReleaseChannel = releaseChannel()
// changed = true
// }
if changed {
// Update indexes based on new settings.
warning := helper.SetIndexes(
registry,
releaseChannel(),
true,
softwareUpdatesCurrentlyEnabled,
intelUpdatesCurrentlyEnabled,
)
if warning != nil {
log.Warningf("updates: %s", warning)
}
// if changed {
// // Update indexes based on new settings.
// warning := helper.SetIndexes(
// registry,
// releaseChannel(),
// true,
// softwareUpdatesCurrentlyEnabled,
// intelUpdatesCurrentlyEnabled,
// )
// if warning != nil {
// log.Warningf("updates: %s", warning)
// }
// Select versions depending on new indexes and modes.
registry.SelectVersions()
module.EventVersionsUpdated.Submit(struct{}{})
// // Select versions depending on new indexes and modes.
// registry.SelectVersions()
// module.EventVersionsUpdated.Submit(struct{}{})
if softwareUpdatesCurrentlyEnabled || intelUpdatesCurrentlyEnabled {
module.states.Clear()
if err := TriggerUpdate(true, false); err != nil {
log.Warningf("updates: failed to trigger update: %s", err)
}
log.Infof("updates: automatic updates are now enabled")
} else {
log.Warningf("updates: automatic updates are now completely disabled")
}
}
// if softwareUpdatesCurrentlyEnabled || intelUpdatesCurrentlyEnabled {
// module.states.Clear()
// if err := TriggerUpdate(true, false); err != nil {
// log.Warningf("updates: failed to trigger update: %s", err)
// }
// log.Infof("updates: automatic updates are now enabled")
// } else {
// log.Warningf("updates: automatic updates are now completely disabled")
// }
// }
return false, nil
}
// return false, nil
// }

View File

@@ -1,238 +1,237 @@
package updates
import (
"fmt"
"sort"
"strings"
"sync"
// import (
// "fmt"
// "sort"
// "sync"
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils/debug"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
)
// "github.com/safing/portmaster/base/database/record"
// "github.com/safing/portmaster/base/info"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/base/utils/debug"
// "github.com/safing/portmaster/service/mgr"
// "github.com/safing/portmaster/service/updates/helper"
// )
const (
// versionsDBKey is the database key for update version information.
versionsDBKey = "core:status/versions"
// const (
// // versionsDBKey is the database key for update version information.
// versionsDBKey = "core:status/versions"
// versionsDBKey is the database key for simple update version information.
simpleVersionsDBKey = "core:status/simple-versions"
// // versionsDBKey is the database key for simple update version information.
// simpleVersionsDBKey = "core:status/simple-versions"
// updateStatusDBKey is the database key for update status information.
updateStatusDBKey = "core:status/updates"
)
// // updateStatusDBKey is the database key for update status information.
// updateStatusDBKey = "core:status/updates"
// )
// Versions holds update versions and status information.
type Versions struct {
record.Base
sync.Mutex
// // Versions holds update versions and status information.
// type Versions struct {
// record.Base
// sync.Mutex
Core *info.Info
Resources map[string]*updater.Resource
Channel string
Beta bool
Staging bool
}
// Core *info.Info
// Resources map[string]*updater.Resource
// Channel string
// Beta bool
// Staging bool
// }
// SimpleVersions holds simplified update versions and status information.
type SimpleVersions struct {
record.Base
sync.Mutex
// // SimpleVersions holds simplified update versions and status information.
// type SimpleVersions struct {
// record.Base
// sync.Mutex
Build *info.Info
Resources map[string]*SimplifiedResourceVersion
Channel string
}
// Build *info.Info
// Resources map[string]*SimplifiedResourceVersion
// Channel string
// }
// SimplifiedResourceVersion holds version information about one resource.
type SimplifiedResourceVersion struct {
Version string
}
// // SimplifiedResourceVersion holds version information about one resource.
// type SimplifiedResourceVersion struct {
// Version string
// }
// UpdateStateExport is a wrapper to export the updates state.
type UpdateStateExport struct {
record.Base
sync.Mutex
// // UpdateStateExport is a wrapper to export the updates state.
// type UpdateStateExport struct {
// record.Base
// sync.Mutex
*updater.UpdateState
}
// *updater.UpdateState
// }
// GetVersions returns the update versions and status information.
// Resources must be locked when accessed.
func GetVersions() *Versions {
return &Versions{
Core: info.GetInfo(),
Resources: registry.Export(),
Channel: initialReleaseChannel,
Beta: initialReleaseChannel == helper.ReleaseChannelBeta,
Staging: initialReleaseChannel == helper.ReleaseChannelStaging,
}
}
// // GetVersions returns the update versions and status information.
// // Resources must be locked when accessed.
// func GetVersions() *Versions {
// return &Versions{
// Core: info.GetInfo(),
// Resources: nil,
// Channel: initialReleaseChannel,
// Beta: initialReleaseChannel == helper.ReleaseChannelBeta,
// Staging: initialReleaseChannel == helper.ReleaseChannelStaging,
// }
// }
// GetSimpleVersions returns the simplified update versions and status information.
func GetSimpleVersions() *SimpleVersions {
// Fill base info.
v := &SimpleVersions{
Build: info.GetInfo(),
Resources: make(map[string]*SimplifiedResourceVersion),
Channel: initialReleaseChannel,
}
// // GetSimpleVersions returns the simplified update versions and status information.
// func GetSimpleVersions() *SimpleVersions {
// // Fill base info.
// v := &SimpleVersions{
// Build: info.GetInfo(),
// Resources: make(map[string]*SimplifiedResourceVersion),
// Channel: initialReleaseChannel,
// }
// Iterate through all versions and add version info.
for id, resource := range registry.Export() {
func() {
resource.Lock()
defer resource.Unlock()
// // Iterate through all versions and add version info.
// // for id, resource := range registry.Export() {
// // func() {
// // resource.Lock()
// // defer resource.Unlock()
// Get current in-used or selected version.
var rv *updater.ResourceVersion
switch {
case resource.ActiveVersion != nil:
rv = resource.ActiveVersion
case resource.SelectedVersion != nil:
rv = resource.SelectedVersion
}
// // // Get current in-used or selected version.
// // var rv *updater.ResourceVersion
// // switch {
// // case resource.ActiveVersion != nil:
// // rv = resource.ActiveVersion
// // case resource.SelectedVersion != nil:
// // rv = resource.SelectedVersion
// // }
// Get information from resource.
if rv != nil {
v.Resources[id] = &SimplifiedResourceVersion{
Version: rv.VersionNumber,
}
}
}()
}
// // // Get information from resource.
// // if rv != nil {
// // v.Resources[id] = &SimplifiedResourceVersion{
// // Version: rv.VersionNumber,
// // }
// // }
// // }()
// // }
return v
}
// return v
// }
// GetStateExport gets the update state from the registry and returns it in an
// exportable struct.
func GetStateExport() *UpdateStateExport {
export := registry.GetState()
return &UpdateStateExport{
UpdateState: &export.Updates,
}
}
// // GetStateExport gets the update state from the registry and returns it in an
// // exportable struct.
// func GetStateExport() *UpdateStateExport {
// // export := registry.GetState()
// return &UpdateStateExport{
// // UpdateState: &export.Updates,
// }
// }
// LoadStateExport loads the exported update state from the database.
func LoadStateExport() (*UpdateStateExport, error) {
r, err := db.Get(updateStatusDBKey)
if err != nil {
return nil, err
}
// // LoadStateExport loads the exported update state from the database.
// func LoadStateExport() (*UpdateStateExport, error) {
// r, err := db.Get(updateStatusDBKey)
// if err != nil {
// return nil, err
// }
// unwrap
if r.IsWrapped() {
// only allocate a new struct, if we need it
newRecord := &UpdateStateExport{}
err = record.Unwrap(r, newRecord)
if err != nil {
return nil, err
}
return newRecord, nil
}
// // unwrap
// if r.IsWrapped() {
// // only allocate a new struct, if we need it
// newRecord := &UpdateStateExport{}
// err = record.Unwrap(r, newRecord)
// if err != nil {
// return nil, err
// }
// return newRecord, nil
// }
// or adjust type
newRecord, ok := r.(*UpdateStateExport)
if !ok {
return nil, fmt.Errorf("record not of type *UpdateStateExport, but %T", r)
}
return newRecord, nil
}
// // or adjust type
// newRecord, ok := r.(*UpdateStateExport)
// if !ok {
// return nil, fmt.Errorf("record not of type *UpdateStateExport, but %T", r)
// }
// return newRecord, nil
// }
func initVersionExport() (err error) {
if err := GetVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
if err := GetSimpleVersions().save(); err != nil {
log.Warningf("updates: failed to export version information: %s", err)
}
// func initVersionExport() (err error) {
// if err := GetVersions().save(); err != nil {
// log.Warningf("updates: failed to export version information: %s", err)
// }
// if err := GetSimpleVersions().save(); err != nil {
// log.Warningf("updates: failed to export version information: %s", err)
// }
module.EventVersionsUpdated.AddCallback("export version status", export)
return nil
}
// // module.EventVersionsUpdated.AddCallback("export version status", export)
// return nil
// }
func (v *Versions) save() error {
if !v.KeyIsSet() {
v.SetKey(versionsDBKey)
}
return db.Put(v)
}
// func (v *Versions) save() error {
// if !v.KeyIsSet() {
// v.SetKey(versionsDBKey)
// }
// return db.Put(v)
// }
func (v *SimpleVersions) save() error {
if !v.KeyIsSet() {
v.SetKey(simpleVersionsDBKey)
}
return db.Put(v)
}
// func (v *SimpleVersions) save() error {
// if !v.KeyIsSet() {
// v.SetKey(simpleVersionsDBKey)
// }
// return db.Put(v)
// }
func (s *UpdateStateExport) save() error {
if !s.KeyIsSet() {
s.SetKey(updateStatusDBKey)
}
return db.Put(s)
}
// func (s *UpdateStateExport) save() error {
// if !s.KeyIsSet() {
// s.SetKey(updateStatusDBKey)
// }
// return db.Put(s)
// }
// export is an event hook.
func export(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// Export versions.
if err := GetVersions().save(); err != nil {
return false, err
}
if err := GetSimpleVersions().save(); err != nil {
return false, err
}
// Export udpate state.
if err := GetStateExport().save(); err != nil {
return false, err
}
// // export is an event hook.
// func export(_ *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// // Export versions.
// if err := GetVersions().save(); err != nil {
// return false, err
// }
// if err := GetSimpleVersions().save(); err != nil {
// return false, err
// }
// // Export udpate state.
// if err := GetStateExport().save(); err != nil {
// return false, err
// }
return false, nil
}
// return false, nil
// }
// AddToDebugInfo adds the update system status to the given debug.Info.
func AddToDebugInfo(di *debug.Info) {
// Get resources from registry.
resources := registry.Export()
platformPrefix := helper.PlatformIdentifier("")
// // AddToDebugInfo adds the update system status to the given debug.Info.
// func AddToDebugInfo(di *debug.Info) {
// // Get resources from registry.
// // resources := registry.Export()
// // platformPrefix := helper.PlatformIdentifier("")
// Collect data for debug info.
var active, selected []string
var activeCnt, totalCnt int
for id, r := range resources {
// Ignore resources for other platforms.
if !strings.HasPrefix(id, "all/") && !strings.HasPrefix(id, platformPrefix) {
continue
}
// // Collect data for debug info.
// var active, selected []string
// var activeCnt, totalCnt int
// // for id, r := range resources {
// // // Ignore resources for other platforms.
// // if !strings.HasPrefix(id, "all/") && !strings.HasPrefix(id, platformPrefix) {
// // continue
// // }
totalCnt++
if r.ActiveVersion != nil {
activeCnt++
active = append(active, fmt.Sprintf("%s: %s", id, r.ActiveVersion.VersionNumber))
}
if r.SelectedVersion != nil {
selected = append(selected, fmt.Sprintf("%s: %s", id, r.SelectedVersion.VersionNumber))
}
}
sort.Strings(active)
sort.Strings(selected)
// // totalCnt++
// // if r.ActiveVersion != nil {
// // activeCnt++
// // active = append(active, fmt.Sprintf("%s: %s", id, r.ActiveVersion.VersionNumber))
// // }
// // if r.SelectedVersion != nil {
// // selected = append(selected, fmt.Sprintf("%s: %s", id, r.SelectedVersion.VersionNumber))
// // }
// // }
// sort.Strings(active)
// sort.Strings(selected)
// Compile to one list.
lines := make([]string, 0, len(active)+len(selected)+3)
lines = append(lines, "Active:")
lines = append(lines, active...)
lines = append(lines, "")
lines = append(lines, "Selected:")
lines = append(lines, selected...)
// // Compile to one list.
// lines := make([]string, 0, len(active)+len(selected)+3)
// lines = append(lines, "Active:")
// lines = append(lines, active...)
// lines = append(lines, "")
// lines = append(lines, "Selected:")
// lines = append(lines, selected...)
// Add section.
di.AddSection(
fmt.Sprintf("Updates: %s (%d/%d)", initialReleaseChannel, activeCnt, totalCnt),
debug.UseCodeSection|debug.AddContentLineBreaks,
lines...,
)
}
// // Add section.
// di.AddSection(
// fmt.Sprintf("Updates: %s (%d/%d)", initialReleaseChannel, activeCnt, totalCnt),
// debug.UseCodeSection|debug.AddContentLineBreaks,
// lines...,
// )
// }

View File

@@ -1,72 +1,65 @@
package updates
import (
"path"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/updates/helper"
)
// GetPlatformFile returns the latest platform specific file identified by the given identifier.
func GetPlatformFile(identifier string) (*updater.File, error) {
identifier = helper.PlatformIdentifier(identifier)
// func GetPlatformFile(identifier string) (*updater.File, error) {
// identifier = helper.PlatformIdentifier(identifier)
file, err := registry.GetFile(identifier)
if err != nil {
return nil, err
}
// file, err := registry.GetFile(identifier)
// if err != nil {
// return nil, err
// }
module.EventVersionsUpdated.Submit(struct{}{})
return file, nil
}
// module.EventVersionsUpdated.Submit(struct{}{})
// return file, nil
// }
// GetFile returns the latest generic file identified by the given identifier.
func GetFile(identifier string) (*updater.File, error) {
identifier = path.Join("all", identifier)
// func GetFile(identifier string) (*updater.File, error) {
// identifier = path.Join("all", identifier)
file, err := registry.GetFile(identifier)
if err != nil {
return nil, err
}
// file, err := registry.GetFile(identifier)
// if err != nil {
// return nil, err
// }
module.EventVersionsUpdated.Submit(struct{}{})
return file, nil
}
// module.EventVersionsUpdated.Submit(struct{}{})
// return file, nil
// }
// GetPlatformVersion returns the selected platform specific version of the
// given identifier.
// The returned resource version may not be modified.
func GetPlatformVersion(identifier string) (*updater.ResourceVersion, error) {
identifier = helper.PlatformIdentifier(identifier)
// func GetPlatformVersion(identifier string) (*updater.ResourceVersion, error) {
// identifier = helper.PlatformIdentifier(identifier)
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }
// GetVersion returns the selected generic version of the given identifier.
// The returned resource version may not be modified.
func GetVersion(identifier string) (*updater.ResourceVersion, error) {
identifier = path.Join("all", identifier)
// func GetVersion(identifier string) (*updater.ResourceVersion, error) {
// identifier = path.Join("all", identifier)
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }
// GetVersionWithFullID returns the selected generic version of the given full identifier.
// The returned resource version may not be modified.
func GetVersionWithFullID(identifier string) (*updater.ResourceVersion, error) {
rv, err := registry.GetVersion(identifier)
if err != nil {
return nil, err
}
// func GetVersionWithFullID(identifier string) (*updater.ResourceVersion, error) {
// rv, err := registry.GetVersion(identifier)
// if err != nil {
// return nil, err
// }
return rv, nil
}
// return rv, nil
// }

View File

@@ -1,57 +1,58 @@
package helper
import (
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
// import (
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "runtime"
// "strings"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
)
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/service/updates/registry"
// )
var pmElectronUpdate *updater.File
// var pmElectronUpdate *registry.File
const suidBitWarning = `Failed to set SUID permissions for chrome-sandbox. This is required for Linux kernel versions that do not have unprivileged user namespaces (CONFIG_USER_NS_UNPRIVILEGED) enabled. If you're running and up-to-date distribution kernel you can likely ignore this warning. If you encounter issue starting the user interface please either update your kernel or set the SUID bit (mode 0%0o) on %s`
// const suidBitWarning = `Failed to set SUID permissions for chrome-sandbox. This is required for Linux kernel versions that do not have unprivileged user namespaces (CONFIG_USER_NS_UNPRIVILEGED) enabled. If you're running and up-to-date distribution kernel you can likely ignore this warning. If you encounter issue starting the user interface please either update your kernel or set the SUID bit (mode 0%0o) on %s`
// EnsureChromeSandboxPermissions makes sure the chrome-sandbox distributed
// by our app-electron package has the SUID bit set on systems that do not
// allow unprivileged CLONE_NEWUSER (clone(3)).
// On non-linux systems or systems that have kernel.unprivileged_userns_clone
// set to 1 EnsureChromeSandboPermissions is a NO-OP.
func EnsureChromeSandboxPermissions(reg *updater.ResourceRegistry) error {
if runtime.GOOS != "linux" {
return nil
}
// // EnsureChromeSandboxPermissions makes sure the chrome-sandbox distributed
// // by our app-electron package has the SUID bit set on systems that do not
// // allow unprivileged CLONE_NEWUSER (clone(3)).
// // On non-linux systems or systems that have kernel.unprivileged_userns_clone
// // set to 1 EnsureChromeSandboPermissions is a NO-OP.
// func EnsureChromeSandboxPermissions(reg *updater.ResourceRegistry) error {
// if runtime.GOOS != "linux" {
// return nil
// }
if pmElectronUpdate != nil && !pmElectronUpdate.UpgradeAvailable() {
return nil
}
// if pmElectronUpdate != nil && !pmElectronUpdate.UpgradeAvailable() {
// return nil
// }
identifier := PlatformIdentifier("app/portmaster-app.zip")
// identifier := PlatformIdentifier("app/portmaster-app.zip")
var err error
pmElectronUpdate, err = reg.GetFile(identifier)
if err != nil {
if errors.Is(err, updater.ErrNotAvailableLocally) {
return nil
}
return fmt.Errorf("failed to get file: %w", err)
}
// var err error
// pmElectronUpdate, err = reg.GetFile(identifier)
// if err != nil {
// if errors.Is(err, updater.ErrNotAvailableLocally) {
// return nil
// }
// return fmt.Errorf("failed to get file: %w", err)
// }
unpackedPath := strings.TrimSuffix(
pmElectronUpdate.Path(),
filepath.Ext(pmElectronUpdate.Path()),
)
sandboxFile := filepath.Join(unpackedPath, "chrome-sandbox")
if err := os.Chmod(sandboxFile, 0o0755|os.ModeSetuid); err != nil {
log.Errorf(suidBitWarning, 0o0755|os.ModeSetuid, sandboxFile)
// unpackedPath := strings.TrimSuffix(
// pmElectronUpdate.Path(),
// filepath.Ext(pmElectronUpdate.Path()),
// )
// sandboxFile := filepath.Join(unpackedPath, "chrome-sandbox")
// if err := os.Chmod(sandboxFile, 0o0755|os.ModeSetuid); err != nil {
// log.Errorf(suidBitWarning, 0o0755|os.ModeSetuid, sandboxFile)
return fmt.Errorf("failed to chmod: %w", err)
}
log.Debugf("updates: fixed SUID permission for chrome-sandbox")
// return fmt.Errorf("failed to chmod: %w", err)
// }
// log.Debugf("updates: fixed SUID permission for chrome-sandbox")
return nil
}
// return nil
// }

View File

@@ -1,136 +1,136 @@
package helper
import (
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
// import (
// "errors"
// "fmt"
// "io/fs"
// "os"
// "path/filepath"
"github.com/safing/jess/filesig"
"github.com/safing/portmaster/base/updater"
)
// "github.com/safing/jess/filesig"
// "github.com/safing/portmaster/base/updater"
// )
// Release Channel Configuration Keys.
const (
ReleaseChannelKey = "core/releaseChannel"
ReleaseChannelJSONKey = "core.releaseChannel"
)
// // Release Channel Configuration Keys.
// const (
// ReleaseChannelKey = "core/releaseChannel"
// ReleaseChannelJSONKey = "core.releaseChannel"
// )
// Release Channels.
const (
ReleaseChannelStable = "stable"
ReleaseChannelBeta = "beta"
ReleaseChannelStaging = "staging"
ReleaseChannelSupport = "support"
)
// // Release Channels.
// const (
// ReleaseChannelStable = "stable"
// ReleaseChannelBeta = "beta"
// ReleaseChannelStaging = "staging"
// ReleaseChannelSupport = "support"
// )
const jsonSuffix = ".json"
// const jsonSuffix = ".json"
// SetIndexes sets the update registry indexes and also configures the registry
// to use pre-releases based on the channel.
func SetIndexes(
registry *updater.ResourceRegistry,
releaseChannel string,
deleteUnusedIndexes bool,
autoDownload bool,
autoDownloadIntel bool,
) (warning error) {
usePreReleases := false
// // SetIndexes sets the update registry indexes and also configures the registry
// // to use pre-releases based on the channel.
// func SetIndexes(
// registry *updater.ResourceRegistry,
// releaseChannel string,
// deleteUnusedIndexes bool,
// autoDownload bool,
// autoDownloadIntel bool,
// ) (warning error) {
// usePreReleases := false
// Be reminded that the order is important, as indexes added later will
// override the current release from earlier indexes.
// // Be reminded that the order is important, as indexes added later will
// // override the current release from earlier indexes.
// Reset indexes before adding them (again).
registry.ResetIndexes()
// // Reset indexes before adding them (again).
// registry.ResetIndexes()
// Add the intel index first, in order to be able to override it with the
// other indexes when needed.
registry.AddIndex(updater.Index{
Path: "all/intel/intel.json",
AutoDownload: autoDownloadIntel,
})
// // Add the intel index first, in order to be able to override it with the
// // other indexes when needed.
// registry.AddIndex(updater.Index{
// Path: "all/intel/intel.json",
// AutoDownload: autoDownloadIntel,
// })
// Always add the stable index as a base.
registry.AddIndex(updater.Index{
Path: ReleaseChannelStable + jsonSuffix,
AutoDownload: autoDownload,
})
// // Always add the stable index as a base.
// registry.AddIndex(updater.Index{
// Path: ReleaseChannelStable + jsonSuffix,
// AutoDownload: autoDownload,
// })
// Add beta index if in beta or staging channel.
indexPath := ReleaseChannelBeta + jsonSuffix
if releaseChannel == ReleaseChannelBeta ||
releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
PreRelease: true,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add beta index if in beta or staging channel.
// indexPath := ReleaseChannelBeta + jsonSuffix
// if releaseChannel == ReleaseChannelBeta ||
// releaseChannel == ReleaseChannelStaging ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// PreRelease: true,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Add staging index if in staging channel.
indexPath = ReleaseChannelStaging + jsonSuffix
if releaseChannel == ReleaseChannelStaging ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
PreRelease: true,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add staging index if in staging channel.
// indexPath = ReleaseChannelStaging + jsonSuffix
// if releaseChannel == ReleaseChannelStaging ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// PreRelease: true,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Add support index if in support channel.
indexPath = ReleaseChannelSupport + jsonSuffix
if releaseChannel == ReleaseChannelSupport ||
(releaseChannel == "" && indexExists(registry, indexPath)) {
registry.AddIndex(updater.Index{
Path: indexPath,
AutoDownload: autoDownload,
})
usePreReleases = true
} else if deleteUnusedIndexes {
err := deleteIndex(registry, indexPath)
if err != nil {
warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
}
}
// // Add support index if in support channel.
// indexPath = ReleaseChannelSupport + jsonSuffix
// if releaseChannel == ReleaseChannelSupport ||
// (releaseChannel == "" && indexExists(registry, indexPath)) {
// registry.AddIndex(updater.Index{
// Path: indexPath,
// AutoDownload: autoDownload,
// })
// usePreReleases = true
// } else if deleteUnusedIndexes {
// err := deleteIndex(registry, indexPath)
// if err != nil {
// warning = fmt.Errorf("failed to delete unused index %s: %w", indexPath, err)
// }
// }
// Set pre-release usage.
registry.SetUsePreReleases(usePreReleases)
// // Set pre-release usage.
// registry.SetUsePreReleases(usePreReleases)
return warning
}
// return warning
// }
func indexExists(registry *updater.ResourceRegistry, indexPath string) bool {
_, err := os.Stat(filepath.Join(registry.StorageDir().Path, indexPath))
return err == nil
}
// func indexExists(registry *updater.ResourceRegistry, indexPath string) bool {
// _, err := os.Stat(filepath.Join(registry.StorageDir().Path, indexPath))
// return err == nil
// }
func deleteIndex(registry *updater.ResourceRegistry, indexPath string) error {
// Remove index itself.
err := os.Remove(filepath.Join(registry.StorageDir().Path, indexPath))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
// func deleteIndex(registry *updater.ResourceRegistry, indexPath string) error {
// // Remove index itself.
// err := os.Remove(filepath.Join(registry.StorageDir().Path, indexPath))
// if err != nil && !errors.Is(err, fs.ErrNotExist) {
// return err
// }
// Remove any accompanying signature.
err = os.Remove(filepath.Join(registry.StorageDir().Path, indexPath+filesig.Extension))
if err != nil && !errors.Is(err, fs.ErrNotExist) {
return err
}
// // Remove any accompanying signature.
// err = os.Remove(filepath.Join(registry.StorageDir().Path, indexPath+filesig.Extension))
// if err != nil && !errors.Is(err, fs.ErrNotExist) {
// return err
// }
return nil
}
// return nil
// }

View File

@@ -1,42 +1,42 @@
package helper
import (
"github.com/safing/jess"
"github.com/safing/portmaster/base/updater"
)
// import (
// "github.com/safing/jess"
// "github.com/safing/portmaster/base/updater"
// )
var (
// VerificationConfig holds the complete verification configuration for the registry.
VerificationConfig = map[string]*updater.VerificationOptions{
"": { // Default.
TrustStore: BinarySigningTrustStore,
DownloadPolicy: updater.SignaturePolicyRequire,
DiskLoadPolicy: updater.SignaturePolicyWarn,
},
"all/intel/": nil, // Disable until IntelHub supports signing.
}
// var (
// // VerificationConfig holds the complete verification configuration for the registry.
// VerificationConfig = map[string]*updater.VerificationOptions{
// "": { // Default.
// TrustStore: BinarySigningTrustStore,
// DownloadPolicy: updater.SignaturePolicyRequire,
// DiskLoadPolicy: updater.SignaturePolicyWarn,
// },
// "all/intel/": nil, // Disable until IntelHub supports signing.
// }
// BinarySigningKeys holds the signing keys in text format.
BinarySigningKeys = []string{
// Safing Code Signing Key #1
"recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// Safing Code Signing Key #2
"recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
}
// // BinarySigningKeys holds the signing keys in text format.
// BinarySigningKeys = []string{
// // Safing Code Signing Key #1
// "recipient:public-ed25519-key:safing-code-signing-key-1:92bgBLneQUWrhYLPpBDjqHbpFPuNVCPAaivQ951A4aq72HcTiw7R1QmPJwFM1mdePAvEVDjkeb8S4fp2pmRCsRa8HrCvWQEjd88rfZ6TznJMfY4g7P8ioGFjfpyx2ZJ8WCZJG5Qt4Z9nkabhxo2Nbi3iywBTYDLSbP5CXqi7jryW7BufWWuaRVufFFzhwUC2ryWFWMdkUmsAZcvXwde4KLN9FrkWAy61fGaJ8GCwGnGCSitANnU2cQrsGBXZzxmzxwrYD",
// // Safing Code Signing Key #2
// "recipient:public-ed25519-key:safing-code-signing-key-2:92bgBLneQUWrhYLPpBDjqHbPC2d1o5JMyZFdavWBNVtdvbPfzDewLW95ScXfYPHd3QvWHSWCtB4xpthaYWxSkK1kYiGp68DPa2HaU8yQ5dZhaAUuV4Kzv42pJcWkCeVnBYqgGBXobuz52rFqhDJy3rz7soXEmYhJEJWwLwMeioK3VzN3QmGSYXXjosHMMNC76rjufSoLNtUQUWZDSnHmqbuxbKMCCsjFXUGGhtZVyb7bnu7QLTLk6SKHBJDMB6zdL9sw3",
// }
// BinarySigningTrustStore is an in-memory trust store with the signing keys.
BinarySigningTrustStore = jess.NewMemTrustStore()
)
// // BinarySigningTrustStore is an in-memory trust store with the signing keys.
// BinarySigningTrustStore = jess.NewMemTrustStore()
// )
func init() {
for _, signingKey := range BinarySigningKeys {
rcpt, err := jess.RecipientFromTextFormat(signingKey)
if err != nil {
panic(err)
}
err = BinarySigningTrustStore.StoreSignet(rcpt)
if err != nil {
panic(err)
}
}
}
// func init() {
// for _, signingKey := range BinarySigningKeys {
// rcpt, err := jess.RecipientFromTextFormat(signingKey)
// if err != nil {
// panic(err)
// }
// err = BinarySigningTrustStore.StoreSignet(rcpt)
// if err != nil {
// panic(err)
// }
// }
// }

View File

@@ -1,95 +1,95 @@
package helper
import (
"fmt"
"runtime"
// import (
// "fmt"
// "runtime"
"github.com/tevino/abool"
)
// "github.com/tevino/abool"
// )
const onWindows = runtime.GOOS == "windows"
// const onWindows = runtime.GOOS == "windows"
var intelOnly = abool.New()
// var intelOnly = abool.New()
// IntelOnly specifies that only intel data is mandatory.
func IntelOnly() {
intelOnly.Set()
}
// // IntelOnly specifies that only intel data is mandatory.
// func IntelOnly() {
// intelOnly.Set()
// }
// PlatformIdentifier converts identifier for the current platform.
func PlatformIdentifier(identifier string) string {
// From https://golang.org/pkg/runtime/#GOARCH
// GOOS is the running program's operating system target: one of darwin, freebsd, linux, and so on.
// GOARCH is the running program's architecture target: one of 386, amd64, arm, s390x, and so on.
return fmt.Sprintf("%s_%s/%s", runtime.GOOS, runtime.GOARCH, identifier)
}
// // PlatformIdentifier converts identifier for the current platform.
// func PlatformIdentifier(identifier string) string {
// // From https://golang.org/pkg/runtime/#GOARCH
// // GOOS is the running program's operating system target: one of darwin, freebsd, linux, and so on.
// // GOARCH is the running program's architecture target: one of 386, amd64, arm, s390x, and so on.
// return fmt.Sprintf("%s_%s/%s", runtime.GOOS, runtime.GOARCH, identifier)
// }
// MandatoryUpdates returns mandatory updates that should be loaded on install
// or reset.
func MandatoryUpdates() (identifiers []string) {
// Intel
identifiers = append(
identifiers,
// // MandatoryUpdates returns mandatory updates that should be loaded on install
// // or reset.
// func MandatoryUpdates() (identifiers []string) {
// // Intel
// identifiers = append(
// identifiers,
// Filter lists data
"all/intel/lists/index.dsd",
"all/intel/lists/base.dsdl",
"all/intel/lists/intermediate.dsdl",
"all/intel/lists/urgent.dsdl",
// // Filter lists data
// "all/intel/lists/index.dsd",
// "all/intel/lists/base.dsdl",
// "all/intel/lists/intermediate.dsdl",
// "all/intel/lists/urgent.dsdl",
// Geo IP data
"all/intel/geoip/geoipv4.mmdb.gz",
"all/intel/geoip/geoipv6.mmdb.gz",
)
// // Geo IP data
// "all/intel/geoip/geoipv4.mmdb.gz",
// "all/intel/geoip/geoipv6.mmdb.gz",
// )
// Stop here if we only want intel data.
if intelOnly.IsSet() {
return identifiers
}
// // Stop here if we only want intel data.
// if intelOnly.IsSet() {
// return identifiers
// }
// Binaries
if onWindows {
identifiers = append(
identifiers,
PlatformIdentifier("core/portmaster-core.exe"),
PlatformIdentifier("kext/portmaster-kext.sys"),
PlatformIdentifier("kext/portmaster-kext.pdb"),
PlatformIdentifier("start/portmaster-start.exe"),
PlatformIdentifier("notifier/portmaster-notifier.exe"),
PlatformIdentifier("notifier/portmaster-wintoast.dll"),
PlatformIdentifier("app2/portmaster-app.zip"),
)
} else {
identifiers = append(
identifiers,
PlatformIdentifier("core/portmaster-core"),
PlatformIdentifier("start/portmaster-start"),
PlatformIdentifier("notifier/portmaster-notifier"),
PlatformIdentifier("app2/portmaster-app"),
)
}
// // Binaries
// if onWindows {
// identifiers = append(
// identifiers,
// PlatformIdentifier("core/portmaster-core.exe"),
// PlatformIdentifier("kext/portmaster-kext.sys"),
// PlatformIdentifier("kext/portmaster-kext.pdb"),
// PlatformIdentifier("start/portmaster-start.exe"),
// PlatformIdentifier("notifier/portmaster-notifier.exe"),
// PlatformIdentifier("notifier/portmaster-wintoast.dll"),
// PlatformIdentifier("app2/portmaster-app.zip"),
// )
// } else {
// identifiers = append(
// identifiers,
// PlatformIdentifier("core/portmaster-core"),
// PlatformIdentifier("start/portmaster-start"),
// PlatformIdentifier("notifier/portmaster-notifier"),
// PlatformIdentifier("app2/portmaster-app"),
// )
// }
// Components, Assets and Data
identifiers = append(
identifiers,
// // Components, Assets and Data
// identifiers = append(
// identifiers,
// User interface components
PlatformIdentifier("app/portmaster-app.zip"),
"all/ui/modules/portmaster.zip",
"all/ui/modules/assets.zip",
)
// // User interface components
// PlatformIdentifier("app/portmaster-app.zip"),
// "all/ui/modules/portmaster.zip",
// "all/ui/modules/assets.zip",
// )
return identifiers
}
// return identifiers
// }
// AutoUnpackUpdates returns assets that need unpacking.
func AutoUnpackUpdates() []string {
if intelOnly.IsSet() {
return []string{}
}
// // AutoUnpackUpdates returns assets that need unpacking.
// func AutoUnpackUpdates() []string {
// if intelOnly.IsSet() {
// return []string{}
// }
return []string{
PlatformIdentifier("app/portmaster-app.zip"),
PlatformIdentifier("app2/portmaster-app.zip"),
}
}
// return []string{
// PlatformIdentifier("app/portmaster-app.zip"),
// PlatformIdentifier("app2/portmaster-app.zip"),
// }
// }

View File

@@ -1,110 +0,0 @@
package updates
import (
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
// UpdateIndex describes one update channel: where its files are
// installed, where fresh downloads are staged, and how its bundle
// index is located and fetched.
type UpdateIndex struct {
	// Directory is the path holding the currently installed files,
	// including the installed copy of the index file.
	Directory string
	// DownloadDirectory is the staging path newly downloaded files
	// (and the freshly fetched index) are written to.
	DownloadDirectory string
	// Ignore lists entries that are excluded from update handling
	// (e.g. "databases", "intel", "config.json" for the binary index).
	Ignore []string
	// IndexURLs are candidate locations of the remote index file,
	// tried in order until one download succeeds.
	IndexURLs []string
	// IndexFile is the file name of the bundle index inside both
	// Directory and DownloadDirectory.
	IndexFile string
	// AutoApply marks the channel's downloads for automatic
	// application — presumably without a restart; confirm against
	// the registry code that consumes this flag.
	AutoApply bool
}
// downloadIndexFile fetches the bundle index, trying each configured
// URL in order and stopping at the first success. Both the install and
// the download directory are created beforehand; directory-creation
// errors are deliberately ignored here, since a real problem will
// surface as a download error anyway.
func (ui *UpdateIndex) downloadIndexFile() error {
	_ = os.MkdirAll(ui.Directory, defaultDirMode)
	_ = os.MkdirAll(ui.DownloadDirectory, defaultDirMode)

	var lastErr error
	for _, indexURL := range ui.IndexURLs {
		lastErr = ui.downloadIndexFileFromURL(indexURL)
		if lastErr == nil {
			// First successful download wins.
			return nil
		}
		log.Warningf("updates: %s", lastErr)
	}
	// Nil when no URLs are configured or the last attempt succeeded;
	// otherwise the error of the last failed attempt.
	return lastErr
}
// checkForUpdates refreshes the remote index and reports whether the
// available bundle version differs from the installed one.
// If the installed bundle cannot be read, it reports true together
// with the error: a missing installation is treated as "update
// available".
func (ui *UpdateIndex) checkForUpdates() (bool, error) {
	if err := ui.downloadIndexFile(); err != nil {
		return false, err
	}

	installed, err := ui.GetInstallBundle()
	if err != nil {
		// No readable local bundle: act as if there is an update.
		return true, err
	}

	available, err := ui.GetUpdateBundle()
	if err != nil {
		return false, err
	}
	return installed.Version != available.Version, nil
}
// downloadIndexFileFromURL downloads the index file from url into the
// download directory, replacing any existing copy.
//
// Fixes over the previous version:
//   - the HTTP status code is now checked, so an error page (404/500
//     body) is no longer saved as the index file;
//   - the destination is opened with O_TRUNC, so a shorter new index
//     no longer leaves trailing bytes of the old one behind (which
//     would corrupt the JSON).
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
	client := http.Client{}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("failed a get request to %s: %w", url, err)
	}
	defer func() { _ = resp.Body.Close() }()

	// Only accept a successful response.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %q while downloading index from %s", resp.Status, url)
	}

	filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultFileMode)
	if err != nil {
		return err
	}
	defer func() { _ = file.Close() }()

	// Stream the body straight to disk.
	if _, err := io.Copy(file, resp.Body); err != nil {
		return err
	}
	return nil
}
// GetInstallBundle reads and parses the bundle index of the currently
// installed files.
func (ui *UpdateIndex) GetInstallBundle() (*Bundle, error) {
	return ui.GetBundle(ui.Directory + "/" + ui.IndexFile)
}
// GetUpdateBundle reads and parses the freshly downloaded bundle index
// from the staging directory.
func (ui *UpdateIndex) GetUpdateBundle() (*Bundle, error) {
	return ui.GetBundle(ui.DownloadDirectory + "/" + ui.IndexFile)
}
// GetBundle reads and parses the bundle index located at indexFile.
//
// Improvements: the manual Open + io.ReadAll sequence is replaced by
// os.ReadFile (one call, same semantics, no forgotten Close), and the
// JSON parse error — previously returned bare — is wrapped with the
// file path for context.
func (ui *UpdateIndex) GetBundle(indexFile string) (*Bundle, error) {
	// The index is a small JSON file; read it in one call.
	content, err := os.ReadFile(indexFile)
	if err != nil {
		return nil, fmt.Errorf("failed to open index file: %w", err)
	}

	// Parse.
	var bundle Bundle
	if err := json.Unmarshal(content, &bundle); err != nil {
		return nil, fmt.Errorf("failed to parse index file %s: %w", indexFile, err)
	}
	return &bundle, nil
}

View File

@@ -6,9 +6,6 @@ import (
"time"
"github.com/safing/portmaster/base/database"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/service/mgr"
)
const (
@@ -17,10 +14,6 @@ const (
enableSoftwareUpdatesKey = "core/automaticUpdates"
enableIntelUpdatesKey = "core/automaticIntelUpdates"
// ModuleName is the name of the update module
// and can be used when declaring module dependencies.
ModuleName = "updates"
// VersionUpdateEvent is emitted every time a new
// version of a monitored resource is selected.
// During module initialization VersionUpdateEvent
@@ -37,8 +30,6 @@ const (
)
var (
registry *updater.ResourceRegistry
userAgentFromFlag string
updateServerFromFlag string
@@ -57,205 +48,13 @@ const (
updateTaskRepeatDuration = 1 * time.Hour
)
func start() error {
// module.restartWorkerMgr.Repeat(10 * time.Minute)
// module.instance.Config().EventConfigChange.AddCallback("update registry config", updateRegistryConfig)
// // create registry
// registry = &updater.ResourceRegistry{
// Name: ModuleName,
// UpdateURLs: DefaultUpdateURLs,
// UserAgent: UserAgent,
// MandatoryUpdates: helper.MandatoryUpdates(),
// AutoUnpack: helper.AutoUnpackUpdates(),
// Verification: helper.VerificationConfig,
// DevMode: devMode(),
// Online: true,
// }
// // Override values from flags.
// if userAgentFromFlag != "" {
// registry.UserAgent = userAgentFromFlag
// }
// if updateServerFromFlag != "" {
// registry.UpdateURLs = []string{updateServerFromFlag}
// }
// // pre-init state
// updateStateExport, err := LoadStateExport()
// if err != nil {
// log.Debugf("updates: failed to load exported update state: %s", err)
// } else if updateStateExport.UpdateState != nil {
// err := registry.PreInitUpdateState(*updateStateExport.UpdateState)
// if err != nil {
// return err
// }
// }
// initialize
// err := registry.Initialize(dataroot.Root().ChildDir(updatesDirName, 0o0755))
// if err != nil {
// return err
// }
// // register state provider
// err = registerRegistryStateProvider()
// if err != nil {
// return err
// }
// registry.StateNotifyFunc = pushRegistryState
// // Set indexes based on the release channel.
// warning := helper.SetIndexes(
// registry,
// initialReleaseChannel,
// true,
// enableSoftwareUpdates() && !DisableSoftwareAutoUpdate,
// enableIntelUpdates(),
// )
// if warning != nil {
// log.Warningf("updates: %s", warning)
// }
// err = registry.LoadIndexes(module.m.Ctx())
// if err != nil {
// log.Warningf("updates: failed to load indexes: %s", err)
// }
// err = registry.ScanStorage("")
// if err != nil {
// log.Warningf("updates: error during storage scan: %s", err)
// }
// registry.SelectVersions()
// module.EventVersionsUpdated.Submit(struct{}{})
// // Initialize the version export - this requires the registry to be set up.
// err = initVersionExport()
// if err != nil {
// return err
// }
// // start updater task
// if !disableTaskSchedule {
// _ = module.updateWorkerMgr.Repeat(30 * time.Minute)
// }
// if updateASAP {
// module.updateWorkerMgr.Go()
// }
// // react to upgrades
// if err := initUpgrader(); err != nil {
// return err
// }
// warnOnIncorrectParentPath()
return nil
}
// TriggerUpdate queues the update task to execute ASAP.
func TriggerUpdate(forceIndexCheck, downloadAll bool) error {
// switch {
// case !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates():
// return errors.New("automatic updating is disabled")
// default:
// if forceIndexCheck {
// forceCheck.Set()
// }
// if downloadAll {
// forceDownload.Set()
// }
// // If index check if forced, start quicker.
// module.updateWorkerMgr.Go()
// }
log.Debugf("updates: triggering update to run as soon as possible")
return nil
}
// DisableUpdateSchedule disables the update schedule.
// If called, updates are only checked when TriggerUpdate()
// is called.
func DisableUpdateSchedule() error {
// TODO: Updater state should be always on
// switch module.Status() {
// case modules.StatusStarting, modules.StatusOnline, modules.StatusStopping:
// return errors.New("module already online")
// }
return nil
}
func checkForUpdates(ctx *mgr.WorkerCtx) (err error) {
// Set correct error if context was canceled.
// defer func() {
// select {
// case <-ctx.Done():
// err = context.Canceled
// default:
// }
// }()
// // Get flags.
// forceIndexCheck := forceCheck.SetToIf(true, false)
// downloadAll := forceDownload.SetToIf(true, false)
// // Check again if downloading updates is enabled, or forced.
// if !forceIndexCheck && !enableSoftwareUpdates() && !enableIntelUpdates() {
// log.Warningf("updates: automatic updates are disabled")
// return nil
// }
// defer func() {
// // Resolve any error and send success notification.
// if err == nil {
// log.Infof("updates: successfully checked for updates")
// notifyUpdateSuccess(forceIndexCheck)
// return
// }
// // Log and notify error.
// log.Errorf("updates: check failed: %s", err)
// notifyUpdateCheckFailed(forceIndexCheck, err)
// }()
// if err = registry.UpdateIndexes(ctx.Ctx()); err != nil {
// err = fmt.Errorf("failed to update indexes: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
// err = registry.DownloadUpdates(ctx.Ctx(), downloadAll)
// if err != nil {
// err = fmt.Errorf("failed to download updates: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
// registry.SelectVersions()
// // Unpack selected resources.
// err = registry.UnpackResources()
// if err != nil {
// err = fmt.Errorf("failed to unpack updates: %w", err)
// return //nolint:nakedret // TODO: Would "return err" work with the defer?
// }
// // Purge old resources
// registry.Purge(2)
// module.EventResourcesUpdated.Submit(struct{}{})
return nil
}
func stop() error {
if registry != nil {
err := registry.Cleanup()
if err != nil {
log.Warningf("updates: failed to clean up registry: %s", err)
}
}
// if registry != nil {
// err := registry.Cleanup()
// if err != nil {
// log.Warningf("updates: failed to clean up registry: %s", err)
// }
// }
return nil
}

View File

@@ -2,10 +2,8 @@ package updates
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"sync/atomic"
"github.com/safing/portmaster/base/api"
@@ -13,34 +11,33 @@ import (
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/registry"
)
const (
defaultFileMode = os.FileMode(0o0644)
defaultDirMode = os.FileMode(0o0755)
)
var applyUpdates bool
func init() {
flag.BoolVar(&applyUpdates, "update", false, "apply downloaded updates")
}
// Updates provides access to released artifacts.
type Updates struct {
m *mgr.Manager
states *mgr.StateMgr
updateWorkerMgr *mgr.WorkerMgr
restartWorkerMgr *mgr.WorkerMgr
updateBinaryWorkerMgr *mgr.WorkerMgr
updateIntelWorkerMgr *mgr.WorkerMgr
restartWorkerMgr *mgr.WorkerMgr
EventResourcesUpdated *mgr.EventMgr[struct{}]
EventVersionsUpdated *mgr.EventMgr[struct{}]
binUpdates UpdateIndex
intelUpdates UpdateIndex
registry registry.Registry
instance instance
}
var (
module *Updates
shimLoaded atomic.Bool
)
var shimLoaded atomic.Bool
// New returns a new UI module.
func New(instance instance) (*Updates, error) {
@@ -49,20 +46,22 @@ func New(instance instance) (*Updates, error) {
}
m := mgr.New("Updates")
module = &Updates{
module := &Updates{
m: m,
states: m.NewStateMgr(),
EventResourcesUpdated: mgr.NewEventMgr[struct{}](ResourceUpdateEvent, m),
EventVersionsUpdated: mgr.NewEventMgr[struct{}](VersionUpdateEvent, m),
instance: instance,
instance: instance,
}
// Events
module.updateWorkerMgr = m.NewWorkerMgr("updater", module.checkForUpdates, nil)
module.updateBinaryWorkerMgr = m.NewWorkerMgr("binary updater", module.checkForBinaryUpdates, nil)
module.updateIntelWorkerMgr = m.NewWorkerMgr("intel updater", module.checkForIntelUpdates, nil)
module.restartWorkerMgr = m.NewWorkerMgr("automatic restart", automaticRestart, nil)
module.binUpdates = UpdateIndex{
binIndex := registry.UpdateIndex{
Directory: "/usr/lib/portmaster",
DownloadDirectory: "/var/portmaster/new_bin",
Ignore: []string{"databases", "intel", "config.json"},
@@ -71,62 +70,48 @@ func New(instance instance) (*Updates, error) {
AutoApply: false,
}
module.intelUpdates = UpdateIndex{
intelIndex := registry.UpdateIndex{
Directory: "/var/portmaster/intel",
DownloadDirectory: "/var/portmaster/new_intel",
IndexURLs: []string{"http://localhost:8000/test-intel.json"},
IndexFile: "intel-index.json",
AutoApply: true,
}
module.registry = registry.New(binIndex, intelIndex)
return module, nil
}
func deleteUnfinishedDownloads(rootDir string) error {
return filepath.Walk(rootDir, func(path string, info os.FileInfo, err error) error {
func (u *Updates) checkForBinaryUpdates(_ *mgr.WorkerCtx) error {
hasUpdates, err := u.registry.CheckForBinaryUpdates()
if err != nil {
log.Errorf("updates: failed to check for binary updates: %s", err)
}
if hasUpdates {
log.Infof("updates: there is updates available in the binary bundle")
err = u.registry.DownloadBinaryUpdates()
if err != nil {
return err
log.Errorf("updates: failed to download bundle: %s", err)
}
// Check if the current file has the specified extension
if !info.IsDir() && strings.HasSuffix(info.Name(), ".download") {
log.Warningf("updates deleting unfinished: %s\n", path)
err := os.Remove(path)
if err != nil {
return fmt.Errorf("failed to delete file %s: %w", path, err)
}
}
return nil
})
} else {
log.Infof("updates: no new binary updates")
}
return nil
}
func (u *Updates) checkForUpdates(_ *mgr.WorkerCtx) error {
_ = deleteUnfinishedDownloads(u.binUpdates.DownloadDirectory)
hasUpdate, err := u.binUpdates.checkForUpdates()
func (u *Updates) checkForIntelUpdates(_ *mgr.WorkerCtx) error {
hasUpdates, err := u.registry.CheckForIntelUpdates()
if err != nil {
log.Warningf("failed to get binary index file: %s", err)
log.Errorf("updates: failed to check for intel updates: %s", err)
}
if hasUpdate {
binBundle, err := u.binUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Bin Bundle: %+v", binBundle)
_ = os.MkdirAll(u.binUpdates.DownloadDirectory, defaultDirMode)
binBundle.downloadAndVerify(u.binUpdates.DownloadDirectory)
}
}
_ = deleteUnfinishedDownloads(u.intelUpdates.DownloadDirectory)
hasUpdate, err = u.intelUpdates.checkForUpdates()
if err != nil {
log.Warningf("failed to get intel index file: %s", err)
}
if hasUpdate {
intelBundle, err := u.intelUpdates.GetUpdateBundle()
if err == nil {
log.Debugf("Intel Bundle: %+v", intelBundle)
_ = os.MkdirAll(u.intelUpdates.DownloadDirectory, defaultDirMode)
intelBundle.downloadAndVerify(u.intelUpdates.DownloadDirectory)
if hasUpdates {
log.Infof("updates: there is updates available in the intel bundle")
err = u.registry.DownloadIntelUpdates()
if err != nil {
log.Errorf("updates: failed to download bundle: %s", err)
}
} else {
log.Infof("updates: no new intel data updates")
}
return nil
}
@@ -143,38 +128,36 @@ func (u *Updates) Manager() *mgr.Manager {
// Start starts the module.
func (u *Updates) Start() error {
initConfig()
u.m.Go("check for updates", func(w *mgr.WorkerCtx) error {
binBundle, err := u.binUpdates.GetInstallBundle()
if err != nil {
log.Warningf("failed to get binary bundle: %s", err)
} else {
err = binBundle.Verify(u.binUpdates.Directory)
if err != nil {
log.Warningf("binary bundle is not valid: %s", err)
} else {
log.Infof("binary bundle is valid")
}
}
// initConfig()
intelBundle, err := u.intelUpdates.GetInstallBundle()
if applyUpdates {
err := u.registry.ApplyBinaryUpdates()
if err != nil {
log.Warningf("failed to get intel bundle: %s", err)
} else {
err = intelBundle.Verify(u.intelUpdates.Directory)
if err != nil {
log.Warningf("intel bundle is not valid: %s", err)
} else {
log.Infof("intel bundle is valid")
}
log.Errorf("updates: failed to apply binary updates: %s", err)
}
err = u.registry.ApplyIntelUpdates()
if err != nil {
log.Errorf("updates: failed to apply intel updates: %s", err)
}
u.instance.Restart()
return nil
})
u.updateWorkerMgr.Go()
}
err := u.registry.Initialize()
if err != nil {
// TODO(vladimir): Find a better way to handle this error. The service will stop if parsing of the bundle files fails.
return fmt.Errorf("failed to initialize registry: %w", err)
}
u.updateBinaryWorkerMgr.Go()
u.updateIntelWorkerMgr.Go()
return nil
}
func (u *Updates) GetFile(id string) (*registry.File, error) {
return u.registry.GetFile(id)
}
// Stop stops the module.
func (u *Updates) Stop() error {
return stop()

View File

@@ -1,12 +1,8 @@
package updates
import (
"fmt"
"strings"
"sync/atomic"
"time"
"github.com/safing/portmaster/base/notifications"
)
const (
@@ -25,109 +21,109 @@ func (u *Updates) notificationsEnabled() bool {
return u.instance.Notifications() != nil
}
func notifyUpdateSuccess(force bool) {
if !module.notificationsEnabled() {
return
}
// func notifyUpdateSuccess(force bool) {
// if !module.notificationsEnabled() {
// return
// }
updateFailedCnt.Store(0)
module.states.Clear()
updateState := registry.GetState().Updates
// updateFailedCnt.Store(0)
// module.states.Clear()
// updateState := registry.GetState().Updates
flavor := updateSuccess
switch {
case len(updateState.PendingDownload) > 0:
// Show notification if there are pending downloads.
flavor = updateSuccessPending
case updateState.LastDownloadAt != nil &&
time.Since(*updateState.LastDownloadAt) < 5*time.Second:
// Show notification if we downloaded something within the last minute.
flavor = updateSuccessDownloaded
case force:
// Always show notification if update was manually triggered.
default:
// Otherwise, the update was uneventful. Do not show notification.
return
}
// flavor := updateSuccess
// switch {
// case len(updateState.PendingDownload) > 0:
// // Show notification if there are pending downloads.
// flavor = updateSuccessPending
// case updateState.LastDownloadAt != nil &&
// time.Since(*updateState.LastDownloadAt) < 5*time.Second:
// // Show notification if we downloaded something within the last minute.
// flavor = updateSuccessDownloaded
// case force:
// // Always show notification if update was manually triggered.
// default:
// // Otherwise, the update was uneventful. Do not show notification.
// return
// }
switch flavor {
case updateSuccess:
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: "Portmaster Is Up-To-Date",
Message: "Portmaster successfully checked for updates. Everything is up to date.\n\n" + getUpdatingInfoMsg(),
Expires: time.Now().Add(1 * time.Minute).Unix(),
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
},
})
// switch flavor {
// case updateSuccess:
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: "Portmaster Is Up-To-Date",
// Message: "Portmaster successfully checked for updates. Everything is up to date.\n\n" + getUpdatingInfoMsg(),
// Expires: time.Now().Add(1 * time.Minute).Unix(),
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// },
// })
case updateSuccessPending:
msg := fmt.Sprintf(
`%d updates are available for download:
// case updateSuccessPending:
// msg := fmt.Sprintf(
// `%d updates are available for download:
- %s
// - %s
Press "Download Now" to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
len(updateState.PendingDownload),
strings.Join(updateState.PendingDownload, "\n- "),
)
// Press "Download Now" to download and automatically apply all pending updates. You will be notified of important updates that need restarting.`,
// len(updateState.PendingDownload),
// strings.Join(updateState.PendingDownload, "\n- "),
// )
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: fmt.Sprintf("%d Updates Available", len(updateState.PendingDownload)),
Message: msg,
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
{
ID: "download",
Text: "Download Now",
Type: notifications.ActionTypeWebhook,
Payload: &notifications.ActionTypeWebhookPayload{
URL: apiPathCheckForUpdates + "?download",
ResultAction: "display",
},
},
},
})
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: fmt.Sprintf("%d Updates Available", len(updateState.PendingDownload)),
// Message: msg,
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// {
// ID: "download",
// Text: "Download Now",
// Type: notifications.ActionTypeWebhook,
// Payload: &notifications.ActionTypeWebhookPayload{
// URL: apiPathCheckForUpdates + "?download",
// ResultAction: "display",
// },
// },
// },
// })
case updateSuccessDownloaded:
msg := fmt.Sprintf(
`%d updates were downloaded and applied:
// case updateSuccessDownloaded:
// msg := fmt.Sprintf(
// `%d updates were downloaded and applied:
- %s
// - %s
%s
`,
len(updateState.LastDownload),
strings.Join(updateState.LastDownload, "\n- "),
getUpdatingInfoMsg(),
)
// %s
// `,
// len(updateState.LastDownload),
// strings.Join(updateState.LastDownload, "\n- "),
// getUpdatingInfoMsg(),
// )
notifications.Notify(&notifications.Notification{
EventID: updateSuccess,
Type: notifications.Info,
Title: fmt.Sprintf("%d Updates Applied", len(updateState.LastDownload)),
Message: msg,
Expires: time.Now().Add(1 * time.Minute).Unix(),
AvailableActions: []*notifications.Action{
{
ID: "ack",
Text: "OK",
},
},
})
// notifications.Notify(&notifications.Notification{
// EventID: updateSuccess,
// Type: notifications.Info,
// Title: fmt.Sprintf("%d Updates Applied", len(updateState.LastDownload)),
// Message: msg,
// Expires: time.Now().Add(1 * time.Minute).Unix(),
// AvailableActions: []*notifications.Action{
// {
// ID: "ack",
// Text: "OK",
// },
// },
// })
}
}
// }
// }
func getUpdatingInfoMsg() string {
switch {
@@ -140,41 +136,41 @@ func getUpdatingInfoMsg() string {
}
}
func notifyUpdateCheckFailed(force bool, err error) {
if !module.notificationsEnabled() {
return
}
// func notifyUpdateCheckFailed(force bool, err error) {
// if !module.notificationsEnabled() {
// return
// }
failedCnt := updateFailedCnt.Add(1)
lastSuccess := registry.GetState().Updates.LastSuccessAt
// failedCnt := updateFailedCnt.Add(1)
// lastSuccess := registry.GetState().Updates.LastSuccessAt
switch {
case force:
// Always show notification if update was manually triggered.
case failedCnt < failedUpdateNotifyCountThreshold:
// Not failed often enough for notification.
return
case lastSuccess == nil:
// No recorded successful update.
case time.Now().Add(-failedUpdateNotifyDurationThreshold).Before(*lastSuccess):
// Failed too recently for notification.
return
}
// switch {
// case force:
// // Always show notification if update was manually triggered.
// case failedCnt < failedUpdateNotifyCountThreshold:
// // Not failed often enough for notification.
// return
// case lastSuccess == nil:
// // No recorded successful update.
// case time.Now().Add(-failedUpdateNotifyDurationThreshold).Before(*lastSuccess):
// // Failed too recently for notification.
// return
// }
notifications.NotifyWarn(
updateFailed,
"Update Check Failed",
fmt.Sprintf(
"Portmaster failed to check for updates. This might be a temporary issue of your device, your network or the update servers. The Portmaster will automatically try again later. The error was: %s",
err,
),
notifications.Action{
Text: "Try Again Now",
Type: notifications.ActionTypeWebhook,
Payload: &notifications.ActionTypeWebhookPayload{
URL: apiPathCheckForUpdates,
ResultAction: "display",
},
},
).SyncWithState(module.states)
}
// notifications.NotifyWarn(
// updateFailed,
// "Update Check Failed",
// fmt.Sprintf(
// "Portmaster failed to check for updates. This might be a temporary issue of your device, your network or the update servers. The Portmaster will automatically try again later. The error was: %s",
// err,
// ),
// notifications.Action{
// Text: "Try Again Now",
// Type: notifications.ActionTypeWebhook,
// Payload: &notifications.ActionTypeWebhookPayload{
// URL: apiPathCheckForUpdates,
// ResultAction: "display",
// },
// },
// ).SyncWithState(module.states)
// }

View File

@@ -1,204 +1,201 @@
package updates
import (
"bytes"
"crypto/sha256"
_ "embed"
"encoding/hex"
"errors"
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
// import (
// "crypto/sha256"
// _ "embed"
// "encoding/hex"
// "errors"
// "fmt"
// "io/fs"
// "os"
// "path/filepath"
"github.com/tevino/abool"
"golang.org/x/exp/slices"
// "github.com/tevino/abool"
// "golang.org/x/exp/slices"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/utils/renameio"
)
// "github.com/safing/portmaster/base/dataroot"
// "github.com/safing/portmaster/base/log"
// )
var (
portmasterCoreServiceFilePath = "portmaster.service"
portmasterNotifierServiceFilePath = "portmaster_notifier.desktop"
backupExtension = ".backup"
// var (
// portmasterCoreServiceFilePath = "portmaster.service"
// portmasterNotifierServiceFilePath = "portmaster_notifier.desktop"
// backupExtension = ".backup"
//go:embed assets/portmaster.service
currentPortmasterCoreServiceFile []byte
// //go:embed assets/portmaster.service
// currentPortmasterCoreServiceFile []byte
checkedSystemIntegration = abool.New()
// checkedSystemIntegration = abool.New()
// ErrRequiresManualUpgrade is returned when a system integration file requires a manual upgrade.
ErrRequiresManualUpgrade = errors.New("requires a manual upgrade")
)
// // ErrRequiresManualUpgrade is returned when a system integration file requires a manual upgrade.
// ErrRequiresManualUpgrade = errors.New("requires a manual upgrade")
// )
func upgradeSystemIntegration() {
// Check if we already checked the system integration.
if !checkedSystemIntegration.SetToIf(false, true) {
return
}
// func upgradeSystemIntegration() {
// // Check if we already checked the system integration.
// if !checkedSystemIntegration.SetToIf(false, true) {
// return
// }
// Upgrade portmaster core systemd service.
err := upgradeSystemIntegrationFile(
"portmaster core systemd service",
filepath.Join(dataroot.Root().Path, portmasterCoreServiceFilePath),
0o0600,
currentPortmasterCoreServiceFile,
[]string{
"bc26dd37e6953af018ad3676ee77570070e075f2b9f5df6fa59d65651a481468", // Commit 19c76c7 on 2022-01-25
"cc0cb49324dfe11577e8c066dd95cc03d745b50b2153f32f74ca35234c3e8cb5", // Commit ef479e5 on 2022-01-24
"d08a3b5f3aee351f8e120e6e2e0a089964b94c9e9d0a9e5fa822e60880e315fd", // Commit b64735e on 2021-12-07
},
)
if err != nil {
log.Warningf("updates: %s", err)
return
}
// // Upgrade portmaster core systemd service.
// err := upgradeSystemIntegrationFile(
// "portmaster core systemd service",
// filepath.Join(dataroot.Root().Path, portmasterCoreServiceFilePath),
// 0o0600,
// currentPortmasterCoreServiceFile,
// []string{
// "bc26dd37e6953af018ad3676ee77570070e075f2b9f5df6fa59d65651a481468", // Commit 19c76c7 on 2022-01-25
// "cc0cb49324dfe11577e8c066dd95cc03d745b50b2153f32f74ca35234c3e8cb5", // Commit ef479e5 on 2022-01-24
// "d08a3b5f3aee351f8e120e6e2e0a089964b94c9e9d0a9e5fa822e60880e315fd", // Commit b64735e on 2021-12-07
// },
// )
// if err != nil {
// log.Warningf("updates: %s", err)
// return
// }
// Upgrade portmaster notifier systemd user service.
// Permissions only!
err = upgradeSystemIntegrationFile(
"portmaster notifier systemd user service",
filepath.Join(dataroot.Root().Path, portmasterNotifierServiceFilePath),
0o0644,
nil, // Do not update contents.
nil, // Do not update contents.
)
if err != nil {
log.Warningf("updates: %s", err)
return
}
}
// // Upgrade portmaster notifier systemd user service.
// // Permissions only!
// err = upgradeSystemIntegrationFile(
// "portmaster notifier systemd user service",
// filepath.Join(dataroot.Root().Path, portmasterNotifierServiceFilePath),
// 0o0644,
// nil, // Do not update contents.
// nil, // Do not update contents.
// )
// if err != nil {
// log.Warningf("updates: %s", err)
// return
// }
// }
// upgradeSystemIntegrationFile upgrades the file contents and permissions.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
// The supplied hashes must be sha256 hex-encoded.
func upgradeSystemIntegrationFile(
	name string, // Human-readable name, used only in log and error messages.
	filePath string, // Path of the system integration file to upgrade.
	fileMode fs.FileMode, // Desired permissions; 0 skips the permission upgrade.
	fileData []byte, // Desired contents; empty skips the content upgrade.
	permittedUpgradeHashes []string, // sha256 hex sums of file versions that may be overwritten.
) error {
	// Upgrade file contents.
	if len(fileData) > 0 {
		if err := upgradeSystemIntegrationFileContents(name, filePath, fileData, permittedUpgradeHashes); err != nil {
			return err
		}
	}
	// // upgradeSystemIntegrationFile upgrades the file contents and permissions.
	// // System integration files are not necessarily present and may also be
	// // edited by third parties, such as the OS itself or other installers.
	// // The supplied hashes must be sha256 hex-encoded.
	// func upgradeSystemIntegrationFile(
	// name string,
	// filePath string,
	// fileMode fs.FileMode,
	// fileData []byte,
	// permittedUpgradeHashes []string,
	// ) error {
	// // Upgrade file contents.
	// if len(fileData) > 0 {
	// if err := upgradeSystemIntegrationFileContents(name, filePath, fileData, permittedUpgradeHashes); err != nil {
	// return err
	// }
	// }
	// Upgrade file permissions.
	if fileMode != 0 {
		if err := upgradeSystemIntegrationFilePermissions(name, filePath, fileMode); err != nil {
			return err
		}
	}
	// // Upgrade file permissions.
	// if fileMode != 0 {
	// if err := upgradeSystemIntegrationFilePermissions(name, filePath, fileMode); err != nil {
	// return err
	// }
	// }
	return nil
}
// return nil
// }
// upgradeSystemIntegrationFileContents upgrades the file contents.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
// The supplied hashes must be sha256 hex-encoded.
func upgradeSystemIntegrationFileContents(
	name string, // Human-readable name, used only in log and error messages.
	filePath string, // Path of the file to upgrade in place.
	fileData []byte, // New contents to install.
	permittedUpgradeHashes []string, // sha256 hex sums of versions that may be overwritten.
) error {
	// Read existing file.
	// A missing file is not an error: there is nothing to upgrade then.
	existingFileData, err := os.ReadFile(filePath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return fmt.Errorf("failed to read %s at %s: %w", name, filePath, err)
	}
	// // upgradeSystemIntegrationFileContents upgrades the file contents.
	// // System integration files are not necessarily present and may also be
	// // edited by third parties, such as the OS itself or other installers.
	// // The supplied hashes must be sha256 hex-encoded.
	// func upgradeSystemIntegrationFileContents(
	// name string,
	// filePath string,
	// fileData []byte,
	// permittedUpgradeHashes []string,
	// ) error {
	// // Read existing file.
	// existingFileData, err := os.ReadFile(filePath)
	// if err != nil {
	// if errors.Is(err, os.ErrNotExist) {
	// return nil
	// }
	// return fmt.Errorf("failed to read %s at %s: %w", name, filePath, err)
	// }
	// Check if file is already the current version.
	existingSum := sha256.Sum256(existingFileData)
	existingHexSum := hex.EncodeToString(existingSum[:])
	currentSum := sha256.Sum256(fileData)
	currentHexSum := hex.EncodeToString(currentSum[:])
	if existingHexSum == currentHexSum {
		log.Debugf("updates: %s at %s is up to date", name, filePath)
		return nil
	}
	// // Check if file is already the current version.
	// existingSum := sha256.Sum256(existingFileData)
	// existingHexSum := hex.EncodeToString(existingSum[:])
	// currentSum := sha256.Sum256(fileData)
	// currentHexSum := hex.EncodeToString(currentSum[:])
	// if existingHexSum == currentHexSum {
	// log.Debugf("updates: %s at %s is up to date", name, filePath)
	// return nil
	// }
	// Check if we are allowed to upgrade from the existing file.
	// Unknown (possibly user-edited) versions are never overwritten automatically.
	if !slices.Contains[[]string, string](permittedUpgradeHashes, existingHexSum) {
		return fmt.Errorf("%s at %s (sha256:%s) %w, as it is not a previously published version and cannot be automatically upgraded - try installing again", name, filePath, existingHexSum, ErrRequiresManualUpgrade)
	}
	// // Check if we are allowed to upgrade from the existing file.
	// if !slices.Contains[[]string, string](permittedUpgradeHashes, existingHexSum) {
	// return fmt.Errorf("%s at %s (sha256:%s) %w, as it is not a previously published version and cannot be automatically upgraded - try installing again", name, filePath, existingHexSum, ErrRequiresManualUpgrade)
	// }
	// Start with upgrade!
	// // Start with upgrade!
	// Make backup of existing file.
	err = CopyFile(filePath, filePath+backupExtension)
	if err != nil {
		return fmt.Errorf(
			"failed to create backup of %s from %s to %s: %w",
			name,
			filePath,
			filePath+backupExtension,
			err,
		)
	}
	// // Make backup of existing file.
	// err = CopyFile(filePath, filePath+backupExtension)
	// if err != nil {
	// return fmt.Errorf(
	// "failed to create backup of %s from %s to %s: %w",
	// name,
	// filePath,
	// filePath+backupExtension,
	// err,
	// )
	// }
	// Open destination file for writing.
	// renameio writes to a temp file and swaps it in atomically on close.
	atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, filePath)
	if err != nil {
		return fmt.Errorf("failed to create tmp file to update %s at %s: %w", name, filePath, err)
	}
	defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
	// // Open destination file for writing.
	// // atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, filePath)
	// // if err != nil {
	// // return fmt.Errorf("failed to create tmp file to update %s at %s: %w", name, filePath, err)
	// // }
	// // defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
	// Write file.
	_, err = io.Copy(atomicDstFile, bytes.NewReader(fileData))
	if err != nil {
		return err
	}
	// // // Write file.
	// // _, err = io.Copy(atomicDstFile, bytes.NewReader(fileData))
	// // if err != nil {
	// // return err
	// // }
	// Finalize file.
	err = atomicDstFile.CloseAtomicallyReplace()
	if err != nil {
		return fmt.Errorf("failed to finalize update of %s at %s: %w", name, filePath, err)
	}
	// // // Finalize file.
	// // err = atomicDstFile.CloseAtomicallyReplace()
	// // if err != nil {
	// // return fmt.Errorf("failed to finalize update of %s at %s: %w", name, filePath, err)
	// // }
	log.Warningf("updates: %s at %s was upgraded to %s - a reboot may be required", name, filePath, currentHexSum)
	return nil
}
// log.Warningf("updates: %s at %s was upgraded to %s - a reboot may be required", name, filePath, currentHexSum)
// return nil
// }
// upgradeSystemIntegrationFilePermissions upgrades the file permissions.
// System integration files are not necessarily present and may also be
// edited by third parties, such as the OS itself or other installers.
func upgradeSystemIntegrationFilePermissions(
	name string, // Human-readable name, used only in log and error messages.
	filePath string, // Path of the file whose permissions are upgraded.
	fileMode fs.FileMode, // Desired permission bits.
) error {
	// Get current file permissions.
	// A missing file is not an error: there is nothing to upgrade then.
	stat, err := os.Stat(filePath)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			return nil
		}
		return fmt.Errorf("failed to read %s file metadata at %s: %w", name, filePath, err)
	}
	// // upgradeSystemIntegrationFilePermissions upgrades the file permissions.
	// // System integration files are not necessarily present and may also be
	// // edited by third parties, such as the OS itself or other installers.
	// func upgradeSystemIntegrationFilePermissions(
	// name string,
	// filePath string,
	// fileMode fs.FileMode,
	// ) error {
	// // Get current file permissions.
	// stat, err := os.Stat(filePath)
	// if err != nil {
	// if errors.Is(err, os.ErrNotExist) {
	// return nil
	// }
	// return fmt.Errorf("failed to read %s file metadata at %s: %w", name, filePath, err)
	// }
	// If permissions are as expected, do nothing.
	if stat.Mode().Perm() == fileMode {
		return nil
	}
	// // If permissions are as expected, do nothing.
	// if stat.Mode().Perm() == fileMode {
	// return nil
	// }
	// Otherwise, set correct permissions.
	err = os.Chmod(filePath, fileMode)
	if err != nil {
		return fmt.Errorf("failed to update %s file permissions at %s: %w", name, filePath, err)
	}
	// // Otherwise, set correct permissions.
	// err = os.Chmod(filePath, fileMode)
	// if err != nil {
	// return fmt.Errorf("failed to update %s file permissions at %s: %w", name, filePath, err)
	// }
	log.Warningf("updates: %s file permissions at %s updated to %v", name, filePath, fileMode)
	return nil
}
// log.Warningf("updates: %s file permissions at %s updated to %v", name, filePath, fileMode)
// return nil
// }

View File

@@ -1 +0,0 @@
package updates

View File

@@ -1,4 +1,4 @@
package updates
package registry
import (
"archive/zip"
@@ -17,6 +17,12 @@ import (
"github.com/safing/portmaster/base/log"
)
const (
defaultFileMode = os.FileMode(0o0644)
executableFileMode = os.FileMode(0o0744)
defaultDirMode = os.FileMode(0o0755)
)
const MaxUnpackSize = 1 << 30 // 2^30 == 1GB
type Artifact struct {
@@ -29,40 +35,40 @@ type Artifact struct {
}
type Bundle struct {
dir string
Name string `json:"Bundle"`
Version string `json:"Version"`
Published time.Time `json:"Published"`
Artifacts []Artifact `json:"Artifacts"`
}
func (bundle Bundle) downloadAndVerify(dataDir string) {
func (bundle Bundle) downloadAndVerify() {
client := http.Client{}
for _, artifact := range bundle.Artifacts {
filePath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
filePath := fmt.Sprintf("%s/%s", bundle.dir, artifact.Filename)
// TODO(vladimir): is this needed?
_ = os.MkdirAll(filepath.Dir(filePath), os.ModePerm)
_ = os.MkdirAll(filepath.Dir(filePath), defaultDirMode)
// Check file is already downloaded and valid.
exists, err := checkIfFileIsValid(filePath, artifact)
exists, _ := checkIfFileIsValid(filePath, artifact)
if exists {
log.Debugf("file already download: %s", filePath)
log.Debugf("updates: file already downloaded: %s", filePath)
continue
} else if err != nil {
log.Errorf("error while checking old download: %s", err)
}
// Download artifact
err = processArtifact(&client, artifact, filePath)
err := processArtifact(&client, artifact, filePath)
if err != nil {
log.Errorf("updates: %s", err)
}
}
}
func (bundle Bundle) Verify(dataDir string) error {
// Verify checks if the files are present int the dataDir and have the correct hash.
func (bundle Bundle) Verify() error {
for _, artifact := range bundle.Artifacts {
artifactPath := fmt.Sprintf("%s/%s", dataDir, artifact.Filename)
artifactPath := fmt.Sprintf("%s/%s", bundle.dir, artifact.Filename)
file, err := os.Open(artifactPath)
if err != nil {
return fmt.Errorf("failed to open file %s: %w", artifactPath, err)
@@ -86,8 +92,7 @@ func checkIfFileIsValid(filename string, artifact Artifact) (bool, error) {
// Check if file already exists
file, err := os.Open(filename)
if err != nil {
//nolint:nilerr
return false, nil
return false, err
}
defer func() { _ = file.Close() }()
@@ -131,7 +136,7 @@ func processArtifact(client *http.Client, artifact Artifact, filePath string) er
// Verify
hash := sha256.Sum256(content)
if !bytes.Equal(providedHash, hash[:]) {
// FIXME(vladimir): just for testing. Make it an error before commit.
// FIXME(vladimir): just for testing. Make it an error.
err = fmt.Errorf("failed to verify artifact: %s", artifact.Filename)
log.Debugf("updates: %s", err)
}
@@ -142,6 +147,11 @@ func processArtifact(client *http.Client, artifact Artifact, filePath string) er
if err != nil {
return fmt.Errorf("failed to create file: %w", err)
}
if artifact.Platform == "" {
_ = file.Chmod(defaultFileMode)
} else {
_ = file.Chmod(executableFileMode)
}
_, err = file.Write(content)
if err != nil {
return fmt.Errorf("failed to write to file: %w", err)

View File

@@ -0,0 +1,56 @@
package registry
import (
"fmt"
"io"
"net/http"
"os"
"github.com/safing/portmaster/base/log"
)
// UpdateIndex describes a single update channel: where its bundle is
// installed, where downloads are staged, and where the index file is
// fetched from.
type UpdateIndex struct {
	Directory         string   // directory of the currently installed bundle
	DownloadDirectory string   // staging directory for downloaded updates
	Ignore            []string // NOTE(review): not referenced in visible code — confirm intended use
	IndexURLs         []string // candidate index URLs, tried in order
	IndexFile         string   // name of the bundle index file
	AutoApply         bool     // NOTE(review): not referenced in visible code — confirm intended use
}
// downloadIndexFile downloads the bundle index file into the download
// directory, trying each configured index URL in order until one succeeds.
// It returns nil on the first success, or the error of the last failed
// attempt if all URLs fail.
func (ui *UpdateIndex) downloadIndexFile() (err error) {
	// Make sure the staging directory exists before writing into it.
	// (Previously the MkdirAll error was silently discarded, which made the
	// subsequent download fail with a confusing error.)
	if err = os.MkdirAll(ui.DownloadDirectory, defaultDirMode); err != nil {
		return fmt.Errorf("failed to create download directory %s: %w", ui.DownloadDirectory, err)
	}
	for _, url := range ui.IndexURLs {
		err = ui.downloadIndexFileFromURL(url)
		if err == nil {
			// Downloading was successful.
			return nil
		}
		log.Warningf("updates: %s", err)
	}
	return err
}
// downloadIndexFileFromURL fetches the index file from the given URL and
// writes it to <DownloadDirectory>/<IndexFile>.
func (ui *UpdateIndex) downloadIndexFileFromURL(url string) error {
	client := http.Client{}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("failed a get request to %s: %w", url, err)
	}
	defer func() { _ = resp.Body.Close() }()

	// Do not save error pages (404, 5xx, ...) as index files.
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %q from %s", resp.Status, url)
	}

	filePath := fmt.Sprintf("%s/%s", ui.DownloadDirectory, ui.IndexFile)
	// O_TRUNC is required: without it, a download shorter than a previously
	// written index file would leave stale trailing bytes behind.
	file, err := os.OpenFile(filePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultFileMode)
	if err != nil {
		return err
	}
	defer func() { _ = file.Close() }()

	_, err = io.Copy(file, resp.Body)
	if err != nil {
		return err
	}
	return nil
}

View File

@@ -0,0 +1,245 @@
package registry
import (
"encoding/json"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/safing/portmaster/base/log"
)
var ErrNotFound error = errors.New("file not found")
// File represents a single installed artifact that can be looked up in the
// registry by its identifier.
type File struct {
	id   string // artifact filename, used as the registry key
	path string // location of the artifact on disk
}

// Identifier returns the artifact's registry id (its filename).
func (f *File) Identifier() string {
	return f.id
}

// Path returns the artifact's location on disk.
func (f *File) Path() string {
	return f.path
}

// Version returns the artifact's version.
// NOTE(review): currently always returns the empty string — confirm whether
// per-artifact versions should be surfaced here.
func (f *File) Version() string {
	return ""
}
// Registry tracks the installed binary and intel bundles, their pending
// update bundles, and an index of all installed artifact files.
type Registry struct {
	binaryUpdateIndex UpdateIndex // source/locations for binary bundle updates
	intelUpdateIndex  UpdateIndex // source/locations for intel bundle updates
	binaryBundle      *Bundle     // currently installed binary bundle
	intelBundle       *Bundle     // currently installed intel bundle
	binaryUpdateBundle *Bundle    // pending binary update; set by CheckForBinaryUpdates
	intelUpdateBundle  *Bundle    // pending intel update; set by CheckForIntelUpdates
	files map[string]File // artifact filename -> file object
}
// New creates a registry for the given binary and intel update indexes.
func New(binIndex UpdateIndex, intelIndex UpdateIndex) Registry {
	reg := Registry{
		binaryUpdateIndex: binIndex,
		intelUpdateIndex:  intelIndex,
	}
	reg.files = make(map[string]File)
	return reg
}
// Initialize parses the index files of the currently installed binary and
// intel bundles and registers all of their artifacts with the registry.
func (reg *Registry) Initialize() error {
	// Parse the currently installed binary bundle.
	binBundle, err := parseBundle(reg.binaryUpdateIndex.Directory, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse binary bundle: %w", err)
	}
	reg.binaryBundle = binBundle

	// Parse the currently installed intel bundle.
	intelBundle, err := parseBundle(reg.intelUpdateIndex.Directory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse intel bundle: %w", err)
	}
	reg.intelBundle = intelBundle

	// Index the artifacts of both bundles.
	reg.processBundle(binBundle)
	reg.processBundle(intelBundle)
	return nil
}
// processBundle records every artifact of the bundle in the registry's file
// map, keyed by the artifact's filename.
func (reg *Registry) processBundle(bundle *Bundle) {
	for _, artifact := range bundle.Artifacts {
		location := fmt.Sprintf("%s/%s", bundle.dir, artifact.Filename)
		reg.files[artifact.Filename] = File{
			id:   artifact.Filename,
			path: location,
		}
	}
}
// GetFile returns the file object for the artifact with the given id.
// It returns ErrNotFound if no artifact with that id is registered.
func (reg *Registry) GetFile(id string) (*File, error) {
	file, ok := reg.files[id]
	if !ok {
		log.Errorf("updates: requested file id not found: %s", id)
		return nil, ErrNotFound
	}
	// Idiom fix: no `else` after a terminating branch.
	return &file, nil
}
// CheckForBinaryUpdates fetches the latest binary index file and reports
// whether a binary bundle with a different version is available.
// It also remembers the fetched bundle for DownloadBinaryUpdates.
func (reg *Registry) CheckForBinaryUpdates() (bool, error) {
	if err := reg.binaryUpdateIndex.downloadIndexFile(); err != nil {
		return false, err
	}

	bundle, err := parseBundle(reg.binaryUpdateIndex.DownloadDirectory, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return false, fmt.Errorf("failed to parse bundle file: %w", err)
	}
	reg.binaryUpdateBundle = bundle

	// TODO(vladimir): Make a better check.
	return reg.binaryBundle.Version != bundle.Version, nil
}
// DownloadBinaryUpdates downloads all artifacts of the pending binary update
// bundle. CheckForBinaryUpdates must have been called successfully first.
func (reg *Registry) DownloadBinaryUpdates() error {
	if reg.binaryUpdateBundle == nil {
		// CheckForBinaryUpdates needs to be called before this.
		return errors.New("no valid update bundle found")
	}
	// Best effort: clean up leftover partial downloads before starting anew.
	// NOTE(review): this cleans the installed bundle dir, while the downloads
	// below target the download dir — confirm which directory is intended.
	if err := deleteUnfinishedDownloads(reg.binaryBundle.dir); err != nil {
		log.Warningf("updates: failed to delete unfinished downloads: %s", err)
	}
	reg.binaryUpdateBundle.downloadAndVerify()
	return nil
}
// CheckForIntelUpdates fetches the latest intel index file and reports
// whether an intel data bundle with a different version is available.
// It also remembers the fetched bundle for DownloadIntelUpdates.
func (reg *Registry) CheckForIntelUpdates() (bool, error) {
	if err := reg.intelUpdateIndex.downloadIndexFile(); err != nil {
		return false, err
	}

	bundle, err := parseBundle(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return false, fmt.Errorf("failed to parse bundle file: %w", err)
	}
	reg.intelUpdateBundle = bundle

	// TODO(vladimir): Make a better check.
	return reg.intelBundle.Version != bundle.Version, nil
}
// DownloadIntelUpdates downloads all artifacts of the pending intel update
// bundle. CheckForIntelUpdates must have been called successfully first.
func (reg *Registry) DownloadIntelUpdates() error {
	if reg.intelUpdateBundle == nil {
		// CheckForIntelUpdates needs to be called before this.
		return errors.New("no valid update bundle found")
	}
	// Best effort: clean up leftover partial downloads before starting anew.
	// NOTE(review): this cleans the installed bundle dir, while the downloads
	// below target the download dir — confirm which directory is intended.
	if err := deleteUnfinishedDownloads(reg.intelBundle.dir); err != nil {
		log.Warningf("updates: failed to delete unfinished downloads: %s", err)
	}
	reg.intelUpdateBundle.downloadAndVerify()
	return nil
}
// ApplyBinaryUpdates verifies the downloaded binary bundle and replaces the
// currently installed binary directory with the downloaded one.
func (reg *Registry) ApplyBinaryUpdates() error {
	bundle, err := parseBundle(reg.binaryUpdateIndex.DownloadDirectory, reg.binaryUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse index file: %w", err)
	}
	if err := bundle.Verify(); err != nil {
		return fmt.Errorf("binary bundle is not valid: %w", err)
	}
	// NOTE(review): if the rename below fails, the old directory is already
	// gone — consider a backup/rollback strategy.
	if err := os.RemoveAll(reg.binaryUpdateIndex.Directory); err != nil {
		return fmt.Errorf("failed to remove dir: %w", err)
	}
	if err := os.Rename(reg.binaryUpdateIndex.DownloadDirectory, reg.binaryUpdateIndex.Directory); err != nil {
		return fmt.Errorf("failed to move dir: %w", err)
	}
	return nil
}
// ApplyIntelUpdates verifies the downloaded intel bundle and replaces the
// currently installed intel directory with the downloaded one.
func (reg *Registry) ApplyIntelUpdates() error {
	bundle, err := parseBundle(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.IndexFile)
	if err != nil {
		return fmt.Errorf("failed to parse index file: %w", err)
	}
	err = bundle.Verify()
	if err != nil {
		// Fixed copy-paste: this is the intel bundle, not the binary bundle.
		return fmt.Errorf("intel bundle is not valid: %w", err)
	}
	err = os.RemoveAll(reg.intelUpdateIndex.Directory)
	if err != nil {
		return fmt.Errorf("failed to remove dir: %w", err)
	}
	err = os.Rename(reg.intelUpdateIndex.DownloadDirectory, reg.intelUpdateIndex.Directory)
	if err != nil {
		return fmt.Errorf("failed to move dir: %w", err)
	}
	return nil
}
// parseBundle reads and parses the JSON bundle index at <dir>/<indexFile>.
// The given dir is remembered as the bundle's artifact directory.
func parseBundle(dir string, indexFile string) (*Bundle, error) {
	// Renamed the local from "filepath": it shadowed the imported
	// path/filepath package.
	indexPath := fmt.Sprintf("%s/%s", dir, indexFile)

	// Read the whole index file; this also fails if it does not exist.
	content, err := os.ReadFile(indexPath)
	if err != nil {
		return nil, fmt.Errorf("failed to open index file: %w", err)
	}

	// Parse.
	var bundle Bundle
	if err := json.Unmarshal(content, &bundle); err != nil {
		return nil, err
	}
	bundle.dir = dir
	return &bundle, nil
}
// deleteUnfinishedDownloads removes all leftover "*.download" files (partial
// downloads) below rootDir.
func deleteUnfinishedDownloads(rootDir string) error {
	// WalkDir avoids the extra os.Lstat per entry that filepath.Walk does.
	return filepath.WalkDir(rootDir, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// Only regular entries that still carry the in-progress extension.
		if !d.IsDir() && strings.HasSuffix(d.Name(), ".download") {
			// Log fix: consistent "updates:" prefix, no trailing newline
			// (the logger terminates lines itself).
			log.Warningf("updates: deleting unfinished download: %s", path)
			if err := os.Remove(path); err != nil {
				return fmt.Errorf("failed to delete file %s: %w", path, err)
			}
		}
		return nil
	})
}

View File

@@ -54,7 +54,7 @@ func DelayedRestart(delay time.Duration) {
// Schedule the restart task.
log.Warningf("updates: restart triggered, will execute in %s", delay)
restartAt := time.Now().Add(delay)
module.restartWorkerMgr.Delay(delay)
// module.restartWorkerMgr.Delay(delay)
// Set restartTime.
restartTimeLock.Lock()
@@ -68,23 +68,23 @@ func AbortRestart() {
log.Warningf("updates: restart aborted")
// Cancel schedule.
module.restartWorkerMgr.Delay(0)
// module.restartWorkerMgr.Delay(0)
}
}
// TriggerRestartIfPending triggers an automatic restart, if one is pending.
// This can be used to prepone a scheduled restart if the conditions are preferable.
func TriggerRestartIfPending() {
if restartPending.IsSet() {
module.restartWorkerMgr.Go()
}
// if restartPending.IsSet() {
// module.restartWorkerMgr.Go()
// }
}
// RestartNow immediately executes a restart.
// This only works if the process is managed by portmaster-start.
func RestartNow() {
restartPending.Set()
module.restartWorkerMgr.Go()
// module.restartWorkerMgr.Go()
}
func automaticRestart(w *mgr.WorkerCtx) error {
@@ -108,11 +108,11 @@ func automaticRestart(w *mgr.WorkerCtx) error {
}
// Set restart exit code.
if !rebooting {
module.instance.Restart()
} else {
module.instance.Shutdown()
}
// if !rebooting {
// module.instance.Restart()
// } else {
// module.instance.Shutdown()
// }
}
return nil

View File

@@ -1,49 +1,49 @@
package updates
import (
"github.com/safing/portmaster/base/database/record"
"github.com/safing/portmaster/base/runtime"
"github.com/safing/portmaster/base/updater"
)
// import (
// "github.com/safing/portmaster/base/database/record"
// "github.com/safing/portmaster/base/runtime"
// "github.com/safing/portmaster/base/updater"
// )
var pushRegistryStatusUpdate runtime.PushFunc
// var pushRegistryStatusUpdate runtime.PushFunc
// RegistryStateExport is a wrapper to export the registry state.
type RegistryStateExport struct {
record.Base
*updater.RegistryState
}
// // RegistryStateExport is a wrapper to export the registry state.
// type RegistryStateExport struct {
// record.Base
// *updater.RegistryState
// }
func exportRegistryState(s *updater.RegistryState) *RegistryStateExport {
if s == nil {
state := registry.GetState()
s = &state
}
// func exportRegistryState(s *updater.RegistryState) *RegistryStateExport {
// // if s == nil {
// // state := registry.GetState()
// // s = &state
// // }
export := &RegistryStateExport{
RegistryState: s,
}
// export := &RegistryStateExport{
// RegistryState: s,
// }
export.CreateMeta()
export.SetKey("runtime:core/updates/state")
// export.CreateMeta()
// export.SetKey("runtime:core/updates/state")
return export
}
// return export
// }
func pushRegistryState(s *updater.RegistryState) {
export := exportRegistryState(s)
pushRegistryStatusUpdate(export)
}
// func pushRegistryState(s *updater.RegistryState) {
// export := exportRegistryState(s)
// pushRegistryStatusUpdate(export)
// }
func registerRegistryStateProvider() (err error) {
registryStateProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
return []record.Record{exportRegistryState(nil)}, nil
})
// func registerRegistryStateProvider() (err error) {
// registryStateProvider := runtime.SimpleValueGetterFunc(func(_ string) ([]record.Record, error) {
// return []record.Record{exportRegistryState(nil)}, nil
// })
pushRegistryStatusUpdate, err = runtime.Register("core/updates/state", registryStateProvider)
if err != nil {
return err
}
// pushRegistryStatusUpdate, err = runtime.Register("core/updates/state", registryStateProvider)
// if err != nil {
// return err
// }
return nil
}
// return nil
// }

View File

@@ -1,406 +1,403 @@
package updates
import (
"context"
"fmt"
"io"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
// import (
// "context"
// "fmt"
// "os"
// "os/exec"
// "path/filepath"
// "regexp"
// "strings"
// "time"
processInfo "github.com/shirou/gopsutil/process"
"github.com/tevino/abool"
// processInfo "github.com/shirou/gopsutil/process"
// "github.com/tevino/abool"
"github.com/safing/portmaster/base/dataroot"
"github.com/safing/portmaster/base/info"
"github.com/safing/portmaster/base/log"
"github.com/safing/portmaster/base/notifications"
"github.com/safing/portmaster/base/rng"
"github.com/safing/portmaster/base/updater"
"github.com/safing/portmaster/base/utils/renameio"
"github.com/safing/portmaster/service/mgr"
"github.com/safing/portmaster/service/updates/helper"
)
// "github.com/safing/portmaster/base/dataroot"
// "github.com/safing/portmaster/base/info"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/notifications"
// "github.com/safing/portmaster/base/rng"
// "github.com/safing/portmaster/base/updater"
// "github.com/safing/portmaster/service/mgr"
// )
const (
upgradedSuffix = "-upgraded"
exeExt = ".exe"
)
// const (
// upgradedSuffix = "-upgraded"
// exeExt = ".exe"
// )
var (
upgraderActive = abool.NewBool(false)
// var (
// upgraderActive = abool.NewBool(false)
pmCtrlUpdate *updater.File
pmCoreUpdate *updater.File
// pmCtrlUpdate *updater.File
// pmCoreUpdate *updater.File
spnHubUpdate *updater.File
// spnHubUpdate *updater.File
rawVersionRegex = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+b?\*?$`)
)
// rawVersionRegex = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+b?\*?$`)
// )
func initUpgrader() error {
module.EventResourcesUpdated.AddCallback("run upgrades", upgrader)
return nil
}
// func initUpgrader() error {
// // module.EventResourcesUpdated.AddCallback("run upgrades", upgrader)
// return nil
// }
func upgrader(m *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// Lock runs, but discard additional runs.
if !upgraderActive.SetToIf(false, true) {
return false, nil
}
defer upgraderActive.SetTo(false)
// func upgrader(m *mgr.WorkerCtx, _ struct{}) (cancel bool, err error) {
// // Lock runs, but discard additional runs.
// if !upgraderActive.SetToIf(false, true) {
// return false, nil
// }
// defer upgraderActive.SetTo(false)
// Upgrade portmaster-start.
err = upgradePortmasterStart()
if err != nil {
log.Warningf("updates: failed to upgrade portmaster-start: %s", err)
}
// // Upgrade portmaster-start.
// err = upgradePortmasterStart()
// if err != nil {
// log.Warningf("updates: failed to upgrade portmaster-start: %s", err)
// }
// Upgrade based on binary.
binBaseName := strings.Split(filepath.Base(os.Args[0]), "_")[0]
switch binBaseName {
case "portmaster-core":
// Notify about upgrade.
if err := upgradeCoreNotify(); err != nil {
log.Warningf("updates: failed to notify about core upgrade: %s", err)
}
// // Upgrade based on binary.
// binBaseName := strings.Split(filepath.Base(os.Args[0]), "_")[0]
// switch binBaseName {
// case "portmaster-core":
// // Notify about upgrade.
// if err := upgradeCoreNotify(); err != nil {
// log.Warningf("updates: failed to notify about core upgrade: %s", err)
// }
// Fix chrome sandbox permissions.
if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
log.Warningf("updates: failed to handle electron upgrade: %s", err)
}
// // Fix chrome sandbox permissions.
// // if err := helper.EnsureChromeSandboxPermissions(registry); err != nil {
// // log.Warningf("updates: failed to handle electron upgrade: %s", err)
// // }
// Upgrade system integration.
upgradeSystemIntegration()
// // Upgrade system integration.
// upgradeSystemIntegration()
case "spn-hub":
// Trigger upgrade procedure.
if err := upgradeHub(); err != nil {
log.Warningf("updates: failed to initiate hub upgrade: %s", err)
}
}
// case "spn-hub":
// // Trigger upgrade procedure.
// if err := upgradeHub(); err != nil {
// log.Warningf("updates: failed to initiate hub upgrade: %s", err)
// }
// }
return false, nil
}
// return false, nil
// }
func upgradeCoreNotify() error {
if pmCoreUpdate != nil && !pmCoreUpdate.UpgradeAvailable() {
return nil
}
// func upgradeCoreNotify() error {
// if pmCoreUpdate != nil && !pmCoreUpdate.UpgradeAvailable() {
// return nil
// }
// make identifier
identifier := "core/portmaster-core" // identifier, use forward slash!
if onWindows {
identifier += exeExt
}
// // make identifier
// identifier := "core/portmaster-core" // identifier, use forward slash!
// if onWindows {
// identifier += exeExt
// }
// get newest portmaster-core
newFile, err := GetPlatformFile(identifier)
if err != nil {
return err
}
pmCoreUpdate = newFile
// // get newest portmaster-core
// // newFile, err := GetPlatformFile(identifier)
// // if err != nil {
// // return err
// // }
// // pmCoreUpdate = newFile
// check for new version
if info.VersionNumber() != pmCoreUpdate.Version() {
n := notifications.Notify(&notifications.Notification{
EventID: "updates:core-update-available",
Type: notifications.Info,
Title: fmt.Sprintf(
"Portmaster Update v%s Is Ready!",
pmCoreUpdate.Version(),
),
Category: "Core",
Message: fmt.Sprintf(
`A new Portmaster version is ready to go! Restart the Portmaster to upgrade to %s.`,
pmCoreUpdate.Version(),
),
ShowOnSystem: true,
AvailableActions: []*notifications.Action{
// TODO: Use special UI action in order to reload UI on restart.
{
ID: "restart",
Text: "Restart",
},
{
ID: "later",
Text: "Not now",
},
},
})
n.SetActionFunction(upgradeCoreNotifyActionHandler)
// // check for new version
// if info.VersionNumber() != pmCoreUpdate.Version() {
// n := notifications.Notify(&notifications.Notification{
// EventID: "updates:core-update-available",
// Type: notifications.Info,
// Title: fmt.Sprintf(
// "Portmaster Update v%s Is Ready!",
// pmCoreUpdate.Version(),
// ),
// Category: "Core",
// Message: fmt.Sprintf(
// `A new Portmaster version is ready to go! Restart the Portmaster to upgrade to %s.`,
// pmCoreUpdate.Version(),
// ),
// ShowOnSystem: true,
// AvailableActions: []*notifications.Action{
// // TODO: Use special UI action in order to reload UI on restart.
// {
// ID: "restart",
// Text: "Restart",
// },
// {
// ID: "later",
// Text: "Not now",
// },
// },
// })
// n.SetActionFunction(upgradeCoreNotifyActionHandler)
log.Debugf("updates: new portmaster version available, sending notification to user")
}
// log.Debugf("updates: new portmaster version available, sending notification to user")
// }
return nil
}
// return nil
// }
func upgradeCoreNotifyActionHandler(_ context.Context, n *notifications.Notification) error {
switch n.SelectedActionID {
case "restart":
log.Infof("updates: user triggered restart via core update notification")
RestartNow()
case "later":
n.Delete()
}
// func upgradeCoreNotifyActionHandler(_ context.Context, n *notifications.Notification) error {
// switch n.SelectedActionID {
// case "restart":
// log.Infof("updates: user triggered restart via core update notification")
// RestartNow()
// case "later":
// n.Delete()
// }
return nil
}
// return nil
// }
func upgradeHub() error {
if spnHubUpdate != nil && !spnHubUpdate.UpgradeAvailable() {
return nil
}
// func upgradeHub() error {
// if spnHubUpdate != nil && !spnHubUpdate.UpgradeAvailable() {
// return nil
// }
// Make identifier for getting file from updater.
identifier := "hub/spn-hub" // identifier, use forward slash!
if onWindows {
identifier += exeExt
}
// // Make identifier for getting file from updater.
// identifier := "hub/spn-hub" // identifier, use forward slash!
// if onWindows {
// identifier += exeExt
// }
// Get newest spn-hub file.
newFile, err := GetPlatformFile(identifier)
if err != nil {
return err
}
spnHubUpdate = newFile
// // Get newest spn-hub file.
// // newFile, err := GetPlatformFile(identifier)
// // if err != nil {
// // return err
// // }
// // spnHubUpdate = newFile
// Check if the new version is different.
if info.GetInfo().Version != spnHubUpdate.Version() {
// Get random delay with up to three hours.
delayMinutes, err := rng.Number(3 * 60)
if err != nil {
return err
}
// // Check if the new version is different.
// if info.GetInfo().Version != spnHubUpdate.Version() {
// // Get random delay with up to three hours.
// delayMinutes, err := rng.Number(3 * 60)
// if err != nil {
// return err
// }
// Delay restart for at least one hour for preparations.
DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)
// // Delay restart for at least one hour for preparations.
// DelayedRestart(time.Duration(delayMinutes+60) * time.Minute)
// Increase update checks in order to detect aborts better.
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(10 * time.Minute)
// }
} else {
AbortRestart()
// // Increase update checks in order to detect aborts better.
// // if !disableTaskSchedule {
// // module.updateBinaryWorkerMgr.Repeat(10 * time.Minute)
// // }
// } else {
// AbortRestart()
// Set update task schedule back to normal.
// if !disableTaskSchedule {
module.updateWorkerMgr.Repeat(updateTaskRepeatDuration)
// }
}
// // Set update task schedule back to normal.
// // if !disableTaskSchedule {
// // module.updateBinaryWorkerMgr.Repeat(updateTaskRepeatDuration)
// // }
// }
return nil
}
// return nil
// }
func upgradePortmasterStart() error {
filename := "portmaster-start"
if onWindows {
filename += exeExt
}
// func upgradePortmasterStart() error {
// filename := "portmaster-start"
// if onWindows {
// filename += exeExt
// }
// check if we can upgrade
if pmCtrlUpdate == nil || pmCtrlUpdate.UpgradeAvailable() {
// get newest portmaster-start
newFile, err := GetPlatformFile("start/" + filename) // identifier, use forward slash!
if err != nil {
return err
}
pmCtrlUpdate = newFile
} else {
return nil
}
// // check if we can upgrade
// if pmCtrlUpdate == nil || pmCtrlUpdate.UpgradeAvailable() {
// // get newest portmaster-start
// // newFile, err := GetPlatformFile("start/" + filename) // identifier, use forward slash!
// // if err != nil {
// // return err
// // }
// // pmCtrlUpdate = newFile
// } else {
// return nil
// }
// update portmaster-start in data root
rootPmStartPath := filepath.Join(dataroot.Root().Path, filename)
err := upgradeBinary(rootPmStartPath, pmCtrlUpdate)
if err != nil {
return err
}
// // update portmaster-start in data root
// rootPmStartPath := filepath.Join(dataroot.Root().Path, filename)
// err := upgradeBinary(rootPmStartPath, pmCtrlUpdate)
// if err != nil {
// return err
// }
return nil
}
// return nil
// }
// warnOnIncorrectParentPath checks whether the parent process is the
// expected portmaster-start binary located in the update storage
// directory, and warns (log + user notification) if it is not. This helps
// detect setups that still launch the Portmaster via an outdated or
// foreign launcher binary. All failures to inspect the parent process are
// logged at trace level and abort the check silently.
func warnOnIncorrectParentPath() {
	expectedFileName := "portmaster-start"
	if onWindows {
		expectedFileName += exeExt
	}

	// Inspect the parent process, which should be portmaster-start.
	parent, err := processInfo.NewProcess(int32(os.Getppid()))
	if err != nil {
		log.Tracef("could not get parent process: %s", err)
		return
	}
	parentName, err := parent.Name()
	if err != nil {
		log.Tracef("could not get parent process name: %s", err)
		return
	}
	if parentName != expectedFileName {
		// Only warn about this if not in dev mode.
		if !devMode() {
			log.Warningf("updates: parent process does not seem to be portmaster-start, name is %s", parentName)
		}
		// TODO(ppacher): once we released a new installer and folks had time
		//                to update we should send a module warning/hint to the
		//                UI notifying the user that he's still using portmaster-control.
		return
	}

	parentPath, err := parent.Exe()
	if err != nil {
		log.Tracef("could not get parent process path: %s", err)
		return
	}
	absPath, err := filepath.Abs(parentPath)
	if err != nil {
		log.Tracef("could not get absolute parent process path: %s", err)
		return
	}

	// Warn and notify if the launcher does not live in the directory that
	// is kept up to date automatically.
	root := filepath.Dir(registry.StorageDir().Path)
	if !strings.HasPrefix(absPath, root) {
		log.Warningf("detected unexpected path %s for portmaster-start", absPath)
		notifications.NotifyWarn(
			"updates:unsupported-parent",
			"Unsupported Launcher",
			fmt.Sprintf(
				"The Portmaster has been launched by an unexpected %s binary at %s. Please configure your system to use the binary at %s as this version will be kept up to date automatically.",
				expectedFileName,
				absPath,
				filepath.Join(root, expectedFileName),
			),
		)
	}
}
// upgradeBinary replaces the binary at fileToUpgrade with the given update
// file, unless the installed binary already reports the same version. If
// the old binary cannot be removed (e.g. because it is currently executing
// on Windows), it is moved into the registry's tmp dir instead. On
// non-Windows systems, execute permissions are ensured on the new binary.
func upgradeBinary(fileToUpgrade string, file *updater.File) error {
	fileExists := false
	_, err := os.Stat(fileToUpgrade)
	if err == nil {
		// File exists and is accessible.
		fileExists = true
	}

	if fileExists {
		// Query the currently installed binary for its version.
		var currentVersion string
		cmd := exec.Command(fileToUpgrade, "version", "--short")
		out, err := cmd.Output()
		if err == nil {
			// Abort if version matches.
			currentVersion = strings.Trim(strings.TrimSpace(string(out)), "*")
			if currentVersion == file.Version() {
				log.Debugf("updates: %s is already v%s", fileToUpgrade, file.Version())
				// Already up to date!
				return nil
			}
		} else {
			log.Warningf("updates: failed to run %s to get version for upgrade check: %s", fileToUpgrade, err)
			currentVersion = "0.0.0"
		}

		// Test currentVersion for sanity.
		if !rawVersionRegex.MatchString(currentVersion) {
			log.Debugf("updates: version string returned by %s is invalid: %s", fileToUpgrade, currentVersion)
		}

		// Try removing the old version.
		err = os.Remove(fileToUpgrade)
		if err != nil {
			// Ensure tmp dir is here before moving the file there.
			err = registry.TmpDir().Ensure()
			if err != nil {
				return fmt.Errorf("could not prepare tmp directory for moving file that needs upgrade: %w", err)
			}

			// Maybe we're on Windows and the binary is in use: move it out
			// of the way into the tmp dir instead of deleting it.
			err = os.Rename(fileToUpgrade, filepath.Join(
				registry.TmpDir().Path,
				fmt.Sprintf(
					"%s-%d%s",
					filepath.Base(fileToUpgrade),
					time.Now().UTC().Unix(),
					upgradedSuffix,
				),
			))
			if err != nil {
				return fmt.Errorf("unable to move file that needs upgrade: %w", err)
			}
		}
	}

	// Copy upgrade into place.
	err = CopyFile(file.Path(), fileToUpgrade)
	if err != nil {
		// Try again once; the target may have been busy only briefly.
		time.Sleep(1 * time.Second)
		err = CopyFile(file.Path(), fileToUpgrade)
		if err != nil {
			return err
		}
	}

	// Check permissions and ensure the new binary is executable.
	if !onWindows {
		info, err := os.Stat(fileToUpgrade)
		if err != nil {
			return fmt.Errorf("failed to get file info on %s: %w", fileToUpgrade, err)
		}
		// Compare only the permission bits, not the full file mode.
		if info.Mode().Perm() != 0o0755 {
			err := os.Chmod(fileToUpgrade, 0o0755) //nolint:gosec // Set execute permissions.
			if err != nil {
				return fmt.Errorf("failed to set permissions on %s: %w", fileToUpgrade, err)
			}
		}
	}

	log.Infof("updates: upgraded %s to v%s", fileToUpgrade, file.Version())
	return nil
}
// CopyFile atomically copies a file using the update registry's tmp dir as
// the staging area. The destination is replaced in a single atomic rename,
// so readers never observe a partially written file.
func CopyFile(srcPath, dstPath string) error {
	// Make sure the tmp dir exists.
	err := registry.TmpDir().Ensure()
	if err != nil {
		return fmt.Errorf("could not prepare tmp directory for copying file: %w", err)
	}

	// Open staged destination file for writing.
	atomicDstFile, err := renameio.TempFile(registry.TmpDir().Path, dstPath)
	if err != nil {
		return fmt.Errorf("could not create temp file for atomic copy: %w", err)
	}
	defer atomicDstFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway

	// Open source.
	srcFile, err := os.Open(srcPath)
	if err != nil {
		return err
	}
	defer func() {
		_ = srcFile.Close()
	}()

	// Copy data.
	_, err = io.Copy(atomicDstFile, srcFile)
	if err != nil {
		return err
	}

	// Finalize: atomically replace the destination file.
	err = atomicDstFile.CloseAtomicallyReplace()
	if err != nil {
		return fmt.Errorf("updates: failed to finalize copy to file %s: %w", dstPath, err)
	}

	return nil
}