[WIP] Remove old code

This commit is contained in:
Vladimir Stoilov
2024-09-25 17:42:05 +03:00
parent 08830f29c6
commit 7cf20b2cf8
21 changed files with 0 additions and 3088 deletions

View File

@@ -1,2 +0,0 @@
// Package updater is an update registry that manages updates and versions.
package updater

View File

@@ -1,15 +0,0 @@
package updater
// // Export exports the list of resources.
// func (reg *ResourceRegistry) Export() map[string]*Resource {
// reg.RLock()
// defer reg.RUnlock()
// // copy the map
// copiedResources := make(map[string]*Resource)
// for key, val := range reg.resources {
// copiedResources[key] = val.Export()
// }
// return copiedResources
// }

View File

@@ -1,347 +0,0 @@
package updater
// import (
// "bytes"
// "context"
// "errors"
// "fmt"
// "hash"
// "io"
// "net/http"
// "net/url"
// "os"
// "path"
// "path/filepath"
// "time"
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils/renameio"
// )
// func (reg *ResourceRegistry) fetchFile(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// // check destination dir
// dirPath := filepath.Dir(rv.storagePath())
// err := reg.storageDir.EnsureAbsPath(dirPath)
// if err != nil {
// return fmt.Errorf("could not create updates folder: %s", dirPath)
// }
// // If verification is enabled, download signature first.
// var (
// verifiedHash *lhash.LabeledHash
// sigFileData []byte
// )
// if rv.resource.VerificationOptions != nil {
// verifiedHash, sigFileData, err = reg.fetchAndVerifySigFile(
// ctx, client,
// rv.resource.VerificationOptions,
// rv.versionedSigPath(), rv.SigningMetadata(),
// tries,
// )
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("signature verification failed: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// }
// }
// }
// // open file for writing
// atomicFile, err := renameio.TempFile(reg.tmpDir.Path, rv.storagePath())
// if err != nil {
// return fmt.Errorf("could not create temp file for download: %w", err)
// }
// defer atomicFile.Cleanup() //nolint:errcheck // ignore error for now, tmp dir will be cleaned later again anyway
// // start file download
// resp, downloadURL, err := reg.makeRequest(ctx, client, rv.versionedPath(), tries)
// if err != nil {
// return err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// // Write to the hasher at the same time, if needed.
// var hasher hash.Hash
// var writeDst io.Writer = atomicFile
// if verifiedHash != nil {
// hasher = verifiedHash.Algorithm().RawHasher()
// writeDst = io.MultiWriter(hasher, atomicFile)
// }
// // Download and write file.
// n, err := io.Copy(writeDst, resp.Body)
// if err != nil {
// return fmt.Errorf("failed to download %q: %w", downloadURL, err)
// }
// if resp.ContentLength != n {
// return fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
// }
// // Before file is finalized, check if hash, if available.
// if hasher != nil {
// downloadDigest := hasher.Sum(nil)
// if verifiedHash.EqualRaw(downloadDigest) {
// log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
// } else {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return errors.New("file does not match signed checksum")
// case SignaturePolicyWarn:
// log.Warningf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// case SignaturePolicyDisable:
// log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// }
// // Reset hasher to signal that the sig should not be written.
// hasher = nil
// }
// }
// // Write signature file, if we have one and if verification succeeded.
// if len(sigFileData) > 0 && hasher != nil {
// sigFilePath := rv.storagePath() + filesig.Extension
// err := os.WriteFile(sigFilePath, sigFileData, 0o0644) //nolint:gosec
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to write signature file %s: %w", sigFilePath, err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to write signature file %s: %s", reg.Name, sigFilePath, err)
// }
// }
// }
// // finalize file
// err = atomicFile.CloseAtomicallyReplace()
// if err != nil {
// return fmt.Errorf("%s: failed to finalize file %s: %w", reg.Name, rv.storagePath(), err)
// }
// // set permissions
// if !onWindows {
// // TODO: only set executable files to 0755, set other to 0644
// err = os.Chmod(rv.storagePath(), 0o0755) //nolint:gosec // See TODO above.
// if err != nil {
// log.Warningf("%s: failed to set permissions on downloaded file %s: %s", reg.Name, rv.storagePath(), err)
// }
// }
// log.Debugf("%s: fetched %s and stored to %s", reg.Name, downloadURL, rv.storagePath())
// return nil
// }
// func (reg *ResourceRegistry) fetchMissingSig(ctx context.Context, client *http.Client, rv *ResourceVersion, tries int) error {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// // Check destination dir.
// dirPath := filepath.Dir(rv.storagePath())
// err := reg.storageDir.EnsureAbsPath(dirPath)
// if err != nil {
// return fmt.Errorf("could not create updates folder: %s", dirPath)
// }
// // Download and verify the missing signature.
// verifiedHash, sigFileData, err := reg.fetchAndVerifySigFile(
// ctx, client,
// rv.resource.VerificationOptions,
// rv.versionedSigPath(), rv.SigningMetadata(),
// tries,
// )
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("signature verification failed: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify downloaded signature of %s: %s", reg.Name, rv.versionedPath(), err)
// }
// return nil
// }
// // Check if the signature matches the resource file.
// ok, err := verifiedHash.MatchesFile(rv.storagePath())
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("error while verifying resource file: %w", err)
// case SignaturePolicyWarn:
// log.Warningf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
// case SignaturePolicyDisable:
// log.Debugf("%s: error while verifying resource file %s", reg.Name, rv.storagePath())
// }
// return nil
// }
// if !ok {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return errors.New("resource file does not match signed checksum")
// case SignaturePolicyWarn:
// log.Warningf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
// case SignaturePolicyDisable:
// log.Debugf("%s: checksum does not match resource file from %s", reg.Name, rv.storagePath())
// }
// return nil
// }
// // Write signature file.
// err = os.WriteFile(rv.storageSigPath(), sigFileData, 0o0644) //nolint:gosec
// if err != nil {
// switch rv.resource.VerificationOptions.DownloadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to write signature file %s: %w", rv.storageSigPath(), err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to write signature file %s: %s", reg.Name, rv.storageSigPath(), err)
// }
// }
// log.Debugf("%s: fetched %s and stored to %s", reg.Name, rv.versionedSigPath(), rv.storageSigPath())
// return nil
// }
// func (reg *ResourceRegistry) fetchAndVerifySigFile(ctx context.Context, client *http.Client, verifOpts *VerificationOptions, sigFilePath string, requiredMetadata map[string]string, tries int) (*lhash.LabeledHash, []byte, error) {
// // Download signature file.
// resp, _, err := reg.makeRequest(ctx, client, sigFilePath, tries)
// if err != nil {
// return nil, nil, err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// sigFileData, err := io.ReadAll(resp.Body)
// if err != nil {
// return nil, nil, err
// }
// // Extract all signatures.
// sigs, err := filesig.ParseSigFile(sigFileData)
// switch {
// case len(sigs) == 0 && err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// case len(sigs) == 0:
// return nil, nil, errors.New("no signatures found in signature file")
// case err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// }
// // Verify all signatures.
// var verifiedHash *lhash.LabeledHash
// for _, sig := range sigs {
// fd, err := filesig.VerifyFileData(
// sig,
// requiredMetadata,
// verifOpts.TrustStore,
// )
// if err != nil {
// return nil, sigFileData, err
// }
// // Save or check verified hash.
// if verifiedHash == nil {
// verifiedHash = fd.FileHash()
// } else if !fd.FileHash().Equal(verifiedHash) {
// // Return an error if two valid hashes mismatch.
// // For simplicity, all hash algorithms must be the same for now.
// return nil, sigFileData, errors.New("file hashes from different signatures do not match")
// }
// }
// return verifiedHash, sigFileData, nil
// }
// func (reg *ResourceRegistry) fetchData(ctx context.Context, client *http.Client, downloadPath string, tries int) (fileData []byte, downloadedFrom string, err error) {
// // backoff when retrying
// if tries > 0 {
// select {
// case <-ctx.Done():
// return nil, "", nil // module is shutting down
// case <-time.After(time.Duration(tries*tries) * time.Second):
// }
// }
// // start file download
// resp, downloadURL, err := reg.makeRequest(ctx, client, downloadPath, tries)
// if err != nil {
// return nil, downloadURL, err
// }
// defer func() {
// _ = resp.Body.Close()
// }()
// // download and write file
// buf := bytes.NewBuffer(make([]byte, 0, resp.ContentLength))
// n, err := io.Copy(buf, resp.Body)
// if err != nil {
// return nil, downloadURL, fmt.Errorf("failed to download %q: %w", downloadURL, err)
// }
// if resp.ContentLength != n {
// return nil, downloadURL, fmt.Errorf("failed to finish download of %q: written %d out of %d bytes", downloadURL, n, resp.ContentLength)
// }
// return buf.Bytes(), downloadURL, nil
// }
// func (reg *ResourceRegistry) makeRequest(ctx context.Context, client *http.Client, downloadPath string, tries int) (resp *http.Response, downloadURL string, err error) {
// // parse update URL
// updateBaseURL := reg.UpdateURLs[tries%len(reg.UpdateURLs)]
// u, err := url.Parse(updateBaseURL)
// if err != nil {
// return nil, "", fmt.Errorf("failed to parse update URL %q: %w", updateBaseURL, err)
// }
// // add download path
// u.Path = path.Join(u.Path, downloadPath)
// // compile URL
// downloadURL = u.String()
// // create request
// req, err := http.NewRequestWithContext(ctx, http.MethodGet, downloadURL, http.NoBody)
// if err != nil {
// return nil, "", fmt.Errorf("failed to create request for %q: %w", downloadURL, err)
// }
// // set user agent
// if reg.UserAgent != "" {
// req.Header.Set("User-Agent", reg.UserAgent)
// }
// // start request
// resp, err = client.Do(req)
// if err != nil {
// return nil, "", fmt.Errorf("failed to make request to %q: %w", downloadURL, err)
// }
// // check return code
// if resp.StatusCode != http.StatusOK {
// _ = resp.Body.Close()
// return nil, "", fmt.Errorf("failed to fetch %q: %d %s", downloadURL, resp.StatusCode, resp.Status)
// }
// return resp, downloadURL, err
// }

View File

@@ -1,148 +0,0 @@
package updater
import (
// semver "github.com/hashicorp/go-version"
)
// File represents a file from the update system.
// type File struct {
// resource *Resource
// version *ResourceVersion
// notifier *notifier
// versionedPath string
// storagePath string
// }
// // Identifier returns the identifier of the file.
// func (file *File) Identifier() string {
// return file.resource.Identifier
// }
// // Version returns the version of the file.
// func (file *File) Version() string {
// return file.version.VersionNumber
// }
// // SemVer returns the semantic version of the file.
// func (file *File) SemVer() *semver.Version {
// return file.version.semVer
// }
// // EqualsVersion normalizes the given version and checks equality with semver.
// func (file *File) EqualsVersion(version string) bool {
// return file.version.EqualsVersion(version)
// }
// // Path returns the absolute filepath of the file.
// func (file *File) Path() string {
// return file.storagePath
// }
// // SigningMetadata returns the metadata to be included in signatures.
// func (file *File) SigningMetadata() map[string]string {
// return map[string]string{
// "id": file.Identifier(),
// "version": file.Version(),
// }
// }
// Verify verifies the given file.
// func (file *File) Verify() ([]*filesig.FileData, error) {
// // Check if verification is configured.
// if file.resource.VerificationOptions == nil {
// return nil, ErrVerificationNotConfigured
// }
// // Verify file.
// fileData, err := filesig.VerifyFile(
// file.storagePath,
// file.storagePath+filesig.Extension,
// file.SigningMetadata(),
// file.resource.VerificationOptions.TrustStore,
// )
// if err != nil {
// switch file.resource.VerificationOptions.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return nil, err
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify %s: %s", file.resource.registry.Name, file.storagePath, err)
// }
// }
// return fileData, nil
// }
// Blacklist notifies the update system that this file is somehow broken, and should be ignored from now on, until restarted.
// func (file *File) Blacklist() error {
// return file.resource.Blacklist(file.version.VersionNumber)
// }
// markActiveWithLocking marks the file as active, locking the resource in the process.
// func (file *File) markActiveWithLocking() {
// file.resource.Lock()
// defer file.resource.Unlock()
// // update last used version
// if file.resource.ActiveVersion != file.version {
// log.Debugf("updater: setting active version of resource %s from %s to %s", file.resource.Identifier, file.resource.ActiveVersion, file.version.VersionNumber)
// file.resource.ActiveVersion = file.version
// }
// }
// Unpacker describes the function that is passed to
// File.Unpack. It receives a reader to the compressed/packed
// file and should return a reader that provides
// unpacked file contents. If the returned reader implements
// io.Closer its Close method is invoked when an error
// or io.EOF is returned from Read().
// type Unpacker func(io.Reader) (io.Reader, error)
// Unpack returns the path to the unpacked version of file and
// unpacks it on demand using unpacker.
// func (file *File) Unpack(suffix string, unpacker Unpacker) (string, error) {
// path := strings.TrimSuffix(file.Path(), suffix)
// if suffix == "" {
// path += "-unpacked"
// }
// _, err := os.Stat(path)
// if err == nil {
// return path, nil
// }
// if !errors.Is(err, fs.ErrNotExist) {
// return "", err
// }
// f, err := os.Open(file.Path())
// if err != nil {
// return "", err
// }
// defer func() {
// _ = f.Close()
// }()
// r, err := unpacker(f)
// if err != nil {
// return "", err
// }
// ioErr := utils.CreateAtomic(path, r, &utils.AtomicFileOptions{
// TempDir: file.resource.registry.TmpDir().Path,
// })
// if c, ok := r.(io.Closer); ok {
// if err := c.Close(); err != nil && ioErr == nil {
// // if ioErr is already set we ignore the error from
// // closing the unpacker.
// ioErr = err
// }
// }
// return path, ioErr
// }

View File

@@ -1,57 +0,0 @@
package updater
import (
"path"
"regexp"
"strings"
)
var (
	// fileVersionRegex matches the version segment of a versioned
	// filename, e.g. the "_v1-2-3" or "_v1-2-3-beta" in "file_v1-2-3.exe".
	fileVersionRegex = regexp.MustCompile(`_v[0-9]+-[0-9]+-[0-9]+(-[a-z]+)?`)
	// rawVersionRegex matches a plain dotted version string with an
	// optional lowercase tag, e.g. "1.2.3" or "1.2.3-beta".
	rawVersionRegex = regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+(-[a-z]+)?$`)
)

// GetIdentifierAndVersion splits the given file path into its identifier and version.
// It reports ok=false if the path does not contain a version segment.
func GetIdentifierAndVersion(versionedPath string) (identifier, version string, ok bool) {
	dirPath, filename := path.Split(versionedPath)

	// Extract version from filename.
	rawVersion := fileVersionRegex.FindString(filename)
	if rawVersion == "" {
		// No version present in file, making it invalid.
		return "", "", false
	}

	// Trim the `_v` prefix that gets caught by the regex and replace `-`
	// with `.` to get the version string.
	// Note: TrimPrefix, not TrimLeft — TrimLeft treats "_v" as a character
	// set and would keep stripping any further '_' or 'v' characters; it
	// only worked here by accident because a digit always follows "_v".
	version = strings.Replace(strings.TrimPrefix(rawVersion, "_v"), "-", ".", 2)

	// Put the filename back together without version.
	i := strings.Index(filename, rawVersion)
	if i < 0 {
		// Extracted version not in string (impossible).
		return "", "", false
	}
	filename = filename[:i] + filename[i+len(rawVersion):]

	// Put the full path back together and return it.
	// `dirPath + filename` is guaranteed by path.Split().
	return dirPath + filename, version, true
}
// GetVersionedPath combines the identifier and version and returns it as a file path.
func GetVersionedPath(identifier, version string) (versionedPath string) {
	dir, name := path.Split(identifier)

	// The version is inserted right before the first dot of the filename,
	// with its dots rewritten as dashes for the on-disk format.
	parts := strings.SplitN(name, ".", 2)
	dashed := strings.Replace(version, ".", "-", 2)

	// Assemble the versioned path piece by piece.
	var b strings.Builder
	b.WriteString(dir)
	b.WriteString(parts[0])
	b.WriteString("_v")
	b.WriteString(dashed)
	if len(parts) > 1 {
		b.WriteString(".")
		b.WriteString(parts[1])
	}
	return b.String()
}

View File

@@ -1,80 +0,0 @@
package updater
import (
"regexp"
"testing"
"github.com/stretchr/testify/assert"
)
func testRegexMatch(t *testing.T, testRegex *regexp.Regexp, testString string, shouldMatch bool) {
t.Helper()
if testRegex.MatchString(testString) != shouldMatch {
if shouldMatch {
t.Errorf("regex %s should match %s", testRegex, testString)
} else {
t.Errorf("regex %s should not match %s", testRegex, testString)
}
}
}
func testRegexFind(t *testing.T, testRegex *regexp.Regexp, testString string, shouldMatch bool) {
t.Helper()
if (testRegex.FindString(testString) != "") != shouldMatch {
if shouldMatch {
t.Errorf("regex %s should find %s", testRegex, testString)
} else {
t.Errorf("regex %s should not find %s", testRegex, testString)
}
}
}
// testVersionTransformation checks that a versioned filename round-trips:
// splitting it must yield the expected identifier and version, and joining
// those back must reproduce the original filename.
func testVersionTransformation(t *testing.T, testFilename, testIdentifier, testVersion string) {
	t.Helper()

	gotIdentifier, gotVersion, ok := GetIdentifierAndVersion(testFilename)
	if !ok {
		t.Errorf("failed to get identifier and version of %s", testFilename)
	}
	assert.Equal(t, testIdentifier, gotIdentifier, "identifier does not match")
	assert.Equal(t, testVersion, gotVersion, "version does not match")

	assert.Equal(t, testFilename, GetVersionedPath(testIdentifier, testVersion), "filename (versioned path) does not match")
}
// TestRegexes exercises the version regexes and the path/version round-trip.
func TestRegexes(t *testing.T) {
	t.Parallel()

	// rawVersionRegex must accept plain (optionally tagged) version strings
	// and reject everything else.
	rawVersionCases := []struct {
		input string
		match bool
	}{
		{"0.1.2", true},
		{"0.1.2-beta", true},
		{"0.1.2-staging", true},
		{"12.13.14", true},
		{"v0.1.2", false},
		{"0.", false},
		{"0.1", false},
		{"0.1.", false},
		{".1.2", false},
		{".1.", false},
		{"012345", false},
	}
	for _, c := range rawVersionCases {
		testRegexMatch(t, rawVersionRegex, c.input, c.match)
	}

	// fileVersionRegex must find the "_v1-2-3" style segment in paths.
	fileVersionCases := []struct {
		input string
		found bool
	}{
		{"/path/to/file_v0-0-0", true},
		{"/path/to/file_v1-2-3", true},
		{"/path/to/file_v1-2-3.exe", true},
		{"/path/to/file-v1-2-3", false},
		{"/path/to/file_v1.2.3", false},
		{"/path/to/file_1-2-3", false},
		{"/path/to/file_v1-2", false},
		{"/path/to/file-v1-2-3", false},
	}
	for _, c := range fileVersionCases {
		testRegexFind(t, fileVersionRegex, c.input, c.found)
	}

	// Versioned paths must round-trip to identifier/version pairs and back.
	transformCases := []struct {
		filename, identifier, version string
	}{
		{"/path/to/file_v0-0-0", "/path/to/file", "0.0.0"},
		{"/path/to/file_v1-2-3", "/path/to/file", "1.2.3"},
		{"/path/to/file_v1-2-3-beta", "/path/to/file", "1.2.3-beta"},
		{"/path/to/file_v1-2-3-staging", "/path/to/file", "1.2.3-staging"},
		{"/path/to/file_v1-2-3.exe", "/path/to/file.exe", "1.2.3"},
		{"/path/to/file_v1-2-3-staging.exe", "/path/to/file.exe", "1.2.3-staging"},
	}
	for _, c := range transformCases {
		testVersionTransformation(t, c.filename, c.identifier, c.version)
	}
}

View File

@@ -1,87 +0,0 @@
package updater
import (
"errors"
)
// Errors returned by the updater package.
var (
	// ErrNotFound is returned when the requested identifier is not
	// registered in the resource registry.
	ErrNotFound = errors.New("the requested file could not be found")
	// ErrNotAvailableLocally is returned when the requested file has not
	// been downloaded and downloading is not possible.
	ErrNotAvailableLocally = errors.New("the requested file is not available locally")
	// ErrVerificationNotConfigured is returned when file verification is
	// requested, but no verification options are configured for the resource.
	ErrVerificationNotConfigured = errors.New("verification not configured for this resource")
)
// GetFile returns the selected (mostly newest) file with the given
// identifier or an error, if it fails.
// func (reg *ResourceRegistry) GetFile(identifier string) (*File, error) {
// return nil, fmt.Errorf("invalid file: %s", identifier)
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
// file := res.GetFile()
// // check if file is available locally
// if file.version.Available {
// file.markActiveWithLocking()
// // Verify file, if configured.
// _, err := file.Verify()
// if err != nil && !errors.Is(err, ErrVerificationNotConfigured) {
// // TODO: If verification is required, try deleting the resource and downloading it again.
// return nil, fmt.Errorf("failed to verify file: %w", err)
// }
// return file, nil
// }
// // check if online
// if !reg.Online {
// return nil, ErrNotAvailableLocally
// }
// // check download dir
// err := reg.tmpDir.Ensure()
// if err != nil {
// return nil, fmt.Errorf("could not prepare tmp directory for download: %w", err)
// }
// // Start registry operation.
// reg.state.StartOperation(StateFetching)
// defer reg.state.EndOperation()
// // download file
// log.Tracef("%s: starting download of %s", reg.Name, file.versionedPath)
// client := &http.Client{}
// for tries := range 5 {
// err = reg.fetchFile(context.TODO(), client, file.version, tries)
// if err != nil {
// log.Tracef("%s: failed to download %s: %s, retrying (%d)", reg.Name, file.versionedPath, err, tries+1)
// } else {
// file.markActiveWithLocking()
// // TODO: We just download the file - should we verify it again?
// return file, nil
// }
// }
// log.Warningf("%s: failed to download %s: %s", reg.Name, file.versionedPath, err)
// return nil, err
// }
// GetVersion returns the selected version of the given identifier.
// The returned resource version may not be modified.
// func (reg *ResourceRegistry) GetVersion(identifier string) (*ResourceVersion, error) {
// reg.RLock()
// res, ok := reg.resources[identifier]
// reg.RUnlock()
// if !ok {
// return nil, ErrNotFound
// }
// res.Lock()
// defer res.Unlock()
// return res.SelectedVersion, nil
// }

View File

@@ -1,109 +0,0 @@
package updater
import (
"encoding/json"
"errors"
"fmt"
"time"
)
const (
	// baseIndexExtension is the file extension of index files in the
	// original format.
	baseIndexExtension = ".json"
	// v2IndexExtension is the file extension of index files in the v2 format.
	v2IndexExtension = ".v2.json"
)

// Index describes an index file pulled by the updater.
type Index struct {
	// Path is the path to the index file
	// on the update server.
	Path string

	// Channel holds the release channel name of the index.
	// It must match the filename without extension.
	Channel string

	// PreRelease signifies that all versions of this index should be marked as
	// pre-releases, no matter if the versions actually have a pre-release tag or
	// not.
	PreRelease bool

	// AutoDownload specifies whether new versions should be automatically downloaded.
	AutoDownload bool

	// LastRelease holds the time of the last seen release of this index.
	LastRelease time.Time
}

// IndexFile represents the parsed content of an index file.
type IndexFile struct {
	// Channel holds the release channel name of the index.
	Channel string
	// Published holds the publish timestamp of the index; it may be zero
	// for indexes parsed from the old format.
	Published time.Time
	// Releases maps resource identifiers to their current version string.
	Releases map[string]string
}
var (
	// ErrIndexChecksumMismatch is returned when an index does not match its
	// signed checksum.
	// Note: the error message previously read "does mot match"; the typo in
	// the user-visible string is fixed here.
	ErrIndexChecksumMismatch = errors.New("index checksum does not match signature")

	// ErrIndexFromFuture is returned when an index is parsed with a
	// Published timestamp that lies in the future.
	ErrIndexFromFuture = errors.New("index is from the future")

	// ErrIndexIsOlder is returned when an index is parsed with an older
	// Published timestamp than the current Published timestamp.
	ErrIndexIsOlder = errors.New("index is older than the current one")

	// ErrIndexChannelMismatch is returned when an index is parsed with a
	// different channel than the expected one.
	ErrIndexChannelMismatch = errors.New("index does not match the expected channel")
)
// ParseIndexFile parses an index file and checks if it is valid.
// It returns the parsed index together with a validation error, if any.
func ParseIndexFile(indexData []byte, channel string, lastIndexRelease time.Time) (*IndexFile, error) {
	// Load into struct.
	indexFile := &IndexFile{}
	if err := json.Unmarshal(indexData, indexFile); err != nil {
		return nil, fmt.Errorf("failed to parse signed index data: %w", err)
	}

	// Fallback to old format if there are no releases and no channel is defined.
	// TODO: Remove in v1
	if len(indexFile.Releases) == 0 && indexFile.Channel == "" {
		return loadOldIndexFormat(indexData, channel)
	}

	// Check the index metadata, most severe condition first.
	published := indexFile.Published
	if !published.IsZero() && time.Now().Before(published) {
		return indexFile, ErrIndexFromFuture
	}
	if !published.IsZero() && !lastIndexRelease.IsZero() && lastIndexRelease.After(published) {
		return indexFile, ErrIndexIsOlder
	}
	if channel != "" && indexFile.Channel != "" && channel != indexFile.Channel {
		return indexFile, ErrIndexChannelMismatch
	}

	return indexFile, nil
}
// loadOldIndexFormat parses the legacy index format: a plain JSON map of
// resource identifiers to version strings, without any metadata.
func loadOldIndexFormat(indexData []byte, channel string) (*IndexFile, error) {
	var releases map[string]string
	if err := json.Unmarshal(indexData, &releases); err != nil {
		return nil, err
	}

	index := &IndexFile{
		Channel: channel,
		// Do NOT define `Published`, as this would break the "is newer" check.
		Releases: releases,
	}
	return index, nil
}

View File

@@ -1,57 +0,0 @@
package updater
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
var (
	// oldFormat is a fixture in the legacy index format: a plain JSON map
	// of resource identifiers to version strings.
	oldFormat = `{
"all/ui/modules/assets.zip": "0.3.0",
"all/ui/modules/portmaster.zip": "0.2.4",
"linux_amd64/core/portmaster-core": "0.8.13"
}`

	// newFormat is a fixture in the current index format, carrying channel
	// and publish-time metadata alongside the releases.
	newFormat = `{
"Channel": "stable",
"Published": "2022-01-02T00:00:00Z",
"Releases": {
"all/ui/modules/assets.zip": "0.3.0",
"all/ui/modules/portmaster.zip": "0.2.4",
"linux_amd64/core/portmaster-core": "0.8.13"
}
}`

	// formatTestChannel is the channel both fixtures must resolve to.
	formatTestChannel = "stable"

	// formatTestReleases is the release map both fixtures must contain.
	formatTestReleases = map[string]string{
		"all/ui/modules/assets.zip":        "0.3.0",
		"all/ui/modules/portmaster.zip":    "0.2.4",
		"linux_amd64/core/portmaster-core": "0.8.13",
	}
)
func TestIndexParsing(t *testing.T) {
t.Parallel()
lastRelease, err := time.Parse(time.RFC3339, "2022-01-01T00:00:00Z")
if err != nil {
t.Fatal(err)
}
oldIndexFile, err := ParseIndexFile([]byte(oldFormat), formatTestChannel, lastRelease)
if err != nil {
t.Fatal(err)
}
newIndexFile, err := ParseIndexFile([]byte(newFormat), formatTestChannel, lastRelease)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, formatTestChannel, oldIndexFile.Channel, "channel should be the same")
assert.Equal(t, formatTestChannel, newIndexFile.Channel, "channel should be the same")
assert.Equal(t, formatTestReleases, oldIndexFile.Releases, "releases should be the same")
assert.Equal(t, formatTestReleases, newIndexFile.Releases, "releases should be the same")
}

View File

@@ -1,33 +0,0 @@
package updater
// import (
// "github.com/tevino/abool"
// )
// type notifier struct {
// upgradeAvailable *abool.AtomicBool
// notifyChannel chan struct{}
// }
// func newNotifier() *notifier {
// return &notifier{
// upgradeAvailable: abool.NewBool(false),
// notifyChannel: make(chan struct{}),
// }
// }
// func (n *notifier) markAsUpgradeable() {
// if n.upgradeAvailable.SetToIf(false, true) {
// close(n.notifyChannel)
// }
// }
// // UpgradeAvailable returns whether an upgrade is available for this file.
// func (file *File) UpgradeAvailable() bool {
// return file.notifier.upgradeAvailable.IsSet()
// }
// // WaitForAvailableUpgrade blocks (selectable) until an upgrade for this file is available.
// func (file *File) WaitForAvailableUpgrade() <-chan struct{} {
// return file.notifier.notifyChannel
// }

View File

@@ -1,270 +0,0 @@
package updater
// import (
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "runtime"
// "strings"
// "sync"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// const (
// onWindows = runtime.GOOS == "windows"
// )
// ResourceRegistry is a registry for managing update resources.
// type ResourceRegistry struct {
// sync.RWMutex
// Name string
// storageDir *utils.DirStructure
// tmpDir *utils.DirStructure
// indexes []*Index
// state *RegistryState
// resources map[string]*Resource
// UpdateURLs []string
// UserAgent string
// MandatoryUpdates []string
// AutoUnpack []string
// // Verification holds a map of VerificationOptions assigned to their
// // applicable identifier path prefix.
// // Use an empty string to denote the default.
// // Use empty options to disable verification for a path prefix.
// Verification map[string]*VerificationOptions
// // UsePreReleases signifies that pre-releases should be used when selecting a
// // version. Even if false, a pre-release version will still be used if it is
// // defined as the current version by an index.
// UsePreReleases bool
// // DevMode specifies if a local 0.0.0 version should be always chosen, when available.
// DevMode bool
// // Online specifies if resources may be downloaded if not available locally.
// Online bool
// // StateNotifyFunc may be set to receive any changes to the registry state.
// // The specified function may lock the state, but may not block or take a
// // lot of time.
// StateNotifyFunc func(*RegistryState)
// }
// // AddIndex adds a new index to the resource registry.
// // The order is important, as indexes added later will override the current
// // release from earlier indexes.
// func (reg *ResourceRegistry) AddIndex(idx Index) {
// reg.Lock()
// defer reg.Unlock()
// // Get channel name from path.
// idx.Channel = strings.TrimSuffix(
// filepath.Base(idx.Path), filepath.Ext(idx.Path),
// )
// reg.indexes = append(reg.indexes, &idx)
// }
// // PreInitUpdateState sets the initial update state of the registry before initialization.
// func (reg *ResourceRegistry) PreInitUpdateState(s UpdateState) error {
// if reg.state != nil {
// return errors.New("registry already initialized")
// }
// reg.state = &RegistryState{
// Updates: s,
// }
// return nil
// }
// // Initialize initializes a raw registry struct and makes it ready for usage.
// func (reg *ResourceRegistry) Initialize(storageDir *utils.DirStructure) error {
// // check if storage dir is available
// err := storageDir.Ensure()
// if err != nil {
// return err
// }
// // set default name
// if reg.Name == "" {
// reg.Name = "updater"
// }
// // initialize private attributes
// reg.storageDir = storageDir
// reg.tmpDir = storageDir.ChildDir("tmp", 0o0700)
// reg.resources = make(map[string]*Resource)
// if reg.state == nil {
// reg.state = &RegistryState{}
// }
// reg.state.ID = StateReady
// reg.state.reg = reg
// // remove tmp dir to delete old entries
// err = reg.Cleanup()
// if err != nil {
// log.Warningf("%s: failed to remove tmp dir: %s", reg.Name, err)
// }
// // (re-)create tmp dir
// err = reg.tmpDir.Ensure()
// if err != nil {
// log.Warningf("%s: failed to create tmp dir: %s", reg.Name, err)
// }
// // Check verification options.
// if reg.Verification != nil {
// for prefix, opts := range reg.Verification {
// // Check if verification is disabled for this prefix.
// if opts == nil {
// continue
// }
// // If enabled, a trust store is required.
// if opts.TrustStore == nil {
// return fmt.Errorf("verification enabled for prefix %q, but no trust store configured", prefix)
// }
// // DownloadPolicy must be equal or stricter than DiskLoadPolicy.
// if opts.DiskLoadPolicy < opts.DownloadPolicy {
// return errors.New("verification download policy must be equal or stricter than the disk load policy")
// }
// // Warn if all policies are disabled.
// if opts.DownloadPolicy == SignaturePolicyDisable &&
// opts.DiskLoadPolicy == SignaturePolicyDisable {
// log.Warningf("%s: verification enabled for prefix %q, but all policies set to disable", reg.Name, prefix)
// }
// }
// }
// return nil
// }
// // StorageDir returns the main storage dir of the resource registry.
// func (reg *ResourceRegistry) StorageDir() *utils.DirStructure {
// return reg.storageDir
// }
// // TmpDir returns the temporary working dir of the resource registry.
// func (reg *ResourceRegistry) TmpDir() *utils.DirStructure {
// return reg.tmpDir
// }
// // SetDevMode sets the development mode flag.
// func (reg *ResourceRegistry) SetDevMode(on bool) {
// reg.Lock()
// defer reg.Unlock()
// reg.DevMode = on
// }
// // SetUsePreReleases sets the UsePreReleases flag.
// func (reg *ResourceRegistry) SetUsePreReleases(yes bool) {
// reg.Lock()
// defer reg.Unlock()
// reg.UsePreReleases = yes
// }
// // AddResource adds a resource to the registry. Does _not_ select new version.
// func (reg *ResourceRegistry) AddResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
// reg.Lock()
// defer reg.Unlock()
// err := reg.addResource(identifier, version, index, available, currentRelease, preRelease)
// return err
// }
// func (reg *ResourceRegistry) addResource(identifier, version string, index *Index, available, currentRelease, preRelease bool) error {
// res, ok := reg.resources[identifier]
// if !ok {
// res = reg.newResource(identifier)
// reg.resources[identifier] = res
// }
// res.Index = index
// return res.AddVersion(version, available, currentRelease, preRelease)
// }
// // AddResources adds resources to the registry. Errors are logged, the last one is returned. Despite errors, non-failing resources are still added. Does _not_ select new versions.
// func (reg *ResourceRegistry) AddResources(versions map[string]string, index *Index, available, currentRelease, preRelease bool) error {
// reg.Lock()
// defer reg.Unlock()
// // add versions and their flags to registry
// var lastError error
// for identifier, version := range versions {
// lastError = reg.addResource(identifier, version, index, available, currentRelease, preRelease)
// if lastError != nil {
// log.Warningf("%s: failed to add resource %s: %s", reg.Name, identifier, lastError)
// }
// }
// return lastError
// }
// // SelectVersions selects new resource versions depending on the current registry state.
// func (reg *ResourceRegistry) SelectVersions() {
// reg.RLock()
// defer reg.RUnlock()
// for _, res := range reg.resources {
// res.Lock()
// res.selectVersion()
// res.Unlock()
// }
// }
// // GetSelectedVersions returns a list of the currently selected versions.
// func (reg *ResourceRegistry) GetSelectedVersions() (versions map[string]string) {
// reg.RLock()
// defer reg.RUnlock()
// for _, res := range reg.resources {
// res.Lock()
// versions[res.Identifier] = res.SelectedVersion.VersionNumber
// res.Unlock()
// }
// return
// }
// // Purge deletes old updates, retaining a certain amount, specified by the keep
// // parameter. Will at least keep 2 updates per resource.
// func (reg *ResourceRegistry) Purge(keep int) {
// reg.RLock()
// defer reg.RUnlock()
// for _, res := range reg.resources {
// res.Purge(keep)
// }
// }
// // ResetResources removes all resources from the registry.
// func (reg *ResourceRegistry) ResetResources() {
// reg.Lock()
// defer reg.Unlock()
// reg.resources = make(map[string]*Resource)
// }
// // ResetIndexes removes all indexes from the registry.
// func (reg *ResourceRegistry) ResetIndexes() {
// reg.Lock()
// defer reg.Unlock()
// reg.indexes = make([]*Index, 0, len(reg.indexes))
// }
// // Cleanup removes temporary files.
// func (reg *ResourceRegistry) Cleanup() error {
// // delete download tmp dir
// return os.RemoveAll(reg.tmpDir.Path)
// }

View File

@@ -1,35 +0,0 @@
package updater
import (
"os"
"testing"
"github.com/safing/portmaster/base/utils"
)
// registry is the shared test ResourceRegistry, initialized once in TestMain
// and used by the tests in this package.
var registry *ResourceRegistry
// TestMain prepares the shared test registry in a fresh temporary
// directory, runs the test suite, and removes the directory afterwards.
func TestMain(m *testing.M) {
	// Create an isolated storage root so tests never touch real data.
	storageRoot, mkErr := os.MkdirTemp("", "ci-portmaster-")
	if mkErr != nil {
		panic(mkErr)
	}

	registry = &ResourceRegistry{
		UsePreReleases: true,
		DevMode:        true,
		Online:         true,
	}
	if initErr := registry.Initialize(utils.NewDirStructure(storageRoot, 0o0777)); initErr != nil {
		panic(initErr)
	}

	// Run all tests, then tear down the temporary storage before exiting.
	// call flag.Parse() here if TestMain uses flags
	exitCode := m.Run()
	_ = os.RemoveAll(storageRoot)
	os.Exit(exitCode)
}

View File

@@ -1,582 +0,0 @@
package updater
// import (
// "errors"
// "io/fs"
// "os"
// "path/filepath"
// "sort"
// "strings"
// "sync"
// semver "github.com/hashicorp/go-version"
// "github.com/safing/jess/filesig"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// var devVersion *semver.Version
// func init() {
// var err error
// devVersion, err = semver.NewVersion("0")
// if err != nil {
// panic(err)
// }
// }
// Resource represents a resource (via an identifier) and multiple file versions.
// type Resource struct {
// sync.Mutex
// registry *ResourceRegistry
// notifier *notifier
// // Identifier is the unique identifier for that resource.
// // It forms a file path using a forward-slash as the
// // path separator.
// Identifier string
// // Versions holds all available resource versions.
// Versions []*ResourceVersion
// // ActiveVersion is the last version of the resource
// // that someone requested using GetFile().
// ActiveVersion *ResourceVersion
// // SelectedVersion is the newest selectable version of
// // that resource that is available. A version
// // is selectable if it's not blacklisted by the user.
// // Note that it's not guaranteed that the selected version
// // is available locally. In that case, GetFile will attempt
// // to download the latest version from the updates servers
// // specified in the resource registry.
// SelectedVersion *ResourceVersion
// // VerificationOptions holds the verification options for this resource.
// VerificationOptions *VerificationOptions
// // Index holds a reference to the index this resource was last defined in.
// // Will be nil if resource was only found on disk.
// Index *Index
// }
// ResourceVersion represents a single version of a resource.
// type ResourceVersion struct {
// resource *Resource
// // VersionNumber is the string representation of the resource
// // version.
// VersionNumber string
// semVer *semver.Version
// // Available indicates if this version is available locally.
// Available bool
// // SigAvailable indicates if the signature of this version is available locally.
// SigAvailable bool
// // CurrentRelease indicates that this is the current release that should be
// // selected, if possible.
// CurrentRelease bool
// // PreRelease indicates that this version is pre-release.
// PreRelease bool
// // Blacklisted may be set to true if this version should
// // be skipped and not used. This is useful if the version
// // is known to be broken.
// Blacklisted bool
// }
// func (rv *ResourceVersion) String() string {
// return rv.VersionNumber
// }
// // SemVer returns the semantic version of the resource.
// func (rv *ResourceVersion) SemVer() *semver.Version {
// return rv.semVer
// }
// EqualsVersion normalizes the given version and checks equality with semver.
// func (rv *ResourceVersion) EqualsVersion(version string) bool {
// cmpSemVer, err := semver.NewVersion(version)
// if err != nil {
// return false
// }
// return rv.semVer.Equal(cmpSemVer)
// }
// // isSelectable returns true if the version represented by rv is selectable.
// // A version is selectable if it's not blacklisted and either already locally
// // available or ready to be downloaded.
// func (rv *ResourceVersion) isSelectable() bool {
// switch {
// case rv.Blacklisted:
// // Should not be used.
// return false
// case rv.Available:
// // Is available locally, use!
// return true
// case !rv.resource.registry.Online:
// // Cannot download, because registry is set to offline.
// return false
// case rv.resource.Index == nil:
// // Cannot download, because resource is not part of an index.
// return false
// case !rv.resource.Index.AutoDownload:
// // Cannot download, because index may not automatically download.
// return false
// default:
// // Is not available locally, but we are allowed to download it on request!
// return true
// }
// }
// isBetaVersionNumber checks if rv is marked as a beta version by checking
// the version string. It does not honor the BetaRelease field of rv!
// func (rv *ResourceVersion) isBetaVersionNumber() bool { //nolint:unused
// // "b" suffix check if for backwards compatibility
// // new versions should use the pre-release suffix as
// // declared by https://semver.org
// // i.e. 1.2.3-beta
// switch rv.semVer.Prerelease() {
// case "b", "beta":
// return true
// default:
// return false
// }
// }
// Export makes a copy of the resource with only the exposed information.
// Attributes are copied and safe to access.
// Any ResourceVersion must not be modified.
// func (res *Resource) Export() *Resource {
// res.Lock()
// defer res.Unlock()
// // Copy attributes.
// export := &Resource{
// Identifier: res.Identifier,
// Versions: make([]*ResourceVersion, len(res.Versions)),
// ActiveVersion: res.ActiveVersion,
// SelectedVersion: res.SelectedVersion,
// }
// // Copy Versions slice.
// copy(export.Versions, res.Versions)
// return export
// }
// // Len is the number of elements in the collection.
// // It implements sort.Interface for ResourceVersion.
// func (res *Resource) Len() int {
// return len(res.Versions)
// }
// // Less reports whether the element with index i should
// // sort before the element with index j.
// // It implements sort.Interface for ResourceVersions.
// func (res *Resource) Less(i, j int) bool {
// return res.Versions[i].semVer.GreaterThan(res.Versions[j].semVer)
// }
// // Swap swaps the elements with indexes i and j.
// // It implements sort.Interface for ResourceVersions.
// func (res *Resource) Swap(i, j int) {
// res.Versions[i], res.Versions[j] = res.Versions[j], res.Versions[i]
// }
// // available returns whether any version of the resource is available.
// func (res *Resource) available() bool {
// for _, rv := range res.Versions {
// if rv.Available {
// return true
// }
// }
// return false
// }
// // inUse returns true if the resource is currently in use.
// func (res *Resource) inUse() bool {
// return res.ActiveVersion != nil
// }
// // AnyVersionAvailable returns true if any version of
// // res is locally available.
// func (res *Resource) AnyVersionAvailable() bool {
// res.Lock()
// defer res.Unlock()
// return res.available()
// }
// func (reg *ResourceRegistry) newResource(identifier string) *Resource {
// return &Resource{
// registry: reg,
// Identifier: identifier,
// Versions: make([]*ResourceVersion, 0, 1),
// VerificationOptions: reg.GetVerificationOptions(identifier),
// }
// }
// // AddVersion adds a resource version to a resource.
// func (res *Resource) AddVersion(version string, available, currentRelease, preRelease bool) error {
// res.Lock()
// defer res.Unlock()
// // reset current release flags
// if currentRelease {
// for _, rv := range res.Versions {
// rv.CurrentRelease = false
// }
// }
// var rv *ResourceVersion
// // check for existing version
// for _, possibleMatch := range res.Versions {
// if possibleMatch.VersionNumber == version {
// rv = possibleMatch
// break
// }
// }
// // create new version if none found
// if rv == nil {
// // parse to semver
// sv, err := semver.NewVersion(version)
// if err != nil {
// return err
// }
// rv = &ResourceVersion{
// resource: res,
// VersionNumber: sv.String(), // Use normalized version.
// semVer: sv,
// }
// res.Versions = append(res.Versions, rv)
// }
// // set flags
// if available {
// rv.Available = true
// // If available and signatures are enabled for this resource, check if the
// // signature is available.
// if res.VerificationOptions != nil && utils.PathExists(rv.storageSigPath()) {
// rv.SigAvailable = true
// }
// }
// if currentRelease {
// rv.CurrentRelease = true
// }
// if preRelease || rv.semVer.Prerelease() != "" {
// rv.PreRelease = true
// }
// return nil
// }
// // GetFile returns the selected version as a *File.
// func (res *Resource) GetFile() *File {
// res.Lock()
// defer res.Unlock()
// // check for notifier
// if res.notifier == nil {
// // create new notifier
// res.notifier = newNotifier()
// }
// // check if version is selected
// if res.SelectedVersion == nil {
// res.selectVersion()
// }
// // create file
// return &File{
// resource: res,
// version: res.SelectedVersion,
// notifier: res.notifier,
// versionedPath: res.SelectedVersion.versionedPath(),
// storagePath: res.SelectedVersion.storagePath(),
// }
// }
// //nolint:gocognit // function already kept as simple as possible
// func (res *Resource) selectVersion() {
// sort.Sort(res)
// // export after we finish
// var fallback bool
// defer func() {
// if fallback {
// log.Tracef("updater: selected version %s (as fallback) for resource %s", res.SelectedVersion, res.Identifier)
// } else {
// log.Debugf("updater: selected version %s for resource %s", res.SelectedVersion, res.Identifier)
// }
// if res.inUse() &&
// res.SelectedVersion != res.ActiveVersion && // new selected version does not match previously selected version
// res.notifier != nil {
// res.notifier.markAsUpgradeable()
// res.notifier = nil
// log.Debugf("updater: active version of %s is %s, update available", res.Identifier, res.ActiveVersion.VersionNumber)
// }
// }()
// if len(res.Versions) == 0 {
// // TODO: find better way to deal with an empty version slice (which should not happen)
// res.SelectedVersion = nil
// return
// }
// // Target selection
// // 1) Dev release if dev mode is active and ignore blacklisting
// if res.registry.DevMode {
// // Get last version, as this will be v0.0.0, if available.
// rv := res.Versions[len(res.Versions)-1]
// // Check if it's v0.0.0.
// if rv.semVer.Equal(devVersion) && rv.Available {
// res.SelectedVersion = rv
// return
// }
// }
// // 2) Find the current release. This may be also be a pre-release.
// for _, rv := range res.Versions {
// if rv.CurrentRelease {
// if rv.isSelectable() {
// res.SelectedVersion = rv
// return
// }
// // There can only be one current release,
// // so we can abort after finding one.
// break
// }
// }
// // 3) If UsePreReleases is set, find any newest version.
// if res.registry.UsePreReleases {
// for _, rv := range res.Versions {
// if rv.isSelectable() {
// res.SelectedVersion = rv
// return
// }
// }
// }
// // 4) Find the newest stable version.
// for _, rv := range res.Versions {
// if !rv.PreRelease && rv.isSelectable() {
// res.SelectedVersion = rv
// return
// }
// }
// // 5) Default to newest.
// res.SelectedVersion = res.Versions[0]
// fallback = true
// }
// // Blacklist blacklists the specified version and selects a new version.
// func (res *Resource) Blacklist(version string) error {
// res.Lock()
// defer res.Unlock()
// // count available and valid versions
// valid := 0
// for _, rv := range res.Versions {
// if rv.semVer.Equal(devVersion) {
// continue // ignore dev versions
// }
// if !rv.Blacklisted {
// valid++
// }
// }
// if valid <= 1 {
// return errors.New("cannot blacklist last version") // last one, cannot blacklist!
// }
// // find version and blacklist
// for _, rv := range res.Versions {
// if rv.VersionNumber == version {
// // blacklist and update
// rv.Blacklisted = true
// res.selectVersion()
// return nil
// }
// }
// return errors.New("could not find version")
// }
// // Purge deletes old updates, retaining a certain amount, specified by
// // the keep parameter. Purge will always keep at least 2 versions so
// // specifying a smaller keep value will have no effect.
// func (res *Resource) Purge(keepExtra int) { //nolint:gocognit
// res.Lock()
// defer res.Unlock()
// // If there is any blacklisted version within the resource, pause purging.
// // In this case we may need extra available versions beyond what would be
// // available after purging.
// for _, rv := range res.Versions {
// if rv.Blacklisted {
// log.Debugf(
// "%s: pausing purging of resource %s, as it contains blacklisted items",
// res.registry.Name,
// rv.resource.Identifier,
// )
// return
// }
// }
// // Safeguard the amount of extra version to keep.
// if keepExtra < 2 {
// keepExtra = 2
// }
// // Search for purge boundary.
// var purgeBoundary int
// var skippedActiveVersion bool
// var skippedSelectedVersion bool
// var skippedStableVersion bool
// boundarySearch:
// for i, rv := range res.Versions {
// // Check if required versions are already skipped.
// switch {
// case !skippedActiveVersion && res.ActiveVersion != nil:
// // Skip versions until the active version, if it's set.
// case !skippedSelectedVersion && res.SelectedVersion != nil:
// // Skip versions until the selected version, if it's set.
// case !skippedStableVersion:
// // Skip versions until the stable version.
// default:
// // All required version skipped, set purge boundary.
// purgeBoundary = i + keepExtra
// break boundarySearch
// }
// // Check if current instance is a required version.
// if rv == res.ActiveVersion {
// skippedActiveVersion = true
// }
// if rv == res.SelectedVersion {
// skippedSelectedVersion = true
// }
// if !rv.PreRelease {
// skippedStableVersion = true
// }
// }
// // Check if there is anything to purge at all.
// if purgeBoundary <= keepExtra || purgeBoundary >= len(res.Versions) {
// return
// }
// // Purge everything beyond the purge boundary.
// for _, rv := range res.Versions[purgeBoundary:] {
// // Only remove if resource file is actually available.
// if !rv.Available {
// continue
// }
// // Remove resource file.
// storagePath := rv.storagePath()
// err := os.Remove(storagePath)
// if err != nil {
// if !errors.Is(err, fs.ErrNotExist) {
// log.Warningf("%s: failed to purge resource %s v%s: %s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber, err)
// }
// } else {
// log.Tracef("%s: purged resource %s v%s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber)
// }
// // Remove resource signature file.
// err = os.Remove(rv.storageSigPath())
// if err != nil {
// if !errors.Is(err, fs.ErrNotExist) {
// log.Warningf("%s: failed to purge resource signature %s v%s: %s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber, err)
// }
// } else {
// log.Tracef("%s: purged resource signature %s v%s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber)
// }
// // Remove unpacked version of resource.
// ext := filepath.Ext(storagePath)
// if ext == "" {
// // Nothing to do if file does not have an extension.
// continue
// }
// unpackedPath := strings.TrimSuffix(storagePath, ext)
// // Remove if it exists, or an error occurs on access.
// _, err = os.Stat(unpackedPath)
// if err == nil || !errors.Is(err, fs.ErrNotExist) {
// err = os.Remove(unpackedPath)
// if err != nil {
// log.Warningf("%s: failed to purge unpacked resource %s v%s: %s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber, err)
// } else {
// log.Tracef("%s: purged unpacked resource %s v%s", res.registry.Name, rv.resource.Identifier, rv.VersionNumber)
// }
// }
// }
// // remove entries of deleted files
// res.Versions = res.Versions[purgeBoundary:]
// }
// // SigningMetadata returns the metadata to be included in signatures.
// func (rv *ResourceVersion) SigningMetadata() map[string]string {
// return map[string]string{
// "id": rv.resource.Identifier,
// "version": rv.VersionNumber,
// }
// }
// // GetFile returns the version as a *File.
// // It locks the resource for doing so.
// func (rv *ResourceVersion) GetFile() *File {
// rv.resource.Lock()
// defer rv.resource.Unlock()
// // check for notifier
// if rv.resource.notifier == nil {
// // create new notifier
// rv.resource.notifier = newNotifier()
// }
// // create file
// return &File{
// resource: rv.resource,
// version: rv,
// notifier: rv.resource.notifier,
// versionedPath: rv.versionedPath(),
// storagePath: rv.storagePath(),
// }
// }
// // versionedPath returns the versioned identifier.
// func (rv *ResourceVersion) versionedPath() string {
// return GetVersionedPath(rv.resource.Identifier, rv.VersionNumber)
// }
// // versionedSigPath returns the versioned identifier of the file signature.
// func (rv *ResourceVersion) versionedSigPath() string {
// return GetVersionedPath(rv.resource.Identifier, rv.VersionNumber) + filesig.Extension
// }
// // storagePath returns the absolute storage path.
// func (rv *ResourceVersion) storagePath() string {
// return filepath.Join(rv.resource.registry.storageDir.Path, filepath.FromSlash(rv.versionedPath()))
// }
// // storageSigPath returns the absolute storage path of the file signature.
// func (rv *ResourceVersion) storageSigPath() string {
// return rv.storagePath() + filesig.Extension
// }

View File

@@ -1,119 +0,0 @@
package updater
import (
"fmt"
"testing"
semver "github.com/hashicorp/go-version"
"github.com/stretchr/testify/assert"
)
// TestVersionSelection exercises selectVersion across dev-mode,
// pre-release, online, and blacklist settings, and verifies that
// blacklisting the active version signals an available upgrade.
func TestVersionSelection(t *testing.T) {
	t.Parallel()

	res := registry.newResource("test/a")

	// Register a mix of locally available/unavailable and
	// stable/pre-release versions, plus the dev version "0".
	versions := []struct {
		version   string
		available bool
	}{
		{"1.2.2", true},
		{"1.2.3", true},
		{"1.2.4-beta", true},
		{"1.2.4-staging", true},
		{"1.2.5", false},
		{"1.2.6-beta", false},
		{"0", true},
	}
	for _, v := range versions {
		if err := res.AddVersion(v.version, v.available, false, false); err != nil {
			t.Fatal(err)
		}
	}

	registry.UsePreReleases = true
	registry.DevMode = true
	registry.Online = true
	res.Index = &Index{AutoDownload: true}

	// checkSelected runs selectVersion and asserts the outcome under
	// the current registry settings.
	checkSelected := func(want string) {
		t.Helper()
		res.selectVersion()
		if res.SelectedVersion.VersionNumber != want {
			t.Errorf("selected version should be %s, not %s", want, res.SelectedVersion.VersionNumber)
		}
	}

	// Dev mode prefers the local 0.0.0 version.
	checkSelected("0.0.0")

	// Pre-releases allowed and downloads allowed: newest pre-release wins.
	registry.DevMode = false
	checkSelected("1.2.6-beta")

	// Pre-releases disallowed: newest stable (downloadable) wins.
	registry.UsePreReleases = false
	checkSelected("1.2.5")

	// Offline: newest stable version available locally wins.
	registry.Online = false
	checkSelected("1.2.3")

	// Blacklisting the active version must reselect and flag an upgrade.
	f123 := res.GetFile()
	f123.markActiveWithLocking()
	if err := res.Blacklist("1.2.3"); err != nil {
		t.Fatal(err)
	}
	if res.SelectedVersion.VersionNumber != "1.2.2" {
		t.Errorf("selected version should be 1.2.2, not %s", res.SelectedVersion.VersionNumber)
	}
	if !f123.UpgradeAvailable() {
		t.Error("upgrade should be available (flag)")
	}
	select {
	case <-f123.WaitForAvailableUpgrade():
	default:
		t.Error("upgrade should be available (chan)")
	}

	t.Logf("resource: %+v", res)
	for _, rv := range res.Versions {
		t.Logf("version %s: %+v", rv.VersionNumber, rv)
	}
}
// TestVersionParsing verifies that version strings are normalized into
// canonical semver form, including legacy "b"/"beta" suffix styles and
// zero-padded components.
func TestVersionParsing(t *testing.T) {
	t.Parallel()

	// Table-driven: input version string and its expected normalization.
	testCases := []struct {
		input    string
		expected string
	}{
		{"1.2.3", "1.2.3"},
		{"1.2.0", "1.2.0"},
		{"0.2.0", "0.2.0"},
		{"0", "0.0.0"},
		{"1.2.3-b", "1.2.3-b"},
		{"1.2.3b", "1.2.3-b"},
		{"1.2.3-beta", "1.2.3-beta"},
		{"1.2.3beta", "1.2.3-beta"},
		{"01.02.03", "1.2.3"},
	}
	for _, tc := range testCases {
		assert.Equal(t, tc.expected, parseVersion(tc.input))
	}
}
// parseVersion normalizes the given version string via semver parsing.
// On parse failure it returns a description of the error instead.
func parseVersion(v string) string {
	parsed, parseErr := semver.NewVersion(v)
	if parseErr != nil {
		return fmt.Sprintf("failed to parse version: %s", parseErr)
	}
	return parsed.String()
}

View File

@@ -1,49 +0,0 @@
package updater
// import (
// "strings"
// "github.com/safing/jess"
// )
// // VerificationOptions holds options for verification of files.
// type VerificationOptions struct {
// TrustStore jess.TrustStore
// DownloadPolicy SignaturePolicy
// DiskLoadPolicy SignaturePolicy
// }
// // GetVerificationOptions returns the verification options for the given identifier.
// func (reg *ResourceRegistry) GetVerificationOptions(identifier string) *VerificationOptions {
// if reg.Verification == nil {
// return nil
// }
// var (
// longestPrefix = -1
// bestMatch *VerificationOptions
// )
// for prefix, opts := range reg.Verification {
// if len(prefix) > longestPrefix && strings.HasPrefix(identifier, prefix) {
// longestPrefix = len(prefix)
// bestMatch = opts
// }
// }
// return bestMatch
// }
// // SignaturePolicy defines behavior in case of errors.
// type SignaturePolicy uint8
// // Signature Policies.
// const (
// // SignaturePolicyRequire fails on any error.
// SignaturePolicyRequire = iota
// // SignaturePolicyWarn only warns on errors.
// SignaturePolicyWarn
// // SignaturePolicyDisable only downloads signatures, but does not verify them.
// SignaturePolicyDisable
// )

View File

@@ -1,180 +0,0 @@
package updater
// import (
// "sort"
// "sync"
// "time"
// "github.com/safing/portmaster/base/utils"
// )
// // Registry States.
// const (
// StateReady = "ready" // Default idle state.
// StateChecking = "checking" // Downloading indexes.
// StateDownloading = "downloading" // Downloading updates.
// StateFetching = "fetching" // Fetching a single file.
// )
// // RegistryState describes the registry state.
// type RegistryState struct {
// sync.Mutex
// reg *ResourceRegistry
// // ID holds the ID of the state the registry is currently in.
// ID string
// // Details holds further information about the current state.
// Details any
// // Updates holds generic information about the current status of pending
// // and recently downloaded updates.
// Updates UpdateState
// // operationLock locks the operation of any state changing operation.
// // This is separate from the registry lock, which locks access to the
// // registry struct.
// operationLock sync.Mutex
// }
// // StateDownloadingDetails holds details of the downloading state.
// type StateDownloadingDetails struct {
// // Resources holds the resource IDs that are being downloaded.
// Resources []string
// // FinishedUpTo holds the index of Resources that is currently being
// // downloaded. Previous resources have finished downloading.
// FinishedUpTo int
// }
// // UpdateState holds generic information about the current status of pending
// // and recently downloaded updates.
// type UpdateState struct {
// // LastCheckAt holds the time of the last update check.
// LastCheckAt *time.Time
// // LastCheckError holds the error of the last check.
// LastCheckError error
// // PendingDownload holds the resources that are pending download.
// PendingDownload []string
// // LastDownloadAt holds the time when resources were downloaded the last time.
// LastDownloadAt *time.Time
// // LastDownloadError holds the error of the last download.
// LastDownloadError error
// // LastDownload holds the resources that we downloaded the last time updates
// // were downloaded.
// LastDownload []string
// // LastSuccessAt holds the time of the last successful update (check).
// LastSuccessAt *time.Time
// }
// // GetState returns the current registry state.
// // The returned data must not be modified.
// func (reg *ResourceRegistry) GetState() RegistryState {
// reg.state.Lock()
// defer reg.state.Unlock()
// return RegistryState{
// ID: reg.state.ID,
// Details: reg.state.Details,
// Updates: reg.state.Updates,
// }
// }
// // StartOperation starts an operation.
// func (s *RegistryState) StartOperation(id string) bool {
// defer s.notify()
// s.operationLock.Lock()
// s.Lock()
// defer s.Unlock()
// s.ID = id
// return true
// }
// // UpdateOperationDetails updates the details of an operation.
// // The supplied struct should be a copy and must not be changed after calling
// // this function.
// func (s *RegistryState) UpdateOperationDetails(details any) {
// defer s.notify()
// s.Lock()
// defer s.Unlock()
// s.Details = details
// }
// // EndOperation ends an operation.
// func (s *RegistryState) EndOperation() {
// defer s.notify()
// defer s.operationLock.Unlock()
// s.Lock()
// defer s.Unlock()
// s.ID = StateReady
// s.Details = nil
// }
// // ReportUpdateCheck reports an update check to the registry state.
// func (s *RegistryState) ReportUpdateCheck(pendingDownload []string, failed error) {
// defer s.notify()
// sort.Strings(pendingDownload)
// s.Lock()
// defer s.Unlock()
// now := time.Now()
// s.Updates.LastCheckAt = &now
// s.Updates.LastCheckError = failed
// s.Updates.PendingDownload = pendingDownload
// if failed == nil {
// s.Updates.LastSuccessAt = &now
// }
// }
// // ReportDownloads reports downloaded updates to the registry state.
// func (s *RegistryState) ReportDownloads(downloaded []string, failed error) {
// defer s.notify()
// sort.Strings(downloaded)
// s.Lock()
// defer s.Unlock()
// now := time.Now()
// s.Updates.LastDownloadAt = &now
// s.Updates.LastDownloadError = failed
// s.Updates.LastDownload = downloaded
// // Remove downloaded resources from the pending list.
// if len(s.Updates.PendingDownload) > 0 {
// newPendingDownload := make([]string, 0, len(s.Updates.PendingDownload))
// for _, pending := range s.Updates.PendingDownload {
// if !utils.StringInSlice(downloaded, pending) {
// newPendingDownload = append(newPendingDownload, pending)
// }
// }
// s.Updates.PendingDownload = newPendingDownload
// }
// if failed == nil {
// s.Updates.LastSuccessAt = &now
// }
// }
// func (s *RegistryState) notify() {
// switch {
// case s.reg == nil:
// return
// case s.reg.StateNotifyFunc == nil:
// return
// }
// s.reg.StateNotifyFunc(s)
// }

View File

@@ -1,272 +0,0 @@
package updater
// import (
// "context"
// "errors"
// "fmt"
// "io/fs"
// "net/http"
// "os"
// "path/filepath"
// "strings"
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// // ScanStorage scans root within the storage dir and adds found
// // resources to the registry. If an error occurred, it is logged
// // and the last error is returned. Everything that was found
// // despite errors is added to the registry anyway. Leave root
// // empty to scan the full storage dir.
// func (reg *ResourceRegistry) ScanStorage(root string) error {
// var lastError error
// // prep root
// if root == "" {
// root = reg.storageDir.Path
// } else {
// var err error
// root, err = filepath.Abs(root)
// if err != nil {
// return err
// }
// if !strings.HasPrefix(root, reg.storageDir.Path) {
// return errors.New("supplied scan root path not within storage")
// }
// }
// // walk fs
// _ = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {
// // skip tmp dir (including errors trying to read it)
// if strings.HasPrefix(path, reg.tmpDir.Path) {
// return filepath.SkipDir
// }
// // handle walker error
// if err != nil {
// lastError = fmt.Errorf("%s: could not read %s: %w", reg.Name, path, err)
// log.Warning(lastError.Error())
// return nil
// }
// // Ignore file signatures.
// if strings.HasSuffix(path, filesig.Extension) {
// return nil
// }
// // get relative path to storage
// relativePath, err := filepath.Rel(reg.storageDir.Path, path)
// if err != nil {
// lastError = fmt.Errorf("%s: could not get relative path of %s: %w", reg.Name, path, err)
// log.Warning(lastError.Error())
// return nil
// }
// // convert to identifier and version
// relativePath = filepath.ToSlash(relativePath)
// identifier, version, ok := GetIdentifierAndVersion(relativePath)
// if !ok {
// // file does not conform to format
// return nil
// }
// // fully ignore directories that also have an identifier - these will be unpacked resources
// if info.IsDir() {
// return filepath.SkipDir
// }
// // save
// err = reg.AddResource(identifier, version, nil, true, false, false)
// if err != nil {
// lastError = fmt.Errorf("%s: could not get add resource %s v%s: %w", reg.Name, identifier, version, err)
// log.Warning(lastError.Error())
// }
// return nil
// })
// return lastError
// }
// // LoadIndexes loads the current release indexes from disk
// // or will fetch a new version if not available and the
// // registry is marked as online.
// func (reg *ResourceRegistry) LoadIndexes(ctx context.Context) error {
// var firstErr error
// client := &http.Client{}
// for _, idx := range reg.getIndexes() {
// err := reg.loadIndexFile(idx)
// if err == nil {
// log.Debugf("%s: loaded index %s", reg.Name, idx.Path)
// } else if reg.Online {
// // try to download the index file if a local disk version
// // does not exist or we don't have permission to read it.
// if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrPermission) {
// err = reg.downloadIndex(ctx, client, idx)
// }
// }
// if err != nil && firstErr == nil {
// firstErr = err
// }
// }
// return firstErr
// }
// // getIndexes returns a copy of the index.
// // The indexes itself are references.
// func (reg *ResourceRegistry) getIndexes() []*Index {
// reg.RLock()
// defer reg.RUnlock()
// indexes := make([]*Index, len(reg.indexes))
// copy(indexes, reg.indexes)
// return indexes
// }
// func (reg *ResourceRegistry) loadIndexFile(idx *Index) error {
// indexPath := filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path))
// indexData, err := os.ReadFile(indexPath)
// if err != nil {
// return fmt.Errorf("failed to read index file %s: %w", idx.Path, err)
// }
// // Verify signature, if enabled.
// if verifOpts := reg.GetVerificationOptions(idx.Path); verifOpts != nil {
// // Load and check signature.
// verifiedHash, _, err := reg.loadAndVerifySigFile(verifOpts, indexPath+filesig.Extension)
// if err != nil {
// switch verifOpts.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("failed to verify signature of index %s: %w", idx.Path, err)
// case SignaturePolicyWarn:
// log.Warningf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
// case SignaturePolicyDisable:
// log.Debugf("%s: failed to verify signature of index %s: %s", reg.Name, idx.Path, err)
// }
// }
// // Check if signature checksum matches the index data.
// if err == nil && !verifiedHash.Matches(indexData) {
// switch verifOpts.DiskLoadPolicy {
// case SignaturePolicyRequire:
// return fmt.Errorf("index file %s does not match signature", idx.Path)
// case SignaturePolicyWarn:
// log.Warningf("%s: index file %s does not match signature", reg.Name, idx.Path)
// case SignaturePolicyDisable:
// log.Debugf("%s: index file %s does not match signature", reg.Name, idx.Path)
// }
// }
// }
// // Parse the index file.
// indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
// if err != nil {
// return fmt.Errorf("failed to parse index file %s: %w", idx.Path, err)
// }
// // Update last seen release.
// idx.LastRelease = indexFile.Published
// // Warn if there aren't any releases in the index.
// if len(indexFile.Releases) == 0 {
// log.Debugf("%s: index %s has no releases", reg.Name, idx.Path)
// return nil
// }
// // Add index releases to available resources.
// err = reg.AddResources(indexFile.Releases, idx, false, true, idx.PreRelease)
// if err != nil {
// log.Warningf("%s: failed to add resource: %s", reg.Name, err)
// }
// return nil
// }
// func (reg *ResourceRegistry) loadAndVerifySigFile(verifOpts *VerificationOptions, sigFilePath string) (*lhash.LabeledHash, []byte, error) {
// // Load signature file.
// sigFileData, err := os.ReadFile(sigFilePath)
// if err != nil {
// return nil, nil, fmt.Errorf("failed to read signature file: %w", err)
// }
// // Extract all signatures.
// sigs, err := filesig.ParseSigFile(sigFileData)
// switch {
// case len(sigs) == 0 && err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// case len(sigs) == 0:
// return nil, nil, errors.New("no signatures found in signature file")
// case err != nil:
// return nil, nil, fmt.Errorf("failed to parse signature file: %w", err)
// }
// // Verify all signatures.
// var verifiedHash *lhash.LabeledHash
// for _, sig := range sigs {
// fd, err := filesig.VerifyFileData(
// sig,
// nil,
// verifOpts.TrustStore,
// )
// if err != nil {
// return nil, sigFileData, err
// }
// // Save or check verified hash.
// if verifiedHash == nil {
// verifiedHash = fd.FileHash()
// } else if !fd.FileHash().Equal(verifiedHash) {
// // Return an error if two valid hashes mismatch.
// // For simplicity, all hash algorithms must be the same for now.
// return nil, sigFileData, errors.New("file hashes from different signatures do not match")
// }
// }
// return verifiedHash, sigFileData, nil
// }
// // CreateSymlinks creates a directory structure with unversioned symlinks to the given updates list.
// func (reg *ResourceRegistry) CreateSymlinks(symlinkRoot *utils.DirStructure) error {
// err := os.RemoveAll(symlinkRoot.Path)
// if err != nil {
// return fmt.Errorf("failed to wipe symlink root: %w", err)
// }
// err = symlinkRoot.Ensure()
// if err != nil {
// return fmt.Errorf("failed to create symlink root: %w", err)
// }
// reg.RLock()
// defer reg.RUnlock()
// for _, res := range reg.resources {
// if res.SelectedVersion == nil {
// return fmt.Errorf("no selected version available for %s", res.Identifier)
// }
// targetPath := res.SelectedVersion.storagePath()
// linkPath := filepath.Join(symlinkRoot.Path, filepath.FromSlash(res.Identifier))
// linkPathDir := filepath.Dir(linkPath)
// err = symlinkRoot.EnsureAbsPath(linkPathDir)
// if err != nil {
// return fmt.Errorf("failed to create dir for link: %w", err)
// }
// relativeTargetPath, err := filepath.Rel(linkPathDir, targetPath)
// if err != nil {
// return fmt.Errorf("failed to get relative target path: %w", err)
// }
// err = os.Symlink(relativeTargetPath, linkPath)
// if err != nil {
// return fmt.Errorf("failed to link %s: %w", res.Identifier, err)
// }
// }
// return nil
// }

View File

@@ -1,68 +0,0 @@
package updater
/*
func testLoadLatestScope(t *testing.T, basePath, filePath, expectedIdentifier, expectedVersion string) {
fullPath := filepath.Join(basePath, filePath)
// create dir
dirPath := filepath.Dir(fullPath)
err := os.MkdirAll(dirPath, 0755)
if err != nil {
t.Fatalf("could not create test dir: %s\n", err)
return
}
// touch file
err = os.WriteFile(fullPath, []byte{}, 0644)
if err != nil {
t.Fatalf("could not create test file: %s\n", err)
return
}
// run loadLatestScope
latest, err := ScanForLatest(basePath, true)
if err != nil {
t.Errorf("could not update latest: %s\n", err)
return
}
for key, val := range latest {
localUpdates[key] = val
}
// test result
version, ok := localUpdates[expectedIdentifier]
if !ok {
t.Errorf("identifier %s not in map", expectedIdentifier)
t.Errorf("current map: %v", localUpdates)
}
if version != expectedVersion {
t.Errorf("unexpected version for %s: %s", filePath, version)
}
}
func TestLoadLatestScope(t *testing.T) {
updatesLock.Lock()
defer updatesLock.Unlock()
tmpDir, err := os.MkdirTemp("", "testing_")
if err != nil {
t.Fatalf("could not create test dir: %s\n", err)
return
}
defer os.RemoveAll(tmpDir)
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-2-3.zip", "all/ui/assets.zip", "1.2.3")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-2-4b.zip", "all/ui/assets.zip", "1.2.4b")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-2-5.zip", "all/ui/assets.zip", "1.2.5")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-3-4.zip", "all/ui/assets.zip", "1.3.4")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v2-3-4.zip", "all/ui/assets.zip", "2.3.4")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-2-3.zip", "all/ui/assets.zip", "2.3.4")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-2-4.zip", "all/ui/assets.zip", "2.3.4")
testLoadLatestScope(t, tmpDir, "all/ui/assets_v1-3-4.zip", "all/ui/assets.zip", "2.3.4")
testLoadLatestScope(t, tmpDir, "os_platform/portmaster/portmaster_v1-2-3", "os_platform/portmaster/portmaster", "1.2.3")
testLoadLatestScope(t, tmpDir, "os_platform/portmaster/portmaster_v2-1-1", "os_platform/portmaster/portmaster", "2.1.1")
testLoadLatestScope(t, tmpDir, "os_platform/portmaster/portmaster_v1-2-3", "os_platform/portmaster/portmaster", "2.1.1")
}
*/

View File

@@ -1,195 +0,0 @@
package updater
// import (
// "archive/zip"
// "compress/gzip"
// "errors"
// "fmt"
// "io"
// "io/fs"
// "os"
// "path"
// "path/filepath"
// "strings"
// "github.com/hashicorp/go-multierror"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// // MaxUnpackSize specifies the maximum size that will be unpacked.
// const MaxUnpackSize = 1000000000 // 1GB
// // UnpackGZIP unpacks a GZIP compressed reader r
// // and returns a new reader. It's suitable to be
// // used with registry.GetPackedFile.
// func UnpackGZIP(r io.Reader) (io.Reader, error) {
// return gzip.NewReader(r)
// }
// // UnpackResources unpacks all resources defined in the AutoUnpack list.
// func (reg *ResourceRegistry) UnpackResources() error {
// reg.RLock()
// defer reg.RUnlock()
// var multierr *multierror.Error
// for _, res := range reg.resources {
// if utils.StringInSlice(reg.AutoUnpack, res.Identifier) {
// err := res.UnpackArchive()
// if err != nil {
// multierr = multierror.Append(
// multierr,
// fmt.Errorf("%s: %w", res.Identifier, err),
// )
// }
// }
// }
// return multierr.ErrorOrNil()
// }
// const (
// zipSuffix = ".zip"
// )
// // UnpackArchive unpacks the archive the resource refers to. The contents are
// // unpacked into a directory with the same name as the file, excluding the
// // suffix. If the destination folder already exists, it is assumed that the
// // contents have already been correctly unpacked.
// func (res *Resource) UnpackArchive() error {
// res.Lock()
// defer res.Unlock()
// // Only unpack selected versions.
// if res.SelectedVersion == nil {
// return nil
// }
// switch {
// case strings.HasSuffix(res.Identifier, zipSuffix):
// return res.unpackZipArchive()
// default:
// return fmt.Errorf("unsupported file type for unpacking")
// }
// }
// func (res *Resource) unpackZipArchive() error {
// // Get file and directory paths.
// archiveFile := res.SelectedVersion.storagePath()
// destDir := strings.TrimSuffix(archiveFile, zipSuffix)
// tmpDir := filepath.Join(
// res.registry.tmpDir.Path,
// filepath.FromSlash(strings.TrimSuffix(
// path.Base(res.SelectedVersion.versionedPath()),
// zipSuffix,
// )),
// )
// // Check status of destination.
// dstStat, err := os.Stat(destDir)
// switch {
// case errors.Is(err, fs.ErrNotExist):
// // The destination does not exist, continue with unpacking.
// case err != nil:
// return fmt.Errorf("cannot access destination for unpacking: %w", err)
// case !dstStat.IsDir():
// return fmt.Errorf("destination for unpacking is blocked by file: %s", dstStat.Name())
// default:
// // Archive already seems to be unpacked.
// return nil
// }
// // Create the tmp directory for unpacking.
// err = res.registry.tmpDir.EnsureAbsPath(tmpDir)
// if err != nil {
// return fmt.Errorf("failed to create tmp dir for unpacking: %w", err)
// }
// // Defer clean up of directories.
// defer func() {
// // Always clean up the tmp dir.
// _ = os.RemoveAll(tmpDir)
// // Cleanup the destination in case of an error.
// if err != nil {
// _ = os.RemoveAll(destDir)
// }
// }()
// // Open the archive for reading.
// var archiveReader *zip.ReadCloser
// archiveReader, err = zip.OpenReader(archiveFile)
// if err != nil {
// return fmt.Errorf("failed to open zip reader: %w", err)
// }
// defer func() {
// _ = archiveReader.Close()
// }()
// // Save all files to the tmp dir.
// for _, file := range archiveReader.File {
// err = copyFromZipArchive(
// file,
// filepath.Join(tmpDir, filepath.FromSlash(file.Name)),
// )
// if err != nil {
// return fmt.Errorf("failed to extract archive file %s: %w", file.Name, err)
// }
// }
// // Make the final move.
// err = os.Rename(tmpDir, destDir)
// if err != nil {
// return fmt.Errorf("failed to move the extracted archive from %s to %s: %w", tmpDir, destDir, err)
// }
// // Fix permissions on the destination dir.
// err = res.registry.storageDir.EnsureAbsPath(destDir)
// if err != nil {
// return fmt.Errorf("failed to apply directory permissions on %s: %w", destDir, err)
// }
// log.Infof("%s: unpacked %s", res.registry.Name, res.SelectedVersion.versionedPath())
// return nil
// }
// func copyFromZipArchive(archiveFile *zip.File, dstPath string) error {
// // If file is a directory, create it and continue.
// if archiveFile.FileInfo().IsDir() {
// err := os.Mkdir(dstPath, archiveFile.Mode())
// if err != nil {
// return fmt.Errorf("failed to create directory %s: %w", dstPath, err)
// }
// return nil
// }
// // Open archived file for reading.
// fileReader, err := archiveFile.Open()
// if err != nil {
// return fmt.Errorf("failed to open file in archive: %w", err)
// }
// defer func() {
// _ = fileReader.Close()
// }()
// // Open destination file for writing.
// dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, archiveFile.Mode())
// if err != nil {
// return fmt.Errorf("failed to open destination file %s: %w", dstPath, err)
// }
// defer func() {
// _ = dstFile.Close()
// }()
// // Copy full file from archive to dst.
// if _, err := io.CopyN(dstFile, fileReader, MaxUnpackSize); err != nil {
// // EOF is expected here as the archive is likely smaller
// // thane MaxUnpackSize
// if errors.Is(err, io.EOF) {
// return nil
// }
// return err
// }
// return nil
// }

View File

@@ -1,359 +0,0 @@
package updater
// import (
// "context"
// "fmt"
// "net/http"
// "os"
// "path"
// "path/filepath"
// "strings"
// "golang.org/x/exp/slices"
// "github.com/safing/jess/filesig"
// "github.com/safing/jess/lhash"
// "github.com/safing/portmaster/base/log"
// "github.com/safing/portmaster/base/utils"
// )
// // UpdateIndexes downloads all indexes. An error is only returned when all
// // indexes fail to update.
// func (reg *ResourceRegistry) UpdateIndexes(ctx context.Context) error {
// var lastErr error
// var anySuccess bool
// // Start registry operation.
// reg.state.StartOperation(StateChecking)
// defer reg.state.EndOperation()
// client := &http.Client{}
// for _, idx := range reg.getIndexes() {
// if err := reg.downloadIndex(ctx, client, idx); err != nil {
// lastErr = err
// log.Warningf("%s: failed to update index %s: %s", reg.Name, idx.Path, err)
// } else {
// anySuccess = true
// }
// }
// // If all indexes failed to update, fail.
// if !anySuccess {
// err := fmt.Errorf("failed to update all indexes, last error was: %w", lastErr)
// reg.state.ReportUpdateCheck(nil, err)
// return err
// }
// // Get pending resources and update status.
// pendingResourceVersions, _ := reg.GetPendingDownloads(true, false)
// reg.state.ReportUpdateCheck(
// humanInfoFromResourceVersions(pendingResourceVersions),
// nil,
// )
// return nil
// }
// func (reg *ResourceRegistry) downloadIndex(ctx context.Context, client *http.Client, idx *Index) error {
// var (
// // Index.
// indexErr error
// indexData []byte
// downloadURL string
// // Signature.
// sigErr error
// verifiedHash *lhash.LabeledHash
// sigFileData []byte
// verifOpts = reg.GetVerificationOptions(idx.Path)
// )
// // Upgrade to v2 index if verification is enabled.
// downloadIndexPath := idx.Path
// if verifOpts != nil {
// downloadIndexPath = strings.TrimSuffix(downloadIndexPath, baseIndexExtension) + v2IndexExtension
// }
// // Download new index and signature.
// for tries := range 3 {
// // Index and signature need to be fetched together, so that they are
// // fetched from the same source. One source should always have a matching
// // index and signature. Backup sources may be behind a little.
// // If the signature verification fails, another source should be tried.
// // Get index data.
// indexData, downloadURL, indexErr = reg.fetchData(ctx, client, downloadIndexPath, tries)
// if indexErr != nil {
// log.Debugf("%s: failed to fetch index %s: %s", reg.Name, downloadURL, indexErr)
// continue
// }
// // Get signature and verify it.
// if verifOpts != nil {
// verifiedHash, sigFileData, sigErr = reg.fetchAndVerifySigFile(
// ctx, client,
// verifOpts, downloadIndexPath+filesig.Extension, nil,
// tries,
// )
// if sigErr != nil {
// log.Debugf("%s: failed to verify signature of %s: %s", reg.Name, downloadURL, sigErr)
// continue
// }
// // Check if the index matches the verified hash.
// if verifiedHash.Matches(indexData) {
// log.Infof("%s: verified signature of %s", reg.Name, downloadURL)
// } else {
// sigErr = ErrIndexChecksumMismatch
// log.Debugf("%s: checksum does not match file from %s", reg.Name, downloadURL)
// continue
// }
// }
// break
// }
// if indexErr != nil {
// return fmt.Errorf("failed to fetch index %s: %w", downloadIndexPath, indexErr)
// }
// if sigErr != nil {
// return fmt.Errorf("failed to fetch or verify index %s signature: %w", downloadIndexPath, sigErr)
// }
// // Parse the index file.
// indexFile, err := ParseIndexFile(indexData, idx.Channel, idx.LastRelease)
// if err != nil {
// return fmt.Errorf("failed to parse index %s: %w", idx.Path, err)
// }
// // Add index data to registry.
// if len(indexFile.Releases) > 0 {
// // Check if all resources are within the indexes' authority.
// authoritativePath := path.Dir(idx.Path) + "/"
// if authoritativePath == "./" {
// // Fix path for indexes at the storage root.
// authoritativePath = ""
// }
// cleanedData := make(map[string]string, len(indexFile.Releases))
// for key, version := range indexFile.Releases {
// if strings.HasPrefix(key, authoritativePath) {
// cleanedData[key] = version
// } else {
// log.Warningf("%s: index %s oversteps it's authority by defining version for %s", reg.Name, idx.Path, key)
// }
// }
// // add resources to registry
// err = reg.AddResources(cleanedData, idx, false, true, idx.PreRelease)
// if err != nil {
// log.Warningf("%s: failed to add resources: %s", reg.Name, err)
// }
// } else {
// log.Debugf("%s: index %s is empty", reg.Name, idx.Path)
// }
// // Check if dest dir exists.
// indexDir := filepath.FromSlash(path.Dir(idx.Path))
// err = reg.storageDir.EnsureRelPath(indexDir)
// if err != nil {
// log.Warningf("%s: failed to ensure directory for updated index %s: %s", reg.Name, idx.Path, err)
// }
// // Index files must be readable by portmaster-staert with user permissions in order to load the index.
// err = os.WriteFile( //nolint:gosec
// filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)),
// indexData, 0o0644,
// )
// if err != nil {
// log.Warningf("%s: failed to save updated index %s: %s", reg.Name, idx.Path, err)
// }
// // Write signature file, if we have one.
// if len(sigFileData) > 0 {
// err = os.WriteFile( //nolint:gosec
// filepath.Join(reg.storageDir.Path, filepath.FromSlash(idx.Path)+filesig.Extension),
// sigFileData, 0o0644,
// )
// if err != nil {
// log.Warningf("%s: failed to save updated index signature %s: %s", reg.Name, idx.Path+filesig.Extension, err)
// }
// }
// log.Infof("%s: updated index %s with %d entries", reg.Name, idx.Path, len(indexFile.Releases))
// return nil
// }
// // DownloadUpdates checks if updates are available and downloads updates of used components.
// func (reg *ResourceRegistry) DownloadUpdates(ctx context.Context, includeManual bool) error {
// // Start registry operation.
// reg.state.StartOperation(StateDownloading)
// defer reg.state.EndOperation()
// // Get pending updates.
// toUpdate, missingSigs := reg.GetPendingDownloads(includeManual, true)
// downloadDetailsResources := humanInfoFromResourceVersions(toUpdate)
// reg.state.UpdateOperationDetails(&StateDownloadingDetails{
// Resources: downloadDetailsResources,
// })
// // nothing to update
// if len(toUpdate) == 0 && len(missingSigs) == 0 {
// log.Infof("%s: everything up to date", reg.Name)
// return nil
// }
// // check download dir
// if err := reg.tmpDir.Ensure(); err != nil {
// return fmt.Errorf("could not prepare tmp directory for download: %w", err)
// }
// // download updates
// log.Infof("%s: starting to download %d updates", reg.Name, len(toUpdate))
// client := &http.Client{}
// var reportError error
// for i, rv := range toUpdate {
// log.Infof(
// "%s: downloading update [%d/%d]: %s version %s",
// reg.Name,
// i+1, len(toUpdate),
// rv.resource.Identifier, rv.VersionNumber,
// )
// var err error
// for tries := range 3 {
// err = reg.fetchFile(ctx, client, rv, tries)
// if err == nil {
// // Update resource version state.
// rv.resource.Lock()
// rv.Available = true
// if rv.resource.VerificationOptions != nil {
// rv.SigAvailable = true
// }
// rv.resource.Unlock()
// break
// }
// }
// if err != nil {
// reportError := fmt.Errorf("failed to download %s version %s: %w", rv.resource.Identifier, rv.VersionNumber, err)
// log.Warningf("%s: %s", reg.Name, reportError)
// }
// reg.state.UpdateOperationDetails(&StateDownloadingDetails{
// Resources: downloadDetailsResources,
// FinishedUpTo: i + 1,
// })
// }
// if len(missingSigs) > 0 {
// log.Infof("%s: downloading %d missing signatures", reg.Name, len(missingSigs))
// for _, rv := range missingSigs {
// var err error
// for tries := range 3 {
// err = reg.fetchMissingSig(ctx, client, rv, tries)
// if err == nil {
// // Update resource version state.
// rv.resource.Lock()
// rv.SigAvailable = true
// rv.resource.Unlock()
// break
// }
// }
// if err != nil {
// reportError := fmt.Errorf("failed to download missing sig of %s version %s: %w", rv.resource.Identifier, rv.VersionNumber, err)
// log.Warningf("%s: %s", reg.Name, reportError)
// }
// }
// }
// reg.state.ReportDownloads(
// downloadDetailsResources,
// reportError,
// )
// log.Infof("%s: finished downloading updates", reg.Name)
// return nil
// }
// // DownloadUpdates checks if updates are available and downloads updates of used components.
// // GetPendingDownloads returns the list of pending downloads.
// // If manual is set, indexes with AutoDownload=false will be checked.
// // If auto is set, indexes with AutoDownload=true will be checked.
// func (reg *ResourceRegistry) GetPendingDownloads(manual, auto bool) (resources, sigs []*ResourceVersion) {
// reg.RLock()
// defer reg.RUnlock()
// // create list of downloads
// var toUpdate []*ResourceVersion
// var missingSigs []*ResourceVersion
// for _, res := range reg.resources {
// func() {
// res.Lock()
// defer res.Unlock()
// // Skip resources without index or indexes that should not be reported
// // according to parameters.
// switch {
// case res.Index == nil:
// // Cannot download if resource is not part of an index.
// return
// case manual && !res.Index.AutoDownload:
// // Manual update report and index is not auto-download.
// case auto && res.Index.AutoDownload:
// // Auto update report and index is auto-download.
// default:
// // Resource should not be reported.
// return
// }
// // Skip resources we don't need.
// switch {
// case res.inUse():
// // Update if resource is in use.
// case res.available():
// // Update if resource is available locally, ie. was used in the past.
// case utils.StringInSlice(reg.MandatoryUpdates, res.Identifier):
// // Update is set as mandatory.
// default:
// // Resource does not need to be updated.
// return
// }
// // Go through all versions until we find versions that need updating.
// for _, rv := range res.Versions {
// switch {
// case !rv.CurrentRelease:
// // We are not interested in older releases.
// case !rv.Available:
// // File not available locally, download!
// toUpdate = append(toUpdate, rv)
// case !rv.SigAvailable && res.VerificationOptions != nil:
// // File signature is not available and verification is enabled, download signature!
// missingSigs = append(missingSigs, rv)
// }
// }
// }()
// }
// slices.SortFunc(toUpdate, func(a, b *ResourceVersion) int {
// return strings.Compare(a.resource.Identifier, b.resource.Identifier)
// })
// slices.SortFunc(missingSigs, func(a, b *ResourceVersion) int {
// return strings.Compare(a.resource.Identifier, b.resource.Identifier)
// })
// return toUpdate, missingSigs
// }
// func humanInfoFromResourceVersions(resourceVersions []*ResourceVersion) []string {
// identifiers := make([]string, len(resourceVersions))
// for i, rv := range resourceVersions {
// identifiers[i] = fmt.Sprintf("%s v%s", rv.resource.Identifier, rv.VersionNumber)
// }
// return identifiers
// }

View File

@@ -6,7 +6,6 @@ import (
"io"
"os"
"path/filepath"
"runtime"
semver "github.com/hashicorp/go-version"
"github.com/safing/portmaster/base/log"
@@ -112,19 +111,6 @@ func (r *Registry) performUpgrade(downloadDir string, indexFile string) error {
} else {
log.Debugf("updates: %s moved", artifact.Filename)
}
// Special case for linux.
// When installed the portmaster ui path is `/usr/bin/portmaster`. During update the ui will be placed in `/usr/lib/portmaster/portmaster`
// After an update the original binary should be deleted and replaced by symlink
// `/usr/bin/portmaster` -> `/usr/lib/portmaster/portmaster`
if runtime.GOOS == "linux" && artifact.Filename == "portmaster" && artifact.Platform == currentPlatform {
err = r.makeSymlinkForUI()
if err != nil {
log.Errorf("failed to create symlink for the ui: %s", err)
} else {
log.Infof("updates: ui symlink successfully created")
}
}
}
log.Infof("updates: update complete")
@@ -212,16 +198,6 @@ func (r *Registry) CleanOldFiles() error {
return nil
}
func (r *Registry) makeSymlinkForUI() error {
portmasterBinPath := "/usr/bin/portmaster"
_ = os.Remove(portmasterBinPath)
err := os.Symlink(filepath.Join(r.dir, "portmaster"), portmasterBinPath)
if err != nil {
return fmt.Errorf("failed to create symlink: %w", err)
}
return nil
}
type File struct {
id string
path string