Restructure modules (#1572)

* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
This commit is contained in:
Daniel Hååvi
2024-08-09 17:15:48 +02:00
committed by GitHub
parent 10a77498f4
commit 80664d1a27
647 changed files with 37690 additions and 3366 deletions

368
base/container/container.go Normal file
View File

@@ -0,0 +1,368 @@
package container
import (
"errors"
"io"
"github.com/safing/structures/varint"
)
// Container is a []byte slice on steroids, allowing for quick data appending, prepending and fetching.
type Container struct {
	compartments [][]byte // held data slices; only entries at index >= offset are valid
	offset       int      // index of the first valid compartment; slots before it are free for prepending
	err          error    // transported error, if any
}
// Data Handling
// NewContainer is DEPRECATED, please use New(), it's the same thing.
//
// Deprecated: use New instead.
func NewContainer(data ...[]byte) *Container {
	return New(data...)
}
// New creates a new container with an optional initial []byte slice. Data will NOT be copied.
func New(data ...[]byte) *Container {
	c := &Container{}
	c.compartments = data
	return c
}
// Prepend prepends data. Data will NOT be copied.
func (c *Container) Prepend(data []byte) {
	if c.offset < 1 {
		// No free slot in front; rebuild the compartment slice with spare
		// capacity for future prepends.
		c.renewCompartments()
	}
	newOffset := c.offset - 1
	c.compartments[newOffset] = data
	c.offset = newOffset
}
// Append appends the given data. Data will NOT be copied.
func (c *Container) Append(data []byte) {
	c.compartments = append(c.compartments, data)
}
// PrependNumber prepends a number (varint encoded).
func (c *Container) PrependNumber(n uint64) {
	encoded := varint.Pack64(n)
	c.Prepend(encoded)
}
// AppendNumber appends a number (varint encoded).
func (c *Container) AppendNumber(n uint64) {
	// Route through Append for consistency with PrependNumber, which routes
	// through Prepend; behavior is identical.
	c.Append(varint.Pack64(n))
}
// PrependInt prepends an int (varint encoded).
func (c *Container) PrependInt(n int) {
	c.PrependNumber(uint64(n))
}
// AppendInt appends an int (varint encoded).
func (c *Container) AppendInt(n int) {
	// Delegate to AppendNumber for consistency with PrependInt, which
	// delegates to PrependNumber; behavior is identical.
	c.AppendNumber(uint64(n))
}
// AppendAsBlock appends the length of the data and the data itself. Data will NOT be copied.
func (c *Container) AppendAsBlock(data []byte) {
	blockSize := uint64(len(data))
	c.AppendNumber(blockSize)
	c.Append(data)
}
// PrependAsBlock prepends the length of the data and the data itself. Data will NOT be copied.
func (c *Container) PrependAsBlock(data []byte) {
	// Prepend the payload first, then its length, so that the length varint
	// ends up in front of the data.
	c.Prepend(data)
	c.PrependNumber(uint64(len(data)))
}
// AppendContainer appends another Container. Data will NOT be copied.
func (c *Container) AppendContainer(data *Container) {
	// Only take valid compartments: slots before data.offset were either
	// consumed (nil-ed by skip/WriteToSlice) or never filled, so copying them
	// would only add useless nil entries.
	c.compartments = append(c.compartments, data.compartments[data.offset:]...)
}
// AppendContainerAsBlock appends another Container (length and data). Data will NOT be copied.
func (c *Container) AppendContainerAsBlock(data *Container) {
	c.AppendNumber(uint64(data.Length()))
	// Only take valid compartments: slots before data.offset are consumed
	// (nil) and carry no data.
	c.compartments = append(c.compartments, data.compartments[data.offset:]...)
}
// HoldsData returns true if the Container holds any data.
func (c *Container) HoldsData() bool {
	for _, comp := range c.compartments[c.offset:] {
		if len(comp) > 0 {
			return true
		}
	}
	return false
}
// Length returns the full length of all bytes held by the container.
func (c *Container) Length() (length int) {
	for _, comp := range c.compartments[c.offset:] {
		length += len(comp)
	}
	return length
}
// Replace replaces all held data with a new data slice. Data will NOT be copied.
func (c *Container) Replace(data []byte) {
	c.compartments = [][]byte{data}
	// Reset the offset: previously it was left untouched, so a container with
	// offset > 0 (after prepends/consumption) would report Length() == 0 and
	// the replacement data would be invisible.
	c.offset = 0
}
// CompileData concatenates all bytes held by the container and returns it as one single []byte slice. Data will NOT be copied and is NOT consumed.
func (c *Container) CompileData() []byte {
	if len(c.compartments) != 1 {
		// Merge all valid compartments into a single fresh buffer and make it
		// the only compartment.
		merged := make([]byte, 0, c.Length())
		for _, comp := range c.compartments[c.offset:] {
			merged = append(merged, comp...)
		}
		c.compartments = [][]byte{merged}
		c.offset = 0
	}
	return c.compartments[0]
}
// Get returns the given amount of bytes. Data MAY be copied and IS consumed.
// It returns an error if the container holds less than n bytes.
func (c *Container) Get(n int) ([]byte, error) {
	data := c.Peek(n)
	if len(data) < n {
		return nil, errors.New("container: not enough data to return")
	}
	c.skip(len(data))
	return data, nil
}
// GetAll returns all data. Data MAY be copied and IS consumed.
func (c *Container) GetAll() []byte {
	// TODO: Improve.
	all := c.Peek(c.Length())
	c.skip(len(all))
	return all
}
// GetAsContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS consumed.
// It returns an error if the container holds less than n bytes.
func (c *Container) GetAsContainer(n int) (*Container, error) {
	sub := c.PeekContainer(n)
	if sub == nil {
		return nil, errors.New("container: not enough data to return")
	}
	c.skip(n)
	return sub, nil
}
// GetMax returns as much as possible, but the given amount of bytes at maximum. Data MAY be copied and IS consumed.
func (c *Container) GetMax(n int) []byte {
	data := c.Peek(n)
	c.skip(len(data))
	return data
}
// WriteToSlice copies data to the given slice until it is full, or the container is empty. It returns the bytes written and if the container is now empty. Data IS copied and IS consumed.
func (c *Container) WriteToSlice(slice []byte) (n int, containerEmptied bool) {
	for i := c.offset; i < len(c.compartments); i++ {
		copy(slice, c.compartments[i])
		if len(slice) < len(c.compartments[i]) {
			// only part was copied: keep the unread remainder of this
			// compartment and report that the container is not empty yet
			n += len(slice)
			c.compartments[i] = c.compartments[i][len(slice):]
			c.checkOffset()
			return n, false
		}
		// all was copied: drop this compartment and advance both the
		// destination slice and the offset
		n += len(c.compartments[i])
		slice = slice[len(c.compartments[i]):]
		c.compartments[i] = nil
		c.offset = i + 1
	}
	// every compartment was drained
	c.checkOffset()
	return n, true
}
// WriteAllTo writes all the data to the given io.Writer. Data IS NOT copied (but may be by writer) and IS NOT consumed.
func (c *Container) WriteAllTo(writer io.Writer) error {
	for _, comp := range c.compartments[c.offset:] {
		// Keep writing until the whole compartment is out, handling short writes.
		remaining := comp
		for len(remaining) > 0 {
			n, err := writer.Write(remaining)
			if err != nil {
				return err
			}
			remaining = remaining[n:]
		}
	}
	return nil
}
// clean rebuilds the compartment slice once too many consumed slots have
// accumulated in front of the offset.
func (c *Container) clean() {
	if c.offset <= 100 {
		return
	}
	c.renewCompartments()
}
// renewCompartments rebuilds the compartment slice, dropping all consumed
// slots before the offset and reserving free leading slots for future
// prepends.
func (c *Container) renewCompartments() {
	// Remaining compartments plus 5 leading slots.
	baseLength := len(c.compartments) - c.offset + 5
	newCompartments := make([][]byte, baseLength, baseLength+5)
	// Existing data is copied to index 5 onward.
	copy(newCompartments[5:], c.compartments[c.offset:])
	c.compartments = newCompartments
	// NOTE(review): offset is set to 4 while the copied data starts at index
	// 5, so slot 4 is a nil compartment inside the valid range. Harmless
	// (zero length), but looks like an off-by-one — confirm intent.
	c.offset = 4
}
// carbonCopy returns a shallow duplicate of the container: the compartment
// list is cloned, but the underlying byte slices are shared with the original.
func (c *Container) carbonCopy() *Container {
	dup := &Container{
		compartments: make([][]byte, len(c.compartments)),
		offset:       c.offset,
		err:          c.err,
	}
	copy(dup.compartments, c.compartments)
	return dup
}
// checkOffset resets the offset when it has run past the end of the
// compartment slice, i.e. when everything was consumed.
func (c *Container) checkOffset() {
	if c.offset >= len(c.compartments) {
		// NOTE(review): the offset is reset to the middle rather than to 0 —
		// presumably to keep some of the (now consumed, nil) leading slots
		// available for cheap prepending. Confirm intent.
		c.offset = len(c.compartments) / 2
	}
}
// Block Handling
// PrependLength prepends the current full length of all bytes in the container.
func (c *Container) PrependLength() {
	c.PrependNumber(uint64(c.Length()))
}
// Peek returns the given amount of bytes. Data MAY be copied and IS NOT consumed.
// If less data is available, the returned slice is shorter than n.
func (c *Container) Peek(n int) []byte {
	// Check requested length.
	if n <= 0 {
		return nil
	}

	// Guard against an exhausted container: previously c.compartments[c.offset]
	// was indexed unconditionally, which panicked with index-out-of-range on a
	// container created via New() with no data (or otherwise fully drained of
	// compartments).
	if c.offset >= len(c.compartments) {
		return nil
	}

	// Fast path: the first compartment already holds enough data.
	if len(c.compartments[c.offset]) >= n {
		return c.compartments[c.offset][:n]
	}

	// Slow path: gather data across compartments into a fresh slice.
	slice := make([]byte, n)
	copySlice := slice
	n = 0
	for i := c.offset; i < len(c.compartments); i++ {
		copy(copySlice, c.compartments[i])
		if len(copySlice) <= len(c.compartments[i]) {
			// The destination is full.
			n += len(copySlice)
			return slice[:n]
		}
		n += len(c.compartments[i])
		copySlice = copySlice[len(c.compartments[i]):]
	}
	// Less data available than requested; return what we gathered.
	return slice[:n]
}
// PeekContainer returns the given amount of bytes in a new container. Data will NOT be copied and IS NOT consumed.
// Returns nil if the container does not hold enough data.
func (c *Container) PeekContainer(n int) (newC *Container) {
	// Check requested length.
	if n < 0 {
		return nil
	} else if n == 0 {
		return &Container{}
	}

	newC = &Container{}
	for i := c.offset; i < len(c.compartments); i++ {
		if n >= len(c.compartments[i]) {
			// Take the whole compartment.
			newC.compartments = append(newC.compartments, c.compartments[i])
			n -= len(c.compartments[i])
		} else {
			// Take only the needed prefix.
			newC.compartments = append(newC.compartments, c.compartments[i][:n])
			n = 0
		}
		if n == 0 {
			// Request satisfied. Previously the loop kept running and appended
			// zero-length slices of every remaining compartment to the result.
			return newC
		}
	}
	// Not enough data available.
	return nil
}
// skip consumes n bytes from the front of the container, nil-ing fully
// consumed compartments and advancing the offset.
func (c *Container) skip(n int) {
	for i := c.offset; i < len(c.compartments); i++ {
		if len(c.compartments[i]) <= n {
			// Compartment fully consumed: release it and move the offset
			// past it.
			n -= len(c.compartments[i])
			c.offset = i + 1
			c.compartments[i] = nil
			if n == 0 {
				c.checkOffset()
				return
			}
		} else {
			// Partially consumed: keep the unread remainder in place.
			c.compartments[i] = c.compartments[i][n:]
			c.checkOffset()
			return
		}
	}
	// NOTE(review): if n exceeds the available data, the surplus is silently
	// ignored — callers must ensure n <= Length().
	c.checkOffset()
}
// GetNextBlock returns the next block of data defined by a varint. Data MAY be copied and IS consumed.
func (c *Container) GetNextBlock() ([]byte, error) {
	size, err := c.GetNextN64()
	if err != nil {
		return nil, err
	}
	return c.Get(int(size))
}
// GetNextBlockAsContainer returns the next block of data as a Container defined by a varint. Data will NOT be copied and IS consumed.
func (c *Container) GetNextBlockAsContainer() (*Container, error) {
	size, err := c.GetNextN64()
	if err != nil {
		return nil, err
	}
	return c.GetAsContainer(int(size))
}
// GetNextN8 parses and returns a varint of type uint8.
func (c *Container) GetNextN8() (uint8, error) {
	// A varint-encoded uint8 occupies at most 2 bytes.
	num, consumed, err := varint.Unpack8(c.Peek(2))
	if err != nil {
		return 0, err
	}
	c.skip(consumed)
	return num, nil
}
// GetNextN16 parses and returns a varint of type uint16.
func (c *Container) GetNextN16() (uint16, error) {
	// A varint-encoded uint16 occupies at most 3 bytes.
	num, consumed, err := varint.Unpack16(c.Peek(3))
	if err != nil {
		return 0, err
	}
	c.skip(consumed)
	return num, nil
}
// GetNextN32 parses and returns a varint of type uint32.
func (c *Container) GetNextN32() (uint32, error) {
	// A varint-encoded uint32 occupies at most 5 bytes.
	num, consumed, err := varint.Unpack32(c.Peek(5))
	if err != nil {
		return 0, err
	}
	c.skip(consumed)
	return num, nil
}
// GetNextN64 parses and returns a varint of type uint64.
func (c *Container) GetNextN64() (uint64, error) {
	// A varint-encoded uint64 occupies at most 10 bytes.
	num, consumed, err := varint.Unpack64(c.Peek(10))
	if err != nil {
		return 0, err
	}
	c.skip(consumed)
	return num, nil
}

View File

@@ -0,0 +1,208 @@
package container
import (
"bytes"
"testing"
"github.com/safing/portmaster/base/utils"
)
var (
testData = []byte("The quick brown fox jumps over the lazy dog")
testDataSplitted = [][]byte{
[]byte("T"),
[]byte("he"),
[]byte(" qu"),
[]byte("ick "),
[]byte("brown"),
[]byte(" fox j"),
[]byte("umps ov"),
[]byte("er the l"),
[]byte("azy dog"),
}
)
// TestContainerDataHandling pipes the same test data through many different
// container operations and verifies that each path reproduces it intact.
func TestContainerDataHandling(t *testing.T) {
	t.Parallel()

	// c1: container holding the data as a single compartment.
	c1 := New(utils.DuplicateBytes(testData))
	c1c := c1.carbonCopy()

	// c2: built by appending one byte at a time, drained from a copy of c1.
	c2 := New()
	for range len(testData) {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}
	c2c := c2.carbonCopy()

	// c3: the same byte compartments, prepended in reverse order.
	c3 := New()
	for i := len(c2c.compartments) - 1; i >= c2c.offset; i-- {
		c3.Prepend(c2c.compartments[i])
	}
	c3c := c3.carbonCopy()

	// d4: drained into an oversized slice in a single call.
	d4 := make([]byte, len(testData)*2)
	n, _ := c3c.WriteToSlice(d4)
	d4 = d4[:n]
	c3c = c3.carbonCopy()

	// d5: drained one byte at a time into successive sub-slices.
	d5 := make([]byte, len(testData))
	for i := range len(testData) {
		c3c.WriteToSlice(d5[i : i+1])
	}

	// c6: set in one go via Replace.
	c6 := New()
	c6.Replace(testData)

	// c7: built by appending pre-split chunks.
	c7 := New(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c7.Append(testDataSplitted[i])
	}

	// c8: prepend many nil slices to push the offset past 100 and force a
	// compartment rebuild via clean().
	c8 := New(testDataSplitted...)
	for range 110 {
		c8.Prepend(nil)
	}
	c8.clean()

	// c9/c10: non-consuming peeks into new containers; c10 peeks all but the
	// last byte and appends it manually.
	c9 := c8.PeekContainer(len(testData))
	c10 := c9.PeekContainer(len(testData) - 1)
	c10.Append(testData[len(testData)-1:])

	compareMany(t, testData, c1.CompileData(), c2.CompileData(), c3.CompileData(), d4, d5, c6.CompileData(), c7.CompileData(), c8.CompileData(), c9.CompileData(), c10.CompileData())
}
// compareMany asserts that every given sample equals the reference bytes.
func compareMany(t *testing.T, reference []byte, other ...[]byte) {
	t.Helper()

	for i, sample := range other {
		if bytes.Equal(reference, sample) {
			continue
		}
		t.Errorf("sample %d does not match reference: sample is '%s'", i+1, string(sample))
	}
}
// TestDataFetching checks GetMax and the error paths of Get and
// GetAsContainer when more data is requested than is available.
func TestDataFetching(t *testing.T) {
	t.Parallel()

	c1 := New(utils.DuplicateBytes(testData))

	data := c1.GetMax(1)
	if string(data[0]) != "T" {
		t.Errorf("failed to GetMax(1), got %s, expected %s", string(data), "T")
	}

	if _, err := c1.Get(1000); err == nil {
		t.Error("should fail")
	}
	if _, err := c1.GetAsContainer(1000); err == nil {
		t.Error("should fail")
	}
}
// TestBlocks verifies that a prepended length varint can be read back through
// every GetNextN* width. The test data is 43 bytes long, so each read must
// yield 43.
func TestBlocks(t *testing.T) {
	t.Parallel()

	c1 := New(utils.DuplicateBytes(testData))

	c1.PrependLength()
	n, err := c1.GetNextN8()
	if err != nil {
		t.Errorf("GetNextN8() failed: %s", err)
	}
	if n != 43 {
		t.Errorf("n should be 43, was %d", n)
	}

	c1.PrependLength()
	n2, err := c1.GetNextN16()
	if err != nil {
		t.Errorf("GetNextN16() failed: %s", err)
	}
	if n2 != 43 {
		t.Errorf("n should be 43, was %d", n2)
	}

	c1.PrependLength()
	n3, err := c1.GetNextN32()
	if err != nil {
		t.Errorf("GetNextN32() failed: %s", err)
	}
	if n3 != 43 {
		t.Errorf("n should be 43, was %d", n3)
	}

	c1.PrependLength()
	n4, err := c1.GetNextN64()
	if err != nil {
		t.Errorf("GetNextN64() failed: %s", err)
	}
	if n4 != 43 {
		t.Errorf("n should be 43, was %d", n4)
	}
}
// TestContainerBlockHandling reads length-prefixed blocks back from
// containers that were filled in different ways.
func TestContainerBlockHandling(t *testing.T) {
	t.Parallel()

	// c1: length-prefixed data followed by an appended block (two blocks total).
	c1 := New(utils.DuplicateBytes(testData))
	c1.PrependLength()
	c1.AppendAsBlock(testData)
	c1c := c1.carbonCopy()

	// c2: the same bytes, transferred one byte at a time from a copy of c1.
	c2 := New(nil)
	for range c1.Length() {
		oneByte := make([]byte, 1)
		c1c.WriteToSlice(oneByte)
		c2.Append(oneByte)
	}

	// c3: pre-split chunks with a prepended length (one block).
	c3 := New(testDataSplitted[0])
	for i := 1; i < len(testDataSplitted); i++ {
		c3.Append(testDataSplitted[i])
	}
	c3.PrependLength()

	d1, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d2, err := c1.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d3, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d4, err := c2.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	d5, err := c3.GetNextBlock()
	if err != nil {
		t.Errorf("GetNextBlock failed: %s", err)
	}
	compareMany(t, testData, d1, d2, d3, d4, d5)
}
// TestContainerMisc checks that an empty container compiles to empty data.
func TestContainerMisc(t *testing.T) {
	t.Parallel()

	empty := New()
	if compiled := empty.CompileData(); len(compiled) > 0 {
		t.Fatalf("empty container should not hold any data")
	}
}
// TestDeprecated ensures the deprecated NewContainer constructor still works.
func TestDeprecated(t *testing.T) {
	t.Parallel()

	_ = NewContainer(utils.DuplicateBytes(testData))
}

26
base/container/doc.go Normal file
View File

@@ -0,0 +1,26 @@
// Package container gives you a []byte slice on steroids, allowing for quick data appending, prepending and fetching as well as transparent error transportation.
//
// A Container is basically a [][]byte slice that just appends new []byte slices and only copies things around when necessary.
//
// Byte slices added to the Container are not changed or appended, to not corrupt any other data that may be before and after the given slice.
// If interested, consider the following example to understand why this is important:
//
// package main
//
// import (
// "fmt"
// )
//
// func main() {
// a := []byte{0, 1,2,3,4,5,6,7,8,9}
// fmt.Printf("a: %+v\n", a)
// fmt.Printf("\nmaking changes...\n(we are not changing a directly)\n\n")
// b := a[2:6]
// c := append(b, 10, 11)
// fmt.Printf("b: %+v\n", b)
// fmt.Printf("c: %+v\n", c)
// fmt.Printf("a: %+v\n", a)
// }
//
// run it here: https://play.golang.org/p/xu1BXT3QYeE
package container

View File

@@ -0,0 +1,21 @@
package container
import (
"encoding/json"
)
// MarshalJSON serializes the container as a JSON byte array.
func (c *Container) MarshalJSON() ([]byte, error) {
	compiled := c.CompileData()
	return json.Marshal(compiled)
}
// UnmarshalJSON unserializes a container from a JSON byte array.
func (c *Container) UnmarshalJSON(data []byte) error {
	var raw []byte
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	c.compartments = [][]byte{raw}
	// Reset the offset: previously it was left untouched, so unmarshaling
	// into a container whose offset was > 0 would leave the new data
	// invisible to Length, Peek, etc.
	c.offset = 0
	return nil
}