Restructure modules (#1572)

* Move portbase into monorepo

* Add new simple module mgr

* [WIP] Switch to new simple module mgr

* Add StateMgr and more worker variants

* [WIP] Switch more modules

* [WIP] Switch more modules

* [WIP] switch more modules

* [WIP] switch all SPN modules

* [WIP] switch all service modules

* [WIP] Convert all workers to the new module system

* [WIP] add new task system to module manager

* [WIP] Add second take for scheduling workers

* [WIP] Add FIXME for bugs in new scheduler

* [WIP] Add minor improvements to scheduler

* [WIP] Add new worker scheduler

* [WIP] Fix more bugs related to new module system

* [WIP] Fix start handling of the new module system

* [WIP] Improve startup process

* [WIP] Fix minor issues

* [WIP] Fix missing subsystem in settings

* [WIP] Initialize managers in constructor

* [WIP] Move module event initialization to constructors

* [WIP] Fix setting for enabling and disabling the SPN module

* [WIP] Move API registration into module construction

* [WIP] Update states mgr for all modules

* [WIP] Add CmdLine operation support

* Add state helper methods to module group and instance

* Add notification and module status handling to status package

* Fix starting issues

* Remove pilot widget and update security lock to new status data

* Remove debug logs

* Improve http server shutdown

* Add workaround for cleanly shutting down firewall+netquery

* Improve logging

* Add syncing states with notifications for new module system

* Improve starting, stopping, shutdown; resolve FIXMEs/TODOs

* [WIP] Fix most unit tests

* Review new module system and fix minor issues

* Push shutdown and restart events again via API

* Set sleep mode via interface

* Update example/template module

* [WIP] Fix spn/cabin unit test

* Remove deprecated UI elements

* Make log output more similar for the logging transition phase

* Switch spn hub and observer cmds to new module system

* Fix log sources

* Make worker mgr less error prone

* Fix tests and minor issues

* Fix observation hub

* Improve shutdown and restart handling

* Split up big connection.go source file

* Move varint and dsd packages to structures repo

* Improve expansion test

* Fix linter warnings

* Fix interception module on windows

* Fix linter errors

---------

Co-authored-by: Vladimir Stoilov <vladimir@safing.io>
This commit is contained in:
Daniel Hååvi
2024-08-09 17:15:48 +02:00
committed by GitHub
parent 10a77498f4
commit 80664d1a27
647 changed files with 37690 additions and 3366 deletions

13
base/log/flags.go Normal file
View File

@@ -0,0 +1,13 @@
package log
import "flag"
// Command line flags for configuring log levels; parsed by the flag package
// and applied in Start().
var (
	logLevelFlag     string // global log level name, e.g. "debug".
	pkgLogLevelsFlag string // per-package levels, e.g. "database=trace,notifications=debug".
)

// init registers the -log and -plog command line flags.
func init() {
	flag.StringVar(&logLevelFlag, "log", "", "set log level to [trace|debug|info|warning|error|critical]")
	flag.StringVar(&pkgLogLevelsFlag, "plog", "", "set log level of packages: database=trace,notifications=debug")
}

158
base/log/formatting.go Normal file
View File

@@ -0,0 +1,158 @@
package log
import (
"fmt"
"time"
)
// counter numbers output lines (000-999) to make interleaved output easier to
// follow. NOTE(review): it is not synchronized; assumed to be used only from
// the single writer goroutine — confirm.
var counter uint16

const (
	// maxCount is the value at which counter wraps back to 0.
	maxCount uint16 = 999
	// timeFormat is the reference time layout used for log timestamps.
	timeFormat string = "2006-01-02 15:04:05.000"
)
// String returns the three-letter log prefix of the severity, or "NON" for
// unknown values.
func (s Severity) String() string {
	names := [...]string{
		TraceLevel:    "TRC",
		DebugLevel:    "DBG",
		InfoLevel:     "INF",
		WarningLevel:  "WRN",
		ErrorLevel:    "ERR",
		CriticalLevel: "CRT",
	}
	if int(s) < len(names) && names[s] != "" {
		return names[s]
	}
	return "NON"
}
// formatLine renders a single log line - including its attached trace, if
// any - into the final output string. duplicates is the number of directly
// repeated identical lines that were suppressed; it is appended as an "[Nx]"
// marker. useColor toggles ANSI color sequences in the output.
func formatLine(line *logLine, duplicates uint64, useColor bool) string {
	var colorStart, colorEnd, colorDim, colorEndDim string
	if useColor {
		colorStart = line.level.color()
		colorEnd = endColor()
		colorDim = dimColor()
		colorEndDim = endDimColor()
	}

	// Number this line; purely cosmetic, wraps at maxCount (see below).
	counter++

	var fLine string
	if line.line == 0 {
		// No source location available: print "?" instead of file:line.
		fLine = fmt.Sprintf(
			"%s%s%s %s%s%s %s? %s %03d%s%s %s",
			colorDim,
			line.timestamp.Format(timeFormat),
			colorEndDim,
			colorStart,
			line.level.String(),
			colorEnd,
			colorDim,
			rightArrow,
			counter,
			formatDuplicates(duplicates),
			colorEndDim,
			line.msg,
		)
	} else {
		// Only show the last 10 characters of the file path.
		fLen := len(line.file)
		fPartStart := fLen - 10
		if fPartStart < 0 {
			fPartStart = 0
		}
		fLine = fmt.Sprintf(
			"%s%s%s %s%s%s %s%s:%03d %s %03d%s%s %s",
			colorDim,
			line.timestamp.Format(timeFormat),
			colorEndDim,
			colorStart,
			line.level.String(),
			colorEnd,
			colorDim,
			line.file[fPartStart:],
			line.line,
			rightArrow,
			counter,
			formatDuplicates(duplicates),
			colorEndDim,
			line.msg,
		)
	}

	if line.tracer != nil {
		// append full trace time
		if len(line.tracer.logs) > 0 {
			fLine += fmt.Sprintf(" Σ=%s", line.timestamp.Sub(line.tracer.logs[0].timestamp))
		}

		// append all trace actions
		var d time.Duration
		for i, action := range line.tracer.logs {
			// set color
			if useColor {
				colorStart = action.level.color()
			}
			// set filename length (last 10 characters, as above)
			fLen := len(action.file)
			fPartStart := fLen - 10
			if fPartStart < 0 {
				fPartStart = 0
			}
			// format: each trace entry shows the duration until the next
			// entry, or until the main line for the last entry.
			if i == len(line.tracer.logs)-1 { // last
				d = line.timestamp.Sub(action.timestamp)
			} else {
				d = line.tracer.logs[i+1].timestamp.Sub(action.timestamp)
			}
			fLine += fmt.Sprintf(
				"\n%s%23s%s %s%s%s %s%s:%03d %s%s %s",
				colorDim,
				d,
				colorEndDim,
				colorStart,
				action.level.String(),
				colorEnd,
				colorDim,
				action.file[fPartStart:],
				action.line,
				rightArrow,
				colorEndDim,
				action.msg,
			)
		}
	}

	// Wrap the line counter back to 0.
	if counter >= maxCount {
		counter = 0
	}

	return fLine
}
// formatDuplicates renders the "[Nx]" repetition marker, where N is the total
// number of identical lines (the printed one plus the suppressed duplicates).
// It returns an empty string when there were no duplicates.
func formatDuplicates(duplicates uint64) string {
	if duplicates > 0 {
		return fmt.Sprintf(" [%dx]", duplicates+1)
	}
	return ""
}

View File

@@ -0,0 +1,59 @@
//go:build !windows
package log
// Arrow glyphs used in the log format (UTF-8 capable terminals).
const (
	rightArrow = "▶"
	leftArrow  = "◀"
)

// ANSI SGR escape sequences used to color log output.
const (
	colorDim    = "\033[2m"
	colorEndDim = "\033[22m"

	colorRed     = "\033[91m"
	colorYellow  = "\033[93m"
	colorBlue    = "\033[34m"
	colorMagenta = "\033[35m"
	colorCyan    = "\033[36m"
	colorGreen   = "\033[92m"

	// Saved for later:
	// colorBlack   = "\033[30m" //.
	// colorGreen   = "\033[32m" //.
	// colorWhite   = "\033[37m" //.
)
// color returns the ANSI color sequence for the severity. Trace and unknown
// severities are printed uncolored.
func (s Severity) color() string {
	switch s {
	case CriticalLevel:
		return colorMagenta
	case ErrorLevel:
		return colorRed
	case WarningLevel:
		return colorYellow
	case InfoLevel:
		return colorGreen
	case DebugLevel:
		return colorCyan
	default:
		// TraceLevel and anything unexpected: no color.
		return ""
	}
}
// endColor returns the ANSI sequence that resets all attributes.
func endColor() string {
	return "\033[0m"
}

// blueColor returns the color used for the BOF/EOF markers.
func blueColor() string {
	return colorBlue
}

// dimColor returns the ANSI sequence that dims following text.
func dimColor() string {
	return colorDim
}

// endDimColor returns the ANSI sequence that ends dimmed text.
func endDimColor() string {
	return colorEndDim
}

View File

@@ -0,0 +1,81 @@
package log
import (
"github.com/safing/portmaster/base/utils/osdetail"
)
// Arrow glyphs used in the log format (plain ASCII for Windows consoles).
const (
	rightArrow = ">"
	leftArrow  = "<"
)

// ANSI SGR escape sequences used to color log output.
const (
	colorDim    = "\033[2m"
	colorEndDim = "\033[22m"

	colorRed     = "\033[91m"
	colorYellow  = "\033[93m"
	colorBlue    = "\033[34m"
	colorMagenta = "\033[35m"
	colorCyan    = "\033[36m"
	colorGreen   = "\033[92m"

	// colorBlack   = "\033[30m"
	// colorGreen   = "\033[32m"
	// colorWhite   = "\033[37m"
)

// colorsSupported caches whether the console accepts ANSI color sequences;
// all color helpers in this file consult it.
var (
	colorsSupported bool
)

// init probes color support once at startup via osdetail.
// NOTE(review): assumed that EnableColorSupport also enables VT processing on
// the console — confirm in the osdetail package.
func init() {
	colorsSupported = osdetail.EnableColorSupport()
}
// color returns the ANSI color sequence for the severity, or an empty string
// when colors are not supported or the severity has no dedicated color.
func (s Severity) color() string {
	if !colorsSupported {
		return ""
	}
	switch s {
	case DebugLevel:
		return colorCyan
	case InfoLevel:
		return colorGreen
	case WarningLevel:
		return colorYellow
	case ErrorLevel:
		return colorRed
	case CriticalLevel:
		return colorMagenta
	case TraceLevel:
		// Trace lines are printed uncolored; listed explicitly to keep the
		// switch exhaustive and consistent with the unix implementation.
		return ""
	default:
		return ""
	}
}
// endColor returns the ANSI sequence that resets all attributes, or an empty
// string if colors are not supported.
func endColor() string {
	if colorsSupported {
		return "\033[0m"
	}
	return ""
}

// blueColor returns the color used for the BOF/EOF markers, or an empty
// string if colors are not supported.
func blueColor() string {
	if colorsSupported {
		return colorBlue
	}
	return ""
}

// dimColor returns the ANSI sequence that dims following text, or an empty
// string if colors are not supported.
func dimColor() string {
	if colorsSupported {
		return colorDim
	}
	return ""
}

// endDimColor returns the ANSI sequence that ends dimmed text, or an empty
// string if colors are not supported.
func endDimColor() string {
	if colorsSupported {
		return colorEndDim
	}
	return ""
}

219
base/log/input.go Normal file
View File

@@ -0,0 +1,219 @@
package log
import (
"fmt"
"runtime"
"strings"
"sync/atomic"
"time"
)
// Atomic counters for warning-and-worse log lines since program start; read
// via the Total*LogLines functions. They are counted even when the respective
// level is not enabled for output.
var (
	warnLogLines = new(uint64)
	errLogLines  = new(uint64)
	critLogLines = new(uint64)
)
// log checks whether the line is enabled (globally or per package) and
// enqueues it to the log buffer. It is the common backend of all package
// level logging functions and must be called exactly two stack frames below
// the original caller so that the reported file and line are correct.
func log(level Severity, msg string, tracer *ContextTracer) {
	if !started.IsSet() {
		// a bit resource intense, but keeps logs before logging started.
		// TODO: create option to disable logging
		go func() {
			<-startedSignal
			log(level, msg, tracer)
		}()
		return
	}

	// get time
	now := time.Now()

	// get file and line
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = ""
		line = 0
	} else {
		// Strip the ".go" suffix from the file path.
		if len(file) > 3 {
			file = file[:len(file)-3]
		} else {
			file = ""
		}
	}

	// check if level is enabled for file or generally
	if pkgLevelsActive.IsSet() {
		pathSegments := strings.Split(file, "/")
		if len(pathSegments) < 2 {
			// file too short for package levels
			return
		}
		pkgLevelsLock.Lock()
		// The package name is the second-to-last path segment.
		severity, ok := pkgLevels[pathSegments[len(pathSegments)-2]]
		pkgLevelsLock.Unlock()
		if ok {
			if level < severity {
				return
			}
		} else {
			// no package level set, check against global level
			if uint32(level) < atomic.LoadUint32(logLevel) {
				return
			}
		}
	} else if uint32(level) < atomic.LoadUint32(logLevel) {
		// no package levels set, check against global level
		return
	}

	// create log object
	log := &logLine{
		msg:       msg,
		tracer:    tracer,
		level:     level,
		timestamp: now,
		file:      file,
		line:      line,
	}

	// send log to processing
	select {
	case logBuffer <- log:
	default:
	forceEmptyingLoop:
		// force empty buffer until we can send to it
		for {
			select {
			case forceEmptyingOfBuffer <- struct{}{}:
			case logBuffer <- log:
				break forceEmptyingLoop
			}
		}
	}

	// wake up writer if necessary (non-blocking: one pending signal suffices)
	if logsWaitingFlag.SetToIf(false, true) {
		select {
		case logsWaiting <- struct{}{}:
		default:
		}
	}
}
// fastcheck reports whether a line of the given level could possibly be
// emitted, letting callers skip message formatting early. When package levels
// are active it always returns true; the per-package decision is made later
// in log().
func fastcheck(level Severity) bool {
	return pkgLevelsActive.IsSet() ||
		uint32(level) >= atomic.LoadUint32(logLevel)
}
// Trace is used to log tiny steps. Log traces to context if you can!
func Trace(msg string) {
	if fastcheck(TraceLevel) {
		log(TraceLevel, msg, nil)
	}
}

// Tracef is used to log tiny steps. Log traces to context if you can!
func Tracef(format string, things ...interface{}) {
	if fastcheck(TraceLevel) {
		log(TraceLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Debug is used to log minor errors or unexpected events. These occurrences
// are usually not worth mentioning in itself, but they might hint at a bigger
// problem.
func Debug(msg string) {
	if fastcheck(DebugLevel) {
		log(DebugLevel, msg, nil)
	}
}

// Debugf is used to log minor errors or unexpected events. These occurrences
// are usually not worth mentioning in itself, but they might hint at a bigger
// problem.
func Debugf(format string, things ...interface{}) {
	if fastcheck(DebugLevel) {
		log(DebugLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Info is used to log mildly significant events. Should be used to inform
// about somewhat bigger or user affecting events that happen.
func Info(msg string) {
	if fastcheck(InfoLevel) {
		log(InfoLevel, msg, nil)
	}
}

// Infof is used to log mildly significant events. Should be used to inform
// about somewhat bigger or user affecting events that happen.
func Infof(format string, things ...interface{}) {
	if fastcheck(InfoLevel) {
		log(InfoLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Warning is used to log (potentially) bad events, but nothing broke (even a
// little) and there is no need to panic yet. Warnings are counted even when
// the level is not enabled for output.
func Warning(msg string) {
	atomic.AddUint64(warnLogLines, 1)
	if fastcheck(WarningLevel) {
		log(WarningLevel, msg, nil)
	}
}

// Warningf is used to log (potentially) bad events, but nothing broke (even a
// little) and there is no need to panic yet.
func Warningf(format string, things ...interface{}) {
	atomic.AddUint64(warnLogLines, 1)
	if fastcheck(WarningLevel) {
		log(WarningLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Error is used to log errors that break or impair functionality. The
// task/process may have to be aborted and tried again later. The system is
// still operational. Maybe User/Admin should be informed.
func Error(msg string) {
	atomic.AddUint64(errLogLines, 1)
	if fastcheck(ErrorLevel) {
		log(ErrorLevel, msg, nil)
	}
}

// Errorf is used to log errors that break or impair functionality. The
// task/process may have to be aborted and tried again later. The system is
// still operational.
func Errorf(format string, things ...interface{}) {
	atomic.AddUint64(errLogLines, 1)
	if fastcheck(ErrorLevel) {
		log(ErrorLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Critical is used to log events that completely break the system. Operation
// cannot continue. User/Admin must be informed.
func Critical(msg string) {
	atomic.AddUint64(critLogLines, 1)
	if fastcheck(CriticalLevel) {
		log(CriticalLevel, msg, nil)
	}
}

// Criticalf is used to log events that completely break the system. Operation
// cannot continue. User/Admin must be informed.
func Criticalf(format string, things ...interface{}) {
	atomic.AddUint64(critLogLines, 1)
	if fastcheck(CriticalLevel) {
		log(CriticalLevel, fmt.Sprintf(format, things...), nil)
	}
}

// TotalWarningLogLines returns the total amount of warning log lines since
// start of the program.
func TotalWarningLogLines() uint64 {
	return atomic.LoadUint64(warnLogLines)
}

// TotalErrorLogLines returns the total amount of error log lines since start
// of the program.
func TotalErrorLogLines() uint64 {
	return atomic.LoadUint64(errLogLines)
}

// TotalCriticalLogLines returns the total amount of critical log lines since
// start of the program.
func TotalCriticalLogLines() uint64 {
	return atomic.LoadUint64(critLogLines)
}

249
base/log/logging.go Normal file
View File

@@ -0,0 +1,249 @@
package log
import (
"fmt"
"os"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/tevino/abool"
)
// concept
/*
- Logging function:
- check if file-based levelling enabled
- if yes, check if level is active on this file
- check if level is active
- send data to backend via big buffered channel
- Backend:
- wait until there is time for writing logs
- write logs
- configurable if logged to folder (buffer + rollingFileAppender) and/or console
- console: log everything above INFO to stderr
- Channel overbuffering protection:
- if buffer is full, trigger write
- Anti-Importing-Loop:
- everything imports logging
- logging is configured by main module and is supplied access to configuration and taskmanager
*/
// Severity describes a log level.
type Severity uint32

// Message describes a log level message and is implemented
// by logLine.
type Message interface {
	// Text returns the log message text.
	Text() string
	// Severity returns the log level of the message.
	Severity() Severity
	// Time returns when the message was created.
	Time() time.Time
	// File returns the source file the message originated from.
	File() string
	// LineNumber returns the source line the message originated from.
	LineNumber() int
}

// logLine is the internal Message implementation.
type logLine struct {
	msg       string
	tracer    *ContextTracer // attached context trace, if any.
	level     Severity
	timestamp time.Time
	file      string // source file path with the last 3 chars (".go") stripped — see log().
	line      int
}
// Text implements Message and returns the message text.
func (ll *logLine) Text() string {
	return ll.msg
}

// Severity implements Message and returns the log level.
func (ll *logLine) Severity() Severity {
	return ll.level
}

// Time implements Message and returns the creation time.
func (ll *logLine) Time() time.Time {
	return ll.timestamp
}

// File implements Message and returns the source file.
func (ll *logLine) File() string {
	return ll.file
}

// LineNumber implements Message and returns the source line.
func (ll *logLine) LineNumber() int {
	return ll.line
}
// Equal reports whether two log lines are identical for deduplication
// purposes. Lines carrying a tracer are never considered equal, so traces are
// always printed in full.
func (ll *logLine) Equal(ol *logLine) bool {
	return ll.msg == ol.msg &&
		ll.tracer == nil && ol.tracer == nil &&
		ll.file == ol.file &&
		ll.line == ol.line &&
		ll.level == ol.level
}
// Log Levels. Severity 0 is not defined and is treated as invalid/"none" by
// Name() and ParseLevel().
const (
	TraceLevel    Severity = 1
	DebugLevel    Severity = 2
	InfoLevel     Severity = 3
	WarningLevel  Severity = 4
	ErrorLevel    Severity = 5
	CriticalLevel Severity = 6
)

var (
	logBuffer             chan *logLine // created in Start().
	forceEmptyingOfBuffer = make(chan struct{}) // signals the writer to drain a full buffer.

	logLevelInt = uint32(InfoLevel)
	logLevel    = &logLevelInt // accessed atomically.

	pkgLevelsActive = abool.NewBool(false)
	pkgLevels       = make(map[string]Severity)
	pkgLevelsLock   sync.Mutex

	logsWaiting     = make(chan struct{}, 1) // wake-up signal for the writer.
	logsWaitingFlag = abool.NewBool(false)

	shutdownFlag      = abool.NewBool(false)
	shutdownSignal    = make(chan struct{})
	shutdownWaitGroup sync.WaitGroup

	initializing  = abool.NewBool(false)
	started       = abool.NewBool(false)
	startedSignal = make(chan struct{}) // closed in Start().
)
// SetPkgLevels sets individual log levels for packages. Only effective after Start().
func SetPkgLevels(levels map[string]Severity) {
	pkgLevelsLock.Lock()
	pkgLevels = levels
	pkgLevelsLock.Unlock()
	pkgLevelsActive.Set()
}

// UnSetPkgLevels removes all individual log levels for packages.
func UnSetPkgLevels() {
	// Only the activation flag is cleared; the old map is kept.
	pkgLevelsActive.UnSet()
}

// GetLogLevel returns the current log level.
func GetLogLevel() Severity {
	return Severity(atomic.LoadUint32(logLevel))
}

// SetLogLevel sets a new log level. Only effective after Start().
func SetLogLevel(level Severity) {
	atomic.StoreUint32(logLevel, uint32(level))

	// Setup slog here for the transition period.
	setupSLog(level)
}
// Name returns the lower-case name of the log level, or "none" for unknown
// values.
func (s Severity) Name() string {
	names := [...]string{
		TraceLevel:    "trace",
		DebugLevel:    "debug",
		InfoLevel:     "info",
		WarningLevel:  "warning",
		ErrorLevel:    "error",
		CriticalLevel: "critical",
	}
	if int(s) < len(names) && names[s] != "" {
		return names[s]
	}
	return "none"
}
// ParseLevel returns the level severity of a log level name. Matching is
// case-insensitive; unknown names yield 0 (invalid).
func ParseLevel(level string) Severity {
	// Use the named constants instead of magic numbers so this stays in sync
	// with the Severity constants and Name().
	switch strings.ToLower(level) {
	case "trace":
		return TraceLevel
	case "debug":
		return DebugLevel
	case "info":
		return InfoLevel
	case "warning":
		return WarningLevel
	case "error":
		return ErrorLevel
	case "critical":
		return CriticalLevel
	}
	return 0
}
// Start starts the logging system. Must be called in order to see logs.
// It applies the -log and -plog command line flags, sets up slog for the
// transition period, and launches the writer goroutine. Repeated calls are
// no-ops. The returned error reports invalid -plog entries; logging still
// starts in that case.
func Start() (err error) {
	if !initializing.SetToIf(false, true) {
		return nil
	}

	logBuffer = make(chan *logLine, 1024)

	if logLevelFlag != "" {
		initialLogLevel := ParseLevel(logLevelFlag)
		if initialLogLevel == 0 {
			// Invalid level name: warn and fall back to info.
			fmt.Fprintf(os.Stderr, "log warning: invalid log level \"%s\", falling back to level info\n", logLevelFlag)
			initialLogLevel = InfoLevel
		}

		SetLogLevel(initialLogLevel)
	} else {
		// Setup slog here for the transition period.
		setupSLog(GetLogLevel())
	}

	// get and set file loglevels
	pkgLogLevels := pkgLogLevelsFlag
	if len(pkgLogLevels) > 0 {
		newPkgLevels := make(map[string]Severity)
		for _, pair := range strings.Split(pkgLogLevels, ",") {
			splitted := strings.Split(pair, "=")
			if len(splitted) != 2 {
				err = fmt.Errorf("log warning: invalid file log level \"%s\", ignoring", pair)
				fmt.Fprintf(os.Stderr, "%s\n", err.Error())
				break
			}
			fileLevel := ParseLevel(splitted[1])
			if fileLevel == 0 {
				err = fmt.Errorf("log warning: invalid file log level \"%s\", ignoring", pair)
				fmt.Fprintf(os.Stderr, "%s\n", err.Error())
				break
			}
			newPkgLevels[splitted[0]] = fileLevel
		}
		// Entries parsed before an invalid one are still applied.
		SetPkgLevels(newPkgLevels)
	}

	if !schedulingEnabled {
		// Without external scheduling, close the trigger channel so the
		// writer never waits for a timeslot.
		close(writeTrigger)
	}
	startWriter()

	started.Set()
	close(startedSignal)

	return err
}
// Shutdown writes remaining log lines and then stops the log system.
// It is safe to call multiple times and blocks until the writer has finished.
func Shutdown() {
	if shutdownFlag.SetToIf(false, true) {
		close(shutdownSignal)
	}
	shutdownWaitGroup.Wait()
}

64
base/log/logging_test.go Normal file
View File

@@ -0,0 +1,64 @@
package log
import (
"fmt"
"testing"
"time"
)
// init starts the logging system once for all tests in this package.
func init() {
	err := Start()
	if err != nil {
		panic(fmt.Sprintf("start failed: %s", err))
	}
}

// TestLogging exercises all logging functions and log levels. Output is not
// asserted; the test mainly checks that nothing panics or deadlocks.
func TestLogging(t *testing.T) {
	t.Parallel()

	// skip
	if testing.Short() {
		t.Skip()
	}

	// set levels (static random)
	SetLogLevel(WarningLevel)
	SetLogLevel(InfoLevel)
	SetLogLevel(ErrorLevel)
	SetLogLevel(DebugLevel)
	SetLogLevel(CriticalLevel)
	SetLogLevel(TraceLevel)

	// log
	Trace("Trace")
	Debug("Debug")
	Info("Info")
	Warning("Warning")
	Error("Error")
	Critical("Critical")

	// logf
	Tracef("Trace %s", "f")
	Debugf("Debug %s", "f")
	Infof("Info %s", "f")
	Warningf("Warning %s", "f")
	Errorf("Error %s", "f")
	Criticalf("Critical %s", "f")

	// play with levels
	SetLogLevel(CriticalLevel)
	Warning("Warning")
	SetLogLevel(TraceLevel)

	// log invalid level
	log(0xFF, "msg", nil)

	// wait logs to be written
	time.Sleep(1 * time.Millisecond)

	// just for show
	UnSetPkgLevels()

	// do not really shut down, we may need logging for other tests
	// Shutdown()
}

311
base/log/output.go Normal file
View File

@@ -0,0 +1,311 @@
package log
import (
"fmt"
"os"
"runtime/debug"
"sync"
"time"
)
type (
	// Adapter is used to write logs.
	Adapter interface {
		// Write is called for each log message. duplicates is the number of
		// directly repeated identical lines that were suppressed.
		Write(msg Message, duplicates uint64)
	}

	// AdapterFunc is a convenience type for implementing
	// Adapter.
	AdapterFunc func(msg Message, duplicates uint64)

	// FormatFunc formats msg into a string.
	FormatFunc func(msg Message, duplicates uint64) string

	// SimpleFileAdapter implements Adapter and writes all
	// messages to File.
	SimpleFileAdapter struct {
		Format FormatFunc
		File   *os.File
	}
)

var (
	// StdoutAdapter is a simple file adapter that writes
	// all logs to os.Stdout using a predefined format.
	StdoutAdapter = &SimpleFileAdapter{
		File:   os.Stdout,
		Format: defaultColorFormater,
	}

	// StderrAdapter is a simple file adapter that writes
	// all logs to os.Stderr using a predefined format.
	StderrAdapter = &SimpleFileAdapter{
		File:   os.Stderr,
		Format: defaultColorFormater,
	}
)

var (
	// adapter is the active output; replaced via SetAdapter before Start().
	adapter Adapter = StdoutAdapter

	// schedulingEnabled is set by EnableScheduling before Start().
	schedulingEnabled = false

	// writeTrigger carries write timeslots; closed in Start() when scheduling
	// is disabled so waiting on it never blocks.
	writeTrigger = make(chan struct{})
)
// SetAdapter configures the logging adapter to use.
// This must be called before the log package is initialized; nil adapters and
// late calls are silently ignored.
func SetAdapter(a Adapter) {
	if a == nil {
		return
	}
	if initializing.IsSet() {
		return
	}
	adapter = a
}
// Write implements Adapter and calls fn.
func (fn AdapterFunc) Write(msg Message, duplicates uint64) {
	fn(msg, duplicates)
}

// Write implements Adapter and writes msg to the underlying file.
func (fileAdapter *SimpleFileAdapter) Write(msg Message, duplicates uint64) {
	fmt.Fprintln(fileAdapter.File, fileAdapter.Format(msg, duplicates))
}

// EnableScheduling enables external scheduling of the logger. This will
// require to manually trigger writes via TriggerWriter whenever logs should
// be written. Please note that full buffers will also trigger writing. Must
// be called before Start() to have an effect.
func EnableScheduling() {
	if !initializing.IsSet() {
		schedulingEnabled = true
	}
}

// TriggerWriter triggers log output writing.
func TriggerWriter() {
	if started.IsSet() && schedulingEnabled {
		// Non-blocking: one pending trigger is enough.
		select {
		case writeTrigger <- struct{}{}:
		default:
		}
	}
}

// TriggerWriterChannel returns the channel to trigger log writing. Returned
// channel will close if EnableScheduling() is not called correctly.
func TriggerWriterChannel() chan struct{} {
	return writeTrigger
}
// defaultColorFormater renders a log line with ANSI colors enabled.
func defaultColorFormater(line Message, duplicates uint64) string {
	return formatLine(line.(*logLine), duplicates, true) //nolint:forcetypeassert // TODO: improve
}

// startWriter prints the BOF (begin of file) marker and launches the writer
// goroutine, registering it with the shutdown wait group.
func startWriter() {
	fmt.Printf(
		"%s%s%s %sBOF %s%s\n",
		dimColor(),
		time.Now().Format(timeFormat),
		endDimColor(),
		blueColor(),
		rightArrow,
		endColor(),
	)

	shutdownWaitGroup.Add(1)
	go writerManager()
}
// writerManager runs the log writer and restarts it whenever it returns with
// an error, so a recovered panic inside the writer does not stop logging for
// good. It exits when the writer returns nil (shutdown).
func writerManager() {
	defer shutdownWaitGroup.Done()

	for {
		err := writer()
		if err == nil {
			return
		}
		Errorf("log: writer failed: %s", err)
	}
}
// writer is one run of the log writing loop: it waits for waiting logs and a
// write timeslot, deduplicates directly repeated lines, writes them via the
// adapter and records unexpected (warning and up) lines. It returns nil on
// shutdown; a recovered panic is returned as an error so writerManager can
// restart it.
//
// defer should be able to edit the err. So naked return is required.
// nolint:golint,nakedret
func writer() (err error) {
	defer func() {
		// recover from panic
		panicVal := recover()
		if panicVal != nil {
			err = fmt.Errorf("%s", panicVal)

			// write stack to stderr
			fmt.Fprintf(
				os.Stderr,
				`===== Error Report =====
Message: %s
StackTrace:
%s
===== End of Report =====
`,
				err,
				string(debug.Stack()),
			)
		}
	}()

	var currentLine *logLine
	var duplicates uint64

	for {
		// reset
		currentLine = nil
		duplicates = 0

		// wait until logs need to be processed
		select {
		case <-logsWaiting: // normal process
			logsWaitingFlag.UnSet()
		case <-forceEmptyingOfBuffer: // log buffer is full!
		case <-shutdownSignal: // shutting down
			finalizeWriting()
			return
		}

		// wait for timeslot to log
		select {
		case <-writeTrigger: // normal process
		case <-forceEmptyingOfBuffer: // log buffer is full!
		case <-shutdownSignal: // shutting down
			finalizeWriting()
			return
		}

		// write all the logs!
	writeLoop:
		for {
			select {
			case nextLine := <-logBuffer:
				// first line we process, just assign to currentLine
				if currentLine == nil {
					currentLine = nextLine
					continue writeLoop
				}

				// we now have currentLine and nextLine

				// if currentLine and nextLine are equal, do not print, just increase counter and continue
				if nextLine.Equal(currentLine) {
					duplicates++
					continue writeLoop
				}

				// if currentLine and line are _not_ equal, output currentLine
				adapter.Write(currentLine, duplicates)
				// add to unexpected logs
				addUnexpectedLogs(currentLine)
				// reset duplicate counter
				duplicates = 0
				// set new currentLine
				currentLine = nextLine
			default:
				break writeLoop
			}
		}

		// write final line
		if currentLine != nil {
			adapter.Write(currentLine, duplicates)
			// add to unexpected logs
			addUnexpectedLogs(currentLine)
		}

		// back down a little
		select {
		case <-time.After(10 * time.Millisecond):
		case <-shutdownSignal:
			finalizeWriting()
			return
		}
	}
}
// finalizeWriting drains the log buffer during shutdown. It stops once no new
// line has arrived for 10ms and then prints the EOF (end of file) marker.
func finalizeWriting() {
	for {
		select {
		case line := <-logBuffer:
			adapter.Write(line, 0)
		case <-time.After(10 * time.Millisecond):
			fmt.Printf(
				"%s%s%s %sEOF %s%s\n",
				dimColor(),
				time.Now().Format(timeFormat),
				endDimColor(),
				blueColor(),
				leftArrow,
				endColor(),
			)
			return
		}
	}
}
// Last Unexpected Logs

var (
	// lastUnexpectedLogs is a ring buffer of the most recently formatted
	// lines of level Warning and up; guarded by lastUnexpectedLogsLock.
	lastUnexpectedLogs      [10]string
	lastUnexpectedLogsIndex int
	lastUnexpectedLogsLock  sync.Mutex
)
// addUnexpectedLogs records the line in the unexpected-logs ring buffer when
// the line itself, or any line of its attached trace, is of warning level or
// worse.
func addUnexpectedLogs(line *logLine) {
	// The main line itself is unexpected.
	if line.level >= WarningLevel {
		addUnexpectedLogLine(line)
		return
	}

	if line.tracer == nil {
		return
	}

	// Record the full trace if any of its lines is unexpected.
	for _, traceLine := range line.tracer.logs {
		if traceLine.level >= WarningLevel {
			addUnexpectedLogLine(line)
			return
		}
	}
}
// addUnexpectedLogLine formats the line (without color) and stores it in the
// fixed-size ring buffer of last unexpected logs.
func addUnexpectedLogLine(line *logLine) {
	lastUnexpectedLogsLock.Lock()
	defer lastUnexpectedLogsLock.Unlock()

	// Format line and add to logs.
	lastUnexpectedLogs[lastUnexpectedLogsIndex] = formatLine(line, 0, false)

	// Increase index and wrap back to start.
	lastUnexpectedLogsIndex = (lastUnexpectedLogsIndex + 1) % len(lastUnexpectedLogs)
}

// GetLastUnexpectedLogs returns the last 10 log lines of level Warning and up.
func GetLastUnexpectedLogs() []string {
	lastUnexpectedLogsLock.Lock()
	defer lastUnexpectedLogsLock.Unlock()

	// Make a copy and return.
	logsLen := len(lastUnexpectedLogs)
	start := lastUnexpectedLogsIndex
	logsCopy := make([]string, 0, logsLen)
	// Loop from mid-to-mid: oldest entry first, skipping empty slots.
	for i := start; i < start+logsLen; i++ {
		if lastUnexpectedLogs[i%logsLen] != "" {
			logsCopy = append(logsCopy, lastUnexpectedLogs[i%logsLen])
		}
	}
	return logsCopy
}

65
base/log/slog.go Normal file
View File

@@ -0,0 +1,65 @@
package log
import (
"log/slog"
"os"
"runtime"
"github.com/lmittmann/tint"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
// setupSLog configures the default slog logger to match the given log level
// during the transition period from this package to log/slog.
func setupSLog(logLevel Severity) {
	// Convert to the nearest slog level; slog has no trace or critical
	// levels. An invalid severity falls through to the zero value, LevelInfo.
	var level slog.Level
	switch logLevel {
	case TraceLevel, DebugLevel:
		level = slog.LevelDebug
	case InfoLevel:
		level = slog.LevelInfo
	case WarningLevel:
		level = slog.LevelWarn
	case ErrorLevel, CriticalLevel:
		level = slog.LevelError
	}

	// Setup logging.
	// Define output.
	logOutput := os.Stdout

	// Create handler depending on OS.
	var logHandler slog.Handler
	switch runtime.GOOS {
	case "windows":
		logHandler = tint.NewHandler(
			colorable.NewColorable(logOutput),
			&tint.Options{
				AddSource:  true,
				Level:      level,
				TimeFormat: timeFormat,
			},
		)
	case "linux":
		logHandler = tint.NewHandler(logOutput, &tint.Options{
			AddSource:  true,
			Level:      level,
			TimeFormat: timeFormat,
			NoColor:    !isatty.IsTerminal(logOutput.Fd()),
		})
	default:
		// Use logOutput here too (was os.Stdout directly) so all branches
		// honor the output defined above; colors are disabled.
		logHandler = tint.NewHandler(logOutput, &tint.Options{
			AddSource:  true,
			Level:      level,
			TimeFormat: timeFormat,
			NoColor:    true,
		})
	}

	// Set as default logger.
	slog.SetDefault(slog.New(logHandler))
	slog.SetLogLoggerLevel(level)
}

280
base/log/trace.go Normal file
View File

@@ -0,0 +1,280 @@
package log
import (
"context"
"fmt"
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
// ContextTracerKey is the key used for the context key/value storage.
type ContextTracerKey struct{}

// ContextTracer is attached to a context in order to bind logs to a context.
// It collects log lines until Submit() is called.
type ContextTracer struct {
	sync.Mutex
	logs []*logLine
}

// key is the single value used to store tracers in contexts.
var key = ContextTracerKey{}
// AddTracer adds a ContextTracer to the returned Context. Will return a nil
// ContextTracer if logging level is not set to trace. Will return a nil
// ContextTracer if one already exists. Will return a nil ContextTracer in
// case of an error. Will return a nil context if nil.
func AddTracer(ctx context.Context) (context.Context, *ContextTracer) {
	if ctx != nil && fastcheck(TraceLevel) {
		// check pkg levels
		if pkgLevelsActive.IsSet() {
			// get file of the caller
			_, file, _, ok := runtime.Caller(1)
			if !ok {
				// cannot get file, ignore
				return ctx, nil
			}

			pathSegments := strings.Split(file, "/")
			if len(pathSegments) < 2 {
				// file too short for package levels
				return ctx, nil
			}
			pkgLevelsLock.Lock()
			// The package name is the second-to-last path segment.
			severity, ok := pkgLevels[pathSegments[len(pathSegments)-2]]
			pkgLevelsLock.Unlock()
			if ok {
				// check against package level
				if TraceLevel < severity {
					return ctx, nil
				}
			} else {
				// no package level set, check against global level
				if uint32(TraceLevel) < atomic.LoadUint32(logLevel) {
					return ctx, nil
				}
			}
		} else if uint32(TraceLevel) < atomic.LoadUint32(logLevel) {
			// no package levels set, check against global level
			return ctx, nil
		}

		// check for existing tracer
		_, ok := ctx.Value(key).(*ContextTracer)
		if !ok {
			// add and return new tracer
			tracer := &ContextTracer{}
			return context.WithValue(ctx, key, tracer), tracer
		}
	}
	return ctx, nil
}
// Tracer returns the ContextTracer previously added to the given Context, or
// nil if the context is nil or carries no tracer.
func Tracer(ctx context.Context) *ContextTracer {
	if ctx == nil {
		return nil
	}
	tracer, _ := ctx.Value(key).(*ContextTracer)
	return tracer
}
// Submit collected logs on the context for further processing/outputting. The
// last collected line becomes the main line of the submitted log message.
// Does nothing if called on a nil ContextTracer.
func (tracer *ContextTracer) Submit() {
	if tracer == nil {
		return
	}

	if !started.IsSet() {
		// a bit resource intense, but keeps logs before logging started.
		// TODO: create option to disable logging
		go func() {
			<-startedSignal
			tracer.Submit()
		}()
		return
	}

	if len(tracer.logs) == 0 {
		return
	}

	// extract last line as main line
	mainLine := tracer.logs[len(tracer.logs)-1]
	tracer.logs = tracer.logs[:len(tracer.logs)-1]

	// create log object
	log := &logLine{
		msg:       mainLine.msg,
		tracer:    tracer,
		level:     mainLine.level,
		timestamp: mainLine.timestamp,
		file:      mainLine.file,
		line:      mainLine.line,
	}

	// send log to processing
	select {
	case logBuffer <- log:
	default:
	forceEmptyingLoop:
		// force empty buffer until we can send to it
		for {
			select {
			case forceEmptyingOfBuffer <- struct{}{}:
			case logBuffer <- log:
				break forceEmptyingLoop
			}
		}
	}

	// Wake up writer if necessary. Use a non-blocking send, consistent with
	// log(): logsWaiting has a buffer of one, and the previous blocking send
	// could stall Submit if the writer had not yet consumed an earlier
	// wake-up signal.
	if logsWaitingFlag.SetToIf(false, true) {
		select {
		case logsWaiting <- struct{}{}:
		default:
		}
	}
}
// log appends a line to the tracer using the caller's file and line. Like the
// package-level log(), it must be called exactly two stack frames below the
// original caller so that the reported location is correct.
func (tracer *ContextTracer) log(level Severity, msg string) {
	// get file and line
	_, file, line, ok := runtime.Caller(2)
	if !ok {
		file = ""
		line = 0
	} else {
		// Strip the ".go" suffix from the file path.
		if len(file) > 3 {
			file = file[:len(file)-3]
		} else {
			file = ""
		}
	}

	tracer.Lock()
	defer tracer.Unlock()
	tracer.logs = append(tracer.logs, &logLine{
		timestamp: time.Now(),
		level:     level,
		msg:       msg,
		file:      file,
		line:      line,
	})
}
// Trace is used to log tiny steps. Log traces to context if you can!
// On a nil tracer, the line falls through to the package-level logger.
func (tracer *ContextTracer) Trace(msg string) {
	switch {
	case tracer != nil:
		tracer.log(TraceLevel, msg)
	case fastcheck(TraceLevel):
		log(TraceLevel, msg, nil)
	}
}

// Tracef is used to log tiny steps. Log traces to context if you can!
func (tracer *ContextTracer) Tracef(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(TraceLevel, fmt.Sprintf(format, things...))
	case fastcheck(TraceLevel):
		log(TraceLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Debug is used to log minor errors or unexpected events. These occurrences
// are usually not worth mentioning in itself, but they might hint at a bigger
// problem.
func (tracer *ContextTracer) Debug(msg string) {
	switch {
	case tracer != nil:
		tracer.log(DebugLevel, msg)
	case fastcheck(DebugLevel):
		log(DebugLevel, msg, nil)
	}
}

// Debugf is used to log minor errors or unexpected events. These occurrences
// are usually not worth mentioning in itself, but they might hint at a bigger
// problem.
func (tracer *ContextTracer) Debugf(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(DebugLevel, fmt.Sprintf(format, things...))
	case fastcheck(DebugLevel):
		log(DebugLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Info is used to log mildly significant events. Should be used to inform
// about somewhat bigger or user affecting events that happen.
func (tracer *ContextTracer) Info(msg string) {
	switch {
	case tracer != nil:
		tracer.log(InfoLevel, msg)
	case fastcheck(InfoLevel):
		log(InfoLevel, msg, nil)
	}
}

// Infof is used to log mildly significant events. Should be used to inform
// about somewhat bigger or user affecting events that happen.
func (tracer *ContextTracer) Infof(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(InfoLevel, fmt.Sprintf(format, things...))
	case fastcheck(InfoLevel):
		log(InfoLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Warning is used to log (potentially) bad events, but nothing broke (even a
// little) and there is no need to panic yet.
func (tracer *ContextTracer) Warning(msg string) {
	switch {
	case tracer != nil:
		tracer.log(WarningLevel, msg)
	case fastcheck(WarningLevel):
		log(WarningLevel, msg, nil)
	}
}

// Warningf is used to log (potentially) bad events, but nothing broke (even a
// little) and there is no need to panic yet.
func (tracer *ContextTracer) Warningf(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(WarningLevel, fmt.Sprintf(format, things...))
	case fastcheck(WarningLevel):
		log(WarningLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Error is used to log errors that break or impair functionality. The
// task/process may have to be aborted and tried again later. The system is
// still operational. Maybe User/Admin should be informed.
func (tracer *ContextTracer) Error(msg string) {
	switch {
	case tracer != nil:
		tracer.log(ErrorLevel, msg)
	case fastcheck(ErrorLevel):
		log(ErrorLevel, msg, nil)
	}
}

// Errorf is used to log errors that break or impair functionality. The
// task/process may have to be aborted and tried again later. The system is
// still operational.
func (tracer *ContextTracer) Errorf(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(ErrorLevel, fmt.Sprintf(format, things...))
	case fastcheck(ErrorLevel):
		log(ErrorLevel, fmt.Sprintf(format, things...), nil)
	}
}

// Critical is used to log events that completely break the system. Operation
// cannot continue. User/Admin must be informed.
func (tracer *ContextTracer) Critical(msg string) {
	switch {
	case tracer != nil:
		tracer.log(CriticalLevel, msg)
	case fastcheck(CriticalLevel):
		log(CriticalLevel, msg, nil)
	}
}

// Criticalf is used to log events that completely break the system. Operation
// cannot continue. User/Admin must be informed.
func (tracer *ContextTracer) Criticalf(format string, things ...interface{}) {
	switch {
	case tracer != nil:
		tracer.log(CriticalLevel, fmt.Sprintf(format, things...))
	case fastcheck(CriticalLevel):
		log(CriticalLevel, fmt.Sprintf(format, things...), nil)
	}
}

35
base/log/trace_test.go Normal file
View File

@@ -0,0 +1,35 @@
package log
import (
"context"
"testing"
"time"
)
// TestContextTracer runs a tracer through a typical request flow and submits
// it. Output is not asserted; the test mainly checks that nothing panics or
// deadlocks.
func TestContextTracer(t *testing.T) {
	t.Parallel()

	// skip
	if testing.Short() {
		t.Skip()
	}

	ctx, tracer := AddTracer(context.Background())
	_ = Tracer(ctx)

	tracer.Trace("api: request received, checking security")
	time.Sleep(1 * time.Millisecond)
	tracer.Trace("login: logging in user")
	time.Sleep(1 * time.Millisecond)
	tracer.Trace("database: fetching requested resources")
	time.Sleep(10 * time.Millisecond)
	tracer.Warning("database: partial failure")
	time.Sleep(10 * time.Microsecond)
	tracer.Trace("renderer: rendering output")
	time.Sleep(1 * time.Millisecond)
	tracer.Trace("api: returning request")

	tracer.Trace("api: completed request")
	tracer.Submit()
	// give the writer time to process the submitted trace
	time.Sleep(100 * time.Millisecond)
}