✨ feat(config): hot-reload Phase 1 — logging.level (ADR-0023) (#42)
Co-authored-by: Gabriel Radureau <arcodange@gmail.com>
Co-committed-by: Gabriel Radureau <arcodange@gmail.com>
This commit was merged in pull request #42.
This commit is contained in:
@@ -1,11 +1,14 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fsnotify/fsnotify"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
"github.com/spf13/viper"
|
||||
@@ -29,6 +32,15 @@ type Config struct {
|
||||
Database DatabaseConfig `mapstructure:"database"`
|
||||
RateLimit RateLimitConfig `mapstructure:"rate_limit"`
|
||||
Cache CacheConfig `mapstructure:"cache"`
|
||||
|
||||
// viper is the underlying configuration source. Kept (unexported,
|
||||
// mapstructure:"-") so hot-reload can re-unmarshal on file changes —
|
||||
// see WatchAndApply (ADR-0023 selective hot-reload).
|
||||
viper *viper.Viper `mapstructure:"-"`
|
||||
|
||||
// reloadMu serialises Unmarshal during hot-reload so a partial mutation
|
||||
// can't be observed mid-flight by getter calls.
|
||||
reloadMu sync.RWMutex `mapstructure:"-"`
|
||||
}
|
||||
|
||||
// ServerConfig holds server-related configuration
|
||||
@@ -299,6 +311,9 @@ func LoadConfig() (*Config, error) {
|
||||
return nil, fmt.Errorf("config unmarshal error: %w", err)
|
||||
}
|
||||
|
||||
// Keep the viper instance for hot-reload (ADR-0023).
|
||||
config.viper = v
|
||||
|
||||
// Setup logging based on configuration (level, output file, time format).
|
||||
// The JSON/console format was already applied at the top of LoadConfig via
|
||||
// peekJSONLogging, so SetupLogging only needs to handle the remaining knobs.
|
||||
@@ -590,3 +605,53 @@ func (c *Config) setupLogOutput() {
|
||||
log.Logger = log.Output(file)
|
||||
log.Trace().Str("output", output).Msg("Logging to file")
|
||||
}
|
||||
|
||||
// WatchAndApply starts watching the config file for changes and applies the
|
||||
// hot-reloadable subset on every change (ADR-0023 selective hot-reload).
|
||||
//
|
||||
// Phase 1 (this PR) reloads:
|
||||
// - logging.level — re-applies via SetupLogging on every change.
|
||||
//
|
||||
// The other fields listed in ADR-0023 (api.v2_enabled, telemetry sampler,
|
||||
// auth.jwt.ttl) remain restart-only until their handlers land in a follow-up.
|
||||
//
|
||||
// Stops when ctx is cancelled. Safe to call once at server startup.
|
||||
// If the config file is absent (ConfigFileNotFoundError at load time), this
|
||||
// becomes a no-op and logs a single warning.
|
||||
func (c *Config) WatchAndApply(ctx context.Context) {
|
||||
if c.viper == nil {
|
||||
log.Warn().Msg("Config hot-reload disabled: no viper instance attached")
|
||||
return
|
||||
}
|
||||
if c.viper.ConfigFileUsed() == "" {
|
||||
log.Info().Msg("Config hot-reload disabled: no config file in use (env-only or defaults)")
|
||||
return
|
||||
}
|
||||
|
||||
c.viper.OnConfigChange(func(in fsnotify.Event) {
|
||||
log.Info().Str("event", in.Op.String()).Str("file", in.Name).Msg("Config file changed, reloading hot-reloadable fields")
|
||||
c.reloadMu.Lock()
|
||||
defer c.reloadMu.Unlock()
|
||||
|
||||
if err := c.viper.Unmarshal(c); err != nil {
|
||||
log.Error().Err(err).Msg("Hot-reload: failed to unmarshal new config, keeping previous values")
|
||||
return
|
||||
}
|
||||
|
||||
// Apply hot-reloadable fields. Order matters: logging first so the
|
||||
// rest of the reload is logged at the right level.
|
||||
c.SetupLogging()
|
||||
log.Info().Str("logging_level", c.GetLogLevel()).Msg("Hot-reload applied (logging.level)")
|
||||
})
|
||||
c.viper.WatchConfig()
|
||||
|
||||
log.Info().Str("file", c.viper.ConfigFileUsed()).Msg("Config hot-reload watcher started (ADR-0023 Phase 1)")
|
||||
|
||||
// Stop the watcher on context cancel — viper has no public Stop method,
|
||||
// so we just clear the callback to make further events no-ops.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
c.viper.OnConfigChange(func(_ fsnotify.Event) {})
|
||||
log.Info().Msg("Config hot-reload watcher stopped")
|
||||
}()
|
||||
}
|
||||
|
||||
83
pkg/config/config_hot_reload_test.go
Normal file
83
pkg/config/config_hot_reload_test.go
Normal file
@@ -0,0 +1,83 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/viper"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// loadFromFile is a helper that mimics LoadConfig() for a specific file path
|
||||
// without going through the env-prefix and singleton machinery — keeps the
|
||||
// test hermetic.
|
||||
func loadFromFile(t *testing.T, path string) *Config {
|
||||
t.Helper()
|
||||
v := viper.New()
|
||||
v.SetConfigFile(path)
|
||||
v.SetConfigType("yaml")
|
||||
v.SetDefault("logging.level", "info")
|
||||
require.NoError(t, v.ReadInConfig())
|
||||
|
||||
c := &Config{viper: v}
|
||||
require.NoError(t, v.Unmarshal(c))
|
||||
return c
|
||||
}
|
||||
|
||||
// TestWatchAndApply_LoggingLevel proves the hot-reload pipe end-to-end:
|
||||
// write a new logging.level to the watched file, the OnConfigChange handler
|
||||
// re-unmarshals, and the in-memory Config reflects the new value.
|
||||
func TestWatchAndApply_LoggingLevel(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
path := filepath.Join(dir, "config.yaml")
|
||||
require.NoError(t, os.WriteFile(path, []byte("logging:\n level: info\n"), 0644))
|
||||
|
||||
c := loadFromFile(t, path)
|
||||
assert.Equal(t, "info", c.GetLogLevel())
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
c.WatchAndApply(ctx)
|
||||
|
||||
// Mutate the file. fsnotify needs a real write event; rewrite atomically.
|
||||
require.NoError(t, os.WriteFile(path, []byte("logging:\n level: debug\n"), 0644))
|
||||
|
||||
// Poll for up to 2s waiting for the in-memory level to flip.
|
||||
deadline := time.Now().Add(2 * time.Second)
|
||||
for time.Now().Before(deadline) {
|
||||
c.reloadMu.RLock()
|
||||
level := c.GetLogLevel()
|
||||
c.reloadMu.RUnlock()
|
||||
if level == "debug" {
|
||||
return
|
||||
}
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
}
|
||||
c.reloadMu.RLock()
|
||||
defer c.reloadMu.RUnlock()
|
||||
t.Fatalf("logging level did not hot-reload to debug: still %q", c.GetLogLevel())
|
||||
}
|
||||
|
||||
// TestWatchAndApply_NoFileNoOp confirms the watcher is a safe no-op when no
|
||||
// config file is in use (env-only / defaults) — important so production
|
||||
// containers without a mounted config.yaml don't crash.
|
||||
func TestWatchAndApply_NoFileNoOp(t *testing.T) {
|
||||
c := &Config{viper: viper.New()}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
c.WatchAndApply(ctx) // should return without panicking
|
||||
}
|
||||
|
||||
// TestWatchAndApply_NilViperNoOp confirms the watcher tolerates a Config
|
||||
// constructed without the viper field (e.g. tests that build a Config{}
|
||||
// manually — same defensive code path as production but exercised explicitly).
|
||||
func TestWatchAndApply_NilViperNoOp(t *testing.T) {
|
||||
c := &Config{}
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
c.WatchAndApply(ctx)
|
||||
}
|
||||
Reference in New Issue
Block a user