Refactor server architecture: move internal logic to pkg/server package
- Simplify cmd/server/main.go from 158 to 57 lines (64% reduction)
- Move HTTP server creation, graceful shutdown, and context management to pkg/server
- Add Run() method to encapsulate server lifecycle management
- Maintain all existing functionality and OpenTelemetry integration
- Improve separation of concerns and code organization

This refactoring makes cmd/server/main.go a thin entrypoint while moving all server implementation details to the pkg/server package, following Go best practices for package organization.

Add server and greet binaries to .gitignore
This commit is contained in:
@@ -2,19 +2,12 @@ package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"DanceLessonsCoach/pkg/config"
|
||||
"DanceLessonsCoach/pkg/server"
|
||||
"DanceLessonsCoach/pkg/telemetry"
|
||||
"github.com/rs/zerolog"
|
||||
"github.com/rs/zerolog/log"
|
||||
sdktrace "go.opentelemetry.io/otel/sdk/trace"
|
||||
)
|
||||
|
||||
func main() {
|
||||
@@ -52,107 +45,13 @@ func main() {
|
||||
|
||||
log.Info().Bool("json_logging", cfg.Logging.JSON).Msg("Logging configured")
|
||||
|
||||
// Initialize OpenTelemetry if enabled
|
||||
var tracerProvider *sdktrace.TracerProvider
|
||||
if cfg.GetTelemetryEnabled() {
|
||||
log.Info().Msg("Initializing OpenTelemetry tracing")
|
||||
|
||||
telemetrySetup := &telemetry.Setup{
|
||||
ServiceName: cfg.GetServiceName(),
|
||||
OTLPEndpoint: cfg.GetOTLPEndpoint(),
|
||||
Insecure: cfg.GetTelemetryInsecure(),
|
||||
SamplerType: cfg.GetSamplerType(),
|
||||
SamplerRatio: cfg.GetSamplerRatio(),
|
||||
}
|
||||
|
||||
var err error
|
||||
if tracerProvider, err = telemetrySetup.InitializeTracing(context.Background()); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to initialize OpenTelemetry, continuing without tracing")
|
||||
} else {
|
||||
log.Info().Msg("OpenTelemetry tracing initialized successfully")
|
||||
}
|
||||
}
|
||||
|
||||
// Setup signal context for graceful shutdown
|
||||
rootCtx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
|
||||
defer stop()
|
||||
|
||||
// Create root context with cancellation
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
// Create ongoing context for active requests
|
||||
ongoingCtx, stopOngoingGracefully := context.WithCancel(context.Background())
|
||||
|
||||
// Create readiness context to control readiness state
|
||||
readyCtx, readyCancel := context.WithCancel(context.Background())
|
||||
defer readyCancel()
|
||||
|
||||
// Start server in goroutine
|
||||
// Create and run server
|
||||
server := server.NewServer(cfg, readyCtx)
|
||||
serverCtx, serverStop := context.WithCancel(ctx)
|
||||
|
||||
go func() {
|
||||
log.Info().Str("address", cfg.GetServerAddress()).Msg("Server running")
|
||||
|
||||
srv := &http.Server{
|
||||
Addr: cfg.GetServerAddress(),
|
||||
Handler: server.Router(),
|
||||
BaseContext: func(_ net.Listener) context.Context {
|
||||
return ongoingCtx
|
||||
},
|
||||
}
|
||||
|
||||
// Start the HTTP server in a separate goroutine
|
||||
go func() {
|
||||
if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
|
||||
log.Error().Err(err).Msg("Server error")
|
||||
}
|
||||
}()
|
||||
|
||||
// Wait for signal
|
||||
<-rootCtx.Done()
|
||||
stop()
|
||||
log.Info().Msg("Shutdown signal received")
|
||||
|
||||
// Cancel readiness context to stop accepting new requests
|
||||
readyCancel()
|
||||
log.Info().Msg("Readiness set to false, no longer accepting new requests")
|
||||
|
||||
// Give time for readiness check to propagate (simplified for our case)
|
||||
time.Sleep(1 * time.Second)
|
||||
log.Info().Msg("Readiness check propagated, now waiting for ongoing requests to finish.")
|
||||
|
||||
// Create shutdown context with timeout from config
|
||||
shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), cfg.Shutdown.Timeout)
|
||||
defer shutdownCancel()
|
||||
|
||||
if err := srv.Shutdown(shutdownCtx); err != nil {
|
||||
log.Error().Err(err).Msg("Server shutdown failed")
|
||||
} else {
|
||||
log.Info().Msg("Server shutdown complete")
|
||||
}
|
||||
|
||||
// Stop ongoing requests context
|
||||
stopOngoingGracefully()
|
||||
cancel()
|
||||
serverStop()
|
||||
log.Info().Msg("Server exited")
|
||||
|
||||
// Shutdown OpenTelemetry tracer provider
|
||||
if tracerProvider != nil {
|
||||
if err := telemetry.Shutdown(context.Background(), tracerProvider); err != nil {
|
||||
log.Error().Err(err).Msg("Failed to shutdown OpenTelemetry tracer provider")
|
||||
} else {
|
||||
log.Info().Msg("OpenTelemetry tracer provider shutdown complete")
|
||||
}
|
||||
}
|
||||
|
||||
// Force log flush by writing to stderr directly
|
||||
// This ensures logs are written before process exits
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}()
|
||||
|
||||
// Wait for shutdown
|
||||
<-serverCtx.Done()
|
||||
if err := server.Run(); err != nil {
|
||||
log.Fatal().Err(err).Msg("Server failed")
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user