Files
dance-lessons-coach/pkg/user/postgres_repository_isolated_test.go
Gabriel Radureau 7a2b1a0a87 feat(user): NewPostgresRepositoryFromDSN factory + integration test (T12 stage 1/2)
First building block for parallel-safe BDD scenario isolation (T12 plan,
ADR-0025 follow-up). PR #28 had to revert BDD_SCHEMA_ISOLATION because
SetupScenarioSchema created an empty schema without migrations -- the
production server's repo never saw it. This PR adds the missing piece:
a factory that opens a *PostgresRepository connected via an arbitrary
DSN AND runs AutoMigrate against it, so a per-scenario schema actually
gets the users table.

Public API additions in pkg/user/postgres_repository.go:

- NewPostgresRepositoryFromDSN(cfg, dsn) (*PostgresRepository, error)
  Opens the repo from an explicit DSN (overrides cfg's host/port/etc),
  runs AutoMigrate -- creates tables in whatever schema the DSN's
  search_path points to.

- BuildSchemaIsolatedDSN(cfg, schemaName) string
  Builds a DSN with `search_path=<schemaName>` from a base config.

The existing NewPostgresRepository(cfg) is unchanged. Existing Close()
method is reused.

Integration test in postgres_repository_isolated_test.go proves:
- AutoMigrate creates `users` table in the per-scenario schema (not public)
- A CreateUser through the isolated repo writes into the per-scenario schema
- public.users sees ZERO rows for the test username
- The per-scenario schema users table sees exactly 1 row

Test skips gracefully when DLC_DATABASE_HOST is not set.

Out of scope (T12 stage 2/2 next):
- Wiring this factory into pkg/bdd/testserver/SetupScenarioSchema
- Spawning a fresh server.Server per scenario (requires NewServerWithUserRepo)
- Removing -p 1 from scripts/run-bdd-tests.sh after parallel safety is achieved

Per code-reviewer skill SOLID/DDD section :
- SRP : factory has single responsibility (open + migrate, no business logic)
- OCP : the new factory extends the package without changing existing callers
- Cognitive load : 1 file, 50 lines added, 1 dedicated test file

🤖 Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
2026-05-03 18:03:08 +02:00

119 lines
4.0 KiB
Go

package user
import (
"context"
"fmt"
"os"
"testing"
"dance-lessons-coach/pkg/config"
_ "github.com/lib/pq"
)
// TestNewPostgresRepositoryFromDSN_SchemaIsolation verifies that the factory
// + BuildSchemaIsolatedDSN combo produces a repository whose AutoMigrate
// creates the users table inside a per-scenario schema (NOT public).
//
// This is the foundation block for parallel-safe BDD tests (T12).
// Wiring it into the BDD testserver's SetupScenarioSchema is a follow-up.
//
// Skipped if Postgres is not available (no env vars / connection refused).
func TestNewPostgresRepositoryFromDSN_SchemaIsolation(t *testing.T) {
host := os.Getenv("DLC_DATABASE_HOST")
if host == "" {
t.Skip("DLC_DATABASE_HOST not set, skipping integration test")
}
cfg := &config.Config{}
cfg.Database.Host = host
cfg.Database.Port = parsePortOrDefault(os.Getenv("DLC_DATABASE_PORT"), 5432)
cfg.Database.User = envOr("DLC_DATABASE_USER", "postgres")
cfg.Database.Password = envOr("DLC_DATABASE_PASSWORD", "postgres")
cfg.Database.Name = envOr("DLC_DATABASE_NAME", "dance_lessons_coach_bdd_test")
cfg.Database.SSLMode = envOr("DLC_DATABASE_SSL_MODE", "disable")
schemaName := "test_isolated_dsn_factory"
// Open default repo (public schema) just to manage the schema lifecycle
defaultRepo, err := NewPostgresRepository(cfg)
if err != nil {
t.Skipf("Postgres unavailable: %v", err)
}
defer defaultRepo.Close()
// Drop schema if it exists from a previous run
if err := defaultRepo.db.Exec(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)).Error; err != nil {
t.Fatalf("DROP SCHEMA setup failed: %v", err)
}
defer func() {
_ = defaultRepo.db.Exec(fmt.Sprintf("DROP SCHEMA IF EXISTS %s CASCADE", schemaName)).Error
}()
// CREATE SCHEMA
if err := defaultRepo.db.Exec(fmt.Sprintf("CREATE SCHEMA %s", schemaName)).Error; err != nil {
t.Fatalf("CREATE SCHEMA failed: %v", err)
}
// Now use the factory to open a repo whose connection has search_path=schemaName
dsn := BuildSchemaIsolatedDSN(cfg, schemaName)
isolatedRepo, err := NewPostgresRepositoryFromDSN(cfg, dsn)
if err != nil {
t.Fatalf("NewPostgresRepositoryFromDSN failed: %v", err)
}
defer isolatedRepo.Close()
// Verify the users table now exists in our schema (not just in public)
var count int64
q := fmt.Sprintf("SELECT count(*) FROM information_schema.tables WHERE table_schema='%s' AND table_name='users'", schemaName)
if err := isolatedRepo.db.Raw(q).Scan(&count).Error; err != nil {
t.Fatalf("information_schema query failed: %v", err)
}
if count != 1 {
t.Fatalf("expected users table in schema %s after AutoMigrate, count=%d", schemaName, count)
}
// Verify a CreateUser via the isolated repo writes into the new schema, NOT public
u := &User{Username: "isolated_factory_user", PasswordHash: "x"}
if err := isolatedRepo.CreateUser(context.Background(), u); err != nil {
t.Fatalf("CreateUser via isolated repo failed: %v", err)
}
var publicCount int64
if err := defaultRepo.db.Raw(fmt.Sprintf("SELECT count(*) FROM public.users WHERE username='%s'", u.Username)).Scan(&publicCount).Error; err != nil {
t.Fatalf("query public.users failed: %v", err)
}
if publicCount != 0 {
t.Fatalf("isolation leak: expected 0 rows in public.users for username=%s, got %d", u.Username, publicCount)
}
var schemaCount int64
if err := isolatedRepo.db.Raw(fmt.Sprintf("SELECT count(*) FROM %s.users WHERE username='%s'", schemaName, u.Username)).Scan(&schemaCount).Error; err != nil {
t.Fatalf("query schema users failed: %v", err)
}
if schemaCount != 1 {
t.Fatalf("expected 1 row in %s.users, got %d", schemaName, schemaCount)
}
}
// envOr returns the env var value or the fallback if empty.
func envOr(key, fallback string) string {
if v := os.Getenv(key); v != "" {
return v
}
return fallback
}
// parsePortOrDefault parses a port string or returns the fallback.
func parsePortOrDefault(s string, fallback int) int {
if s == "" {
return fallback
}
var n int
_, err := fmt.Sscanf(s, "%d", &n)
if err != nil || n <= 0 {
return fallback
}
return n
}