Compare commits
10 Commits
c17fb4f9b4
...
fix/exclud
| Author | SHA1 | Date | |
|---|---|---|---|
| 6b39d3c3c9 | |||
| 9b6c384eb2 | |||
| 0abc383bed | |||
| c939ba7786 | |||
| 358e3df38b | |||
| 54dd0cc80f | |||
| 9cf6e7f1c4 | |||
| 045823ec8e | |||
| 8503d0824e | |||
| a24b4fdb3b |
@@ -219,6 +219,10 @@ jobs:
|
||||
export DLC_DATABASE_PASSWORD=postgres
|
||||
export DLC_DATABASE_NAME=dance_lessons_coach_bdd_test
|
||||
export DLC_DATABASE_SSL_MODE=disable
|
||||
# Enable per-scenario schema isolation (ADR-0025) to prevent flaky AuthBDD failures.
|
||||
# Without this, scenarios share the public schema and pollute each other's state.
|
||||
# Observed flakiness: same code passes in #605, fails in #606 on TestAuthBDD/*.
|
||||
export BDD_SCHEMA_ISOLATION=true
|
||||
./scripts/run-bdd-tests.sh
|
||||
|
||||
# Generate BDD coverage report
|
||||
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -34,3 +34,13 @@ config/runner
|
||||
coverage.txt
|
||||
trigger.txt
|
||||
test_trigger.txt
|
||||
|
||||
# Frontend
|
||||
frontend/node_modules/
|
||||
frontend/.nuxt/
|
||||
frontend/.output/
|
||||
frontend/dist/
|
||||
frontend/.env
|
||||
frontend/.cache/
|
||||
frontend/test-results/
|
||||
frontend/playwright-report/
|
||||
|
||||
@@ -1,8 +1,11 @@
|
||||
# dance-lessons-coach
|
||||
|
||||
[](https://gitea.arcodange.fr/arcodange/dance-lessons-coach/actions/workflows/ci-cd.yaml)
|
||||
[](https://goreportcard.com/report/github.com/arcodange/dance-lessons-coach)
|
||||
[](https://gitea.arcodange.fr/arcodange/dance-lessons-coach/releases)
|
||||
[](LICENSE)
|
||||
[](https://gitea.arcodange.lab/arcodange/dance-lessons-coach)
|
||||
[](https://gitea.arcodange.lab/arcodange/dance-lessons-coach)
|
||||
|
||||
Go web service demonstrating idiomatic package structure, versioned JSON API, and production-ready features.
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Use Go 1.26.1 as the standard Go version
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-01
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-01
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Use Chi router for HTTP routing
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-02
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-02
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Use Zerolog for structured logging
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-02
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-02
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Adopt interface-based design pattern
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-02
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-02
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Implement graceful shutdown with readiness endpoints
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-03
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-03
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Use Viper for configuration management
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-03
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-03
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Integrate OpenTelemetry for distributed tracing
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-04
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-04
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Adopt BDD with Godog for behavioral testing
|
||||
|
||||
* Status: Accepted
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-05
|
||||
**Status:** Accepted
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-05
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
# Combine BDD and Swagger-based testing
|
||||
|
||||
* Status: ✅ Partially Implemented (BDD + Documentation only)
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-05
|
||||
* Last Updated: 2026-04-05
|
||||
* Implementation Status: BDD testing and OpenAPI documentation completed, SDK generation deferred
|
||||
**Status:** Partially Implemented (BDD + Documentation only)
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-05
|
||||
**Last Updated:** 2026-04-05
|
||||
**Implementation Status:** BDD testing and OpenAPI documentation completed, SDK generation deferred
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# 13. OpenAPI/Swagger Toolchain Selection
|
||||
|
||||
**Date:** 2026-04-05
|
||||
**Status:** ✅ Partially Implemented (Documentation only)
|
||||
**Status:** Partially Implemented (Documentation only)
|
||||
**Authors:** Arcodange Team
|
||||
**Implementation Date:** 2026-04-05
|
||||
**Last Updated:** 2026-04-05
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# 15. CLI Subcommands and Flag Management with Cobra
|
||||
|
||||
**Date:** 2026-04-05
|
||||
**Status:** ✅ Implemented
|
||||
**Status:** Implemented
|
||||
**Authors:** Arcodange Team
|
||||
**Decision Date:** 2026-04-05
|
||||
**Implementation Status:** Phase 1 Complete
|
||||
@@ -222,7 +222,7 @@ dance-lessons-coach config validate
|
||||
|
||||
---
|
||||
|
||||
**Status:** Proposed
|
||||
**Status:** Proposed
|
||||
**Next Review:** 2026-04-12
|
||||
**Implementation Owner:** Arcodange Team
|
||||
**Approvers Needed:** @gabrielradureau
|
||||
@@ -1,10 +1,10 @@
|
||||
# 16. CI/CD Pipeline Design for Multi-Platform Compatibility
|
||||
|
||||
**Date:** 2026-04-05
|
||||
**Status:** ✅ Accepted
|
||||
**Status:** Accepted
|
||||
**Authors:** Arcodange Team
|
||||
**Decision Date:** 2026-04-08
|
||||
**Implementation Status:** ✅ Completed
|
||||
**Implementation Status:** Completed
|
||||
|
||||
## Context
|
||||
|
||||
@@ -832,7 +832,7 @@ jobs:
|
||||
- ✅ **Coverage reporting**: Badges updating automatically
|
||||
- ✅ **Binary builds**: Scripts executing properly in container environment
|
||||
|
||||
**Status:** ✅ Accepted
|
||||
**Status:** Accepted
|
||||
**Implementation Date:** 2026-04-08
|
||||
**Implementation Owner:** Arcodange Team
|
||||
**Reviewers:** @gabrielradureau
|
||||
@@ -1,10 +1,10 @@
|
||||
# 17. Trunk-Based Development Workflow for CI/CD Safety
|
||||
|
||||
**Date:** 2026-04-05
|
||||
**Status:** 🟢 Approved
|
||||
**Status:** Approved
|
||||
**Authors:** Arcodange Team
|
||||
**Decision Date:** 2026-04-05
|
||||
**Implementation Status:** ✅ Implemented
|
||||
**Implementation Status:** Implemented
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# 18. User Management and Authentication System
|
||||
|
||||
**Date:** 2024-04-06
|
||||
**Status:** Proposed
|
||||
**Date:** 2026-04-06
|
||||
**Status:** Partially Implemented
|
||||
**Authors:** Product Owner
|
||||
**Decision Drivers:** Security, User Personalization, Admin Functionality
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
# 19. PostgreSQL Database Integration
|
||||
|
||||
**Date:** 2024-04-07
|
||||
**Status:** Proposed
|
||||
**Date:** 2026-04-07
|
||||
**Status:** Partially Implemented
|
||||
**Authors:** Product Owner
|
||||
**Decision Drivers:** Data Persistence, Scalability, Production Readiness
|
||||
|
||||
@@ -359,8 +359,6 @@ The PostgreSQL integration follows established dance-lessons-coach patterns:
|
||||
2. **Configuration Updates:** New database configuration structure
|
||||
3. **Development Workflow:** Docker-based database for local development
|
||||
|
||||
|
||||
|
||||
## Alternatives Considered
|
||||
|
||||
### Alternative 1: Keep SQLite with File Persistence
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# ADR 0020: Docker Build Strategy - Traditional vs Buildx
|
||||
|
||||
## Status
|
||||
**Accepted** ✅
|
||||
**Status:** Accepted
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# 10. JWT Secret Retention Policy
|
||||
|
||||
## Status
|
||||
**Proposed** 🟡
|
||||
**Status:** Proposed
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# ADR 0022: Rate Limiting and Cache Strategy
|
||||
|
||||
## Status
|
||||
**Proposed** 🟡
|
||||
**Status:** Implemented (Phase 1) - Phase 2 still Proposed
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
# Config Hot Reloading Strategy
|
||||
|
||||
* Status: Proposed
|
||||
* Deciders: Gabriel Radureau, AI Agent
|
||||
* Date: 2026-04-05
|
||||
**Status:** Proposed
|
||||
**Authors:** Gabriel Radureau, AI Agent
|
||||
**Date:** 2026-04-05
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# ADR 0024: BDD Test Organization and Isolation Strategy
|
||||
|
||||
## Status
|
||||
**Proposed** 🟡
|
||||
**Status:** Partially Implemented
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
# ADR 0025: BDD Scenario Isolation Strategies
|
||||
|
||||
## Status
|
||||
**Proposed** 🟡
|
||||
**Status:** Partially Implemented
|
||||
|
||||
## Context
|
||||
|
||||
|
||||
168
adr/README.md
168
adr/README.md
@@ -1,129 +1,113 @@
|
||||
# Architecture Decision Records (ADRs)
|
||||
|
||||
This directory contains Architecture Decision Records (ADRs) for the dance-lessons-coach project.
|
||||
This directory contains the Architecture Decision Records (ADRs) for the dance-lessons-coach project. Each ADR captures a structurally important decision, its context, and its consequences.
|
||||
|
||||
## Index of ADRs
|
||||
## Index
|
||||
|
||||
| Number | Title | Status |
|
||||
|--------|-------|--------|
|
||||
| 0001 | Go 1.26.1 Standard | ✅ Accepted |
|
||||
| 0002 | Chi Router | ✅ Accepted |
|
||||
| 0003 | Zerolog Logging | ✅ Accepted |
|
||||
| 0004 | Interface-Based Design | ✅ Accepted |
|
||||
| 0005 | Graceful Shutdown | ✅ Accepted |
|
||||
| 0006 | Configuration Management | ✅ Accepted |
|
||||
| 0007 | OpenTelemetry Integration | ✅ Accepted |
|
||||
| 0008 | BDD Testing | ✅ Accepted |
|
||||
| 0009 | Hybrid Testing Approach | ✅ Accepted |
|
||||
| 0010 | CI/CD Pipeline Design | ✅ Accepted |
|
||||
| 0011 | Trunk-Based Development | ✅ Accepted |
|
||||
| 0012 | Commit Message Conventions | ✅ Accepted |
|
||||
| 0013 | Version Management Lifecycle | ✅ Accepted |
|
||||
| 0014 | Swagger Documentation | ✅ Accepted |
|
||||
| 0015 | Rate Limiting Strategy | ✅ Accepted |
|
||||
| 0016 | Cache Invalidation Strategy | ✅ Accepted |
|
||||
| 0017 | JWT Secret Rotation | ✅ Accepted |
|
||||
| 0018 | Configuration Hot Reloading | ✅ Accepted |
|
||||
| 0019 | BDD Feature Structure | ✅ Accepted |
|
||||
| 0020 | Database Migration Strategy | ✅ Accepted |
|
||||
| 0021 | API Versioning Strategy | ✅ Accepted |
|
||||
| 0022 | Rate Limiting and Cache Strategy | ✅ Accepted |
|
||||
| 0023 | Config Hot Reloading | 🟡 Proposed |
|
||||
| 0024 | BDD Test Organization and Isolation | 🟡 Proposed |
|
||||
| 0025 | BDD Scenario Isolation Strategies | 🟡 Proposed |
|
||||
| ADR | Title | Status |
|
||||
|-----|-------|--------|
|
||||
| [0001](0001-go-1.26.1-standard.md) | Use Go 1.26.1 as the standard Go version | Accepted |
|
||||
| [0002](0002-chi-router.md) | Use Chi router for HTTP routing | Accepted |
|
||||
| [0003](0003-zerolog-logging.md) | Use Zerolog for structured logging | Accepted |
|
||||
| [0004](0004-interface-based-design.md) | Adopt interface-based design pattern | Accepted |
|
||||
| [0005](0005-graceful-shutdown.md) | Implement graceful shutdown with readiness endpoints | Accepted |
|
||||
| [0006](0006-configuration-management.md) | Use Viper for configuration management | Accepted |
|
||||
| [0007](0007-opentelemetry-integration.md) | Integrate OpenTelemetry for distributed tracing | Accepted |
|
||||
| [0008](0008-bdd-testing.md) | Adopt BDD with Godog for behavioral testing | Accepted |
|
||||
| [0009](0009-hybrid-testing-approach.md) | Combine BDD and Swagger-based testing | Partially Implemented |
|
||||
| [0010](0010-api-v2-feature-flag.md) | API v2 Feature Flag Implementation | Accepted |
|
||||
| [0012](0012-git-hooks-staged-only-formatting.md) | Git Hooks: Staged-Only Formatting | Accepted |
|
||||
| [0013](0013-openapi-swagger-toolchain.md) | OpenAPI/Swagger Toolchain Selection | Partially Implemented |
|
||||
| [0015](0015-cli-subcommands-cobra.md) | CLI Subcommands and Flag Management with Cobra | Implemented |
|
||||
| [0016](0016-ci-cd-pipeline-design.md) | CI/CD Pipeline Design for Multi-Platform Compatibility | Accepted |
|
||||
| [0017](0017-trunk-based-development-workflow.md) | Trunk-Based Development Workflow for CI/CD Safety | Approved |
|
||||
| [0018](0018-user-management-auth-system.md) | User Management and Authentication System | Proposed |
|
||||
| [0019](0019-postgresql-integration.md) | PostgreSQL Database Integration | Proposed |
|
||||
| [0020](0020-docker-build-strategy.md) | Docker Build Strategy: Traditional vs Buildx | Accepted |
|
||||
| [0021](0021-jwt-secret-retention-policy.md) | JWT Secret Retention Policy | Proposed |
|
||||
| [0022](0022-rate-limiting-cache-strategy.md) | Rate Limiting and Cache Strategy | Proposed |
|
||||
| [0023](0023-config-hot-reloading.md) | Config Hot Reloading Strategy | Proposed |
|
||||
| [0024](0024-bdd-test-organization-and-isolation.md) | BDD Test Organization and Isolation Strategy | Proposed |
|
||||
| [0025](0025-bdd-scenario-isolation-strategies.md) | BDD Scenario Isolation Strategies | Proposed |
|
||||
|
||||
> **Note** : numbers `0011` and `0014` are not currently in use. Reserved for future ADRs or representing previously deleted entries.
|
||||
|
||||
## What is an ADR?
|
||||
|
||||
An ADR is a document that captures an important architectural decision made along with its context and consequences.
|
||||
An ADR is a document capturing one significant architectural decision: the **context** that motivated it, the **decision** itself, and its **consequences**. ADRs are append-only — once published, an ADR is not edited (except for typo / status updates). New decisions that supersede previous ones are recorded as new ADRs that explicitly link back.
|
||||
|
||||
## Format
|
||||
## Canonical Format
|
||||
|
||||
Each ADR follows this structure:
|
||||
All ADRs follow the canonical format below (homogenized 2026-05-03):
|
||||
|
||||
```markdown
|
||||
# [Short title is a few words]
|
||||
# NN. Short title summarising the decision
|
||||
|
||||
* Status: [Proposed | Accepted | Deprecated | Superseded]
|
||||
* Deciders: [List of decision makers]
|
||||
* Date: [YYYY-MM-DD]
|
||||
**Status:** <Proposed | Accepted | Implemented | Partially Implemented | Approved | Rejected | Deferred | Deprecated | Superseded by ADR-NNNN>
|
||||
**Date:** YYYY-MM-DD
|
||||
**Authors:** Name(s)
|
||||
|
||||
[Optional fields, all in `**Field:** value` format:]
|
||||
**Decision Drivers:** ...
|
||||
**Implementation Status:** ...
|
||||
**Implementation Date:** ...
|
||||
**Last Updated:** ...
|
||||
|
||||
## Context and Problem Statement
|
||||
|
||||
[Describe the context and problem statement]
|
||||
[Describe the context and problem statement.]
|
||||
|
||||
## Decision Drivers
|
||||
|
||||
* [Driver 1]
|
||||
* [Driver 2]
|
||||
* [Driver 3]
|
||||
* Driver 1
|
||||
* Driver 2
|
||||
|
||||
## Considered Options
|
||||
|
||||
* [Option 1]
|
||||
* [Option 2]
|
||||
* [Option 3]
|
||||
* Option 1
|
||||
* Option 2
|
||||
|
||||
## Decision Outcome
|
||||
|
||||
Chosen option: "[Option 1]" because [justification]
|
||||
Chosen option: "Option 1" because [justification].
|
||||
|
||||
## Pros and Cons of the Options
|
||||
|
||||
### [Option 1]
|
||||
### Option 1
|
||||
|
||||
* Good, because [argument a]
|
||||
* Good, because [argument b]
|
||||
* Bad, because [argument c]
|
||||
* Good, because [argument].
|
||||
* Bad, because [argument].
|
||||
|
||||
### [Option 2]
|
||||
### Option 2
|
||||
|
||||
* Good, because [argument a]
|
||||
* Good, because [argument b]
|
||||
* Bad, because [argument c]
|
||||
* Good, because [argument].
|
||||
* Bad, because [argument].
|
||||
|
||||
## Links
|
||||
|
||||
* [Link type] [Link to ADR]
|
||||
* [Link type] [Link to ADR]
|
||||
* Related ADR: [ADR-NNNN](NNNN-slug.md)
|
||||
* Issue: [#NN](https://gitea.arcodange.lab/arcodange/dance-lessons-coach/issues/NN)
|
||||
```
|
||||
|
||||
## ADR List
|
||||
|
||||
* [0001-go-1.26.1-standard.md](0001-go-1.26.1-standard.md) - Use Go 1.26.1 as the standard Go version
|
||||
* [0002-chi-router.md](0002-chi-router.md) - Use Chi router for HTTP routing
|
||||
* [0003-zerolog-logging.md](0003-zerolog-logging.md) - Use Zerolog for structured logging
|
||||
* [0004-interface-based-design.md](0004-interface-based-design.md) - Adopt interface-based design pattern
|
||||
* [0005-graceful-shutdown.md](0005-graceful-shutdown.md) - Implement graceful shutdown with readiness endpoints
|
||||
* [0006-configuration-management.md](0006-configuration-management.md) - Use Viper for configuration management
|
||||
* [0007-opentelemetry-integration.md](0007-opentelemetry-integration.md) - Integrate OpenTelemetry for distributed tracing
|
||||
* [0008-bdd-testing.md](0008-bdd-testing.md) - Adopt BDD with Godog for behavioral testing
|
||||
* [0009-hybrid-testing-approach.md](0009-hybrid-testing-approach.md) - Combine BDD and Swagger-based testing
|
||||
* [0010-api-v2-feature-flag.md](0010-api-v2-feature-flag.md) - API v2 implementation with feature flag control
|
||||
* [0011-validation-library-selection.md](0011-validation-library-selection.md) - Selection of go-playground/validator for input validation
|
||||
* [0012-git-hooks-staged-only-formatting.md](0012-git-hooks-staged-only-formatting.md) - Git hooks format only staged Go files
|
||||
* [0013-openapi-swagger-toolchain.md](0013-openapi-swagger-toolchain.md) - ✅ OpenAPI/Swagger documentation with swaggo/swag (Implemented)
|
||||
* [0014-grpc-adoption-strategy.md](0014-grpc-adoption-strategy.md) - Hybrid REST/gRPC adoption strategy
|
||||
* [0015-cli-subcommands-cobra.md](0015-cli-subcommands-cobra.md) - Cobra CLI framework adoption
|
||||
* [0016-ci-cd-pipeline-design.md](0016-ci-cd-pipeline-design.md) - CI/CD pipeline architecture
|
||||
* [0017-trunk-based-development-workflow.md](0017-trunk-based-development-workflow.md) - Trunk-based development workflow
|
||||
* [0018-user-management-auth-system.md](0018-user-management-auth-system.md) - User management and authentication system
|
||||
* [0019-postgresql-integration.md](0019-postgresql-integration.md) - PostgreSQL database integration
|
||||
* [0020-docker-build-strategy.md](0020-docker-build-strategy.md) - Docker Build Strategy: Traditional vs Buildx
|
||||
* [0021-jwt-secret-retention-policy.md](0021-jwt-secret-retention-policy.md) - JWT Secret Retention Policy with Configurable TTL and Retention
|
||||
* [0022-rate-limiting-cache-strategy.md](0022-rate-limiting-cache-strategy.md) - Rate Limiting and Cache Strategy with Multi-Phase Implementation
|
||||
* [0023-config-hot-reloading.md](0023-config-hot-reloading.md) - Config Hot Reloading Strategy
|
||||
* [0025-bdd-scenario-isolation-strategies.md](0025-bdd-scenario-isolation-strategies.md) - Schema-per-scenario isolation for BDD tests
|
||||
|
||||
## How to Add a New ADR
|
||||
|
||||
1. Create a new file with the next available number (e.g., `0010-new-decision.md`)
|
||||
2. Follow the template format
|
||||
3. Update this README.md with the new ADR
|
||||
4. Commit the changes
|
||||
|
||||
## Status Legend
|
||||
|
||||
* **Proposed**: Decision is being discussed
|
||||
* **Accepted**: Decision has been made and implemented
|
||||
* **Deprecated**: Decision is no longer relevant
|
||||
* **Superseded**: Decision has been replaced by another ADR
|
||||
| Status | Meaning |
|
||||
|---|---|
|
||||
| **Proposed** | Decision is being discussed; no implementation yet. |
|
||||
| **Accepted** | Decision has been made; implementation may be pending or in progress. |
|
||||
| **Approved** | Same as Accepted; alternative term used in some legacy ADRs. |
|
||||
| **Implemented** | Decision is fully implemented and in production. |
|
||||
| **Partially Implemented** | Decision is partly implemented; remainder is deferred or pending. |
|
||||
| **Rejected** | Decision considered and explicitly rejected. The ADR documents why. |
|
||||
| **Deferred** | Decision postponed; revisit later. |
|
||||
| **Deprecated** | Decision is no longer relevant; system has moved on. |
|
||||
| **Superseded by ADR-NNNN** | Decision has been replaced by another ADR. Always include the link. |
|
||||
|
||||
## How to Add a New ADR
|
||||
|
||||
1. Pick the next available number (currently next would be `0026`).
|
||||
2. Copy an existing ADR (e.g., `0001-go-1.26.1-standard.md`) as a starting template.
|
||||
3. Edit the title, status, date, authors, and content.
|
||||
4. Update this `README.md` index with the new ADR.
|
||||
5. Commit using gitmoji convention (e.g., `📝 docs(adr): add ADR-0026 about ...`).
|
||||
6. Open a PR for review.
|
||||
|
||||
13
config.yaml
13
config.yaml
@@ -87,4 +87,15 @@ database:
|
||||
|
||||
# Maximum lifetime of connections (default: "1h")
|
||||
# Format: number + unit (s, m, h)
|
||||
conn_max_lifetime: 1h
|
||||
conn_max_lifetime: 1h
|
||||
|
||||
# Cache configuration (in-memory)
|
||||
cache:
|
||||
# Enable in-memory cache (default: true)
|
||||
enabled: true
|
||||
|
||||
# Default TTL in seconds for cache items (default: 300 = 5 minutes)
|
||||
default_ttl_seconds: 300
|
||||
|
||||
# Cleanup interval in seconds for expired items (default: 600 = 10 minutes)
|
||||
cleanup_interval_seconds: 600
|
||||
@@ -21,17 +21,35 @@ Feature: Greet Service
|
||||
When I send a POST request to v2 greet with name "John"
|
||||
Then the response should be "{\"message\":\"Hello my friend John!\"}"
|
||||
|
||||
@v2 @api
|
||||
Scenario: v2 default greeting with empty name
|
||||
Given the server is running with v2 enabled
|
||||
When I send a POST request to v2 greet with name ""
|
||||
Then the response should be "{\"message\":\"Hello my friend!\"}"
|
||||
|
||||
@v2 @api
|
||||
Scenario: v2 greeting with missing name field
|
||||
Given the server is running with v2 enabled
|
||||
When I send a POST request to v2 greet with invalid JSON "{}"
|
||||
Then the response should be "{\"message\":\"Hello my friend!\"}"
|
||||
|
||||
@v2 @api
|
||||
Scenario: v2 greeting with name that is too long
|
||||
Given the server is running with v2 enabled
|
||||
When I send a POST request to v2 greet with name "ThisNameIsWayTooLongAndShouldFailValidationBecauseItExceedsTheMaximumAllowedLengthOf100Characters!!!!"
|
||||
Then the response should contain error "validation_failed"
|
||||
Then the response should contain error "validation_failed"
|
||||
|
||||
@ratelimit @skip @bdd-deferred
|
||||
# NOTE: Functional behavior validated by unit tests in pkg/middleware/ratelimit_test.go.
|
||||
# BDD scenario currently skipped: env-var-based rate limit config does not reach the
|
||||
# already-started test server (architectural limitation of testsetup, not the middleware).
|
||||
# TODO: rework testserver to allow per-scenario rate limit config (admin endpoint or
|
||||
# per-scenario fresh server), then re-enable this scenario.
|
||||
Scenario: Greet endpoint rejects requests over the rate limit
|
||||
Given the server is running with rate limit set to 3 requests per minute and burst 3
|
||||
When I make 3 requests to "/api/v1/greet/Alice"
|
||||
Then all responses should have status 200
|
||||
When I make 1 more request to "/api/v1/greet/Alice"
|
||||
Then the response should have status 429
|
||||
And the response body should contain "rate_limited"
|
||||
And the response should have header "Retry-After"
|
||||
@@ -7,4 +7,12 @@ Feature: Health Endpoint
|
||||
Scenario: Health check returns healthy status
|
||||
Given the server is running
|
||||
When I request the health endpoint
|
||||
Then the response should be "{\"status\":\"healthy\"}"
|
||||
Then the response should be "{\"status\":\"healthy\"}"
|
||||
|
||||
@basic @critical
|
||||
Scenario: Healthz endpoint returns rich health info
|
||||
Given the server is running
|
||||
When I request the healthz endpoint
|
||||
Then the status code should be 200
|
||||
And the response should be JSON with fields "status, version, uptime_seconds, timestamp"
|
||||
And the "status" field should equal "healthy"
|
||||
3
frontend/app.vue
Normal file
3
frontend/app.vue
Normal file
@@ -0,0 +1,3 @@
|
||||
<template>
|
||||
<NuxtPage />
|
||||
</template>
|
||||
22
frontend/components/HealthDashboard.vue
Normal file
22
frontend/components/HealthDashboard.vue
Normal file
@@ -0,0 +1,22 @@
|
||||
<script setup lang="ts">
|
||||
interface HealthInfo {
|
||||
status: string
|
||||
version: string
|
||||
uptime_seconds: number
|
||||
timestamp: string
|
||||
}
|
||||
const { data, pending, error } = await useFetch<HealthInfo>('/api/healthz')
|
||||
</script>
|
||||
<template>
|
||||
<section data-testid="health-dashboard">
|
||||
<h2>Server Health</h2>
|
||||
<p v-if="pending">Loading...</p>
|
||||
<p v-else-if="error">Error loading health: {{ error.message }}</p>
|
||||
<ul v-else-if="data" data-testid="health-info">
|
||||
<li><strong>Status:</strong> <span data-testid="health-status">{{ data.status }}</span></li>
|
||||
<li><strong>Version:</strong> {{ data.version }}</li>
|
||||
<li><strong>Uptime:</strong> {{ data.uptime_seconds }} seconds</li>
|
||||
<li><strong>Last check:</strong> {{ data.timestamp }}</li>
|
||||
</ul>
|
||||
</section>
|
||||
</template>
|
||||
11
frontend/nuxt.config.ts
Normal file
11
frontend/nuxt.config.ts
Normal file
@@ -0,0 +1,11 @@
|
||||
export default defineNuxtConfig({
|
||||
devtools: { enabled: true },
|
||||
nitro: {
|
||||
devProxy: {
|
||||
'/api': {
|
||||
target: 'http://localhost:8080',
|
||||
changeOrigin: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
11237
frontend/package-lock.json
generated
Normal file
11237
frontend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
18
frontend/package.json
Normal file
18
frontend/package.json
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"name": "dance-lessons-coach-frontend",
|
||||
"type": "module",
|
||||
"scripts": {
|
||||
"build": "nuxt build",
|
||||
"dev": "nuxt dev",
|
||||
"generate": "nuxt generate",
|
||||
"preview": "nuxt preview",
|
||||
"postinstall": "nuxt prepare"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@playwright/test": "^1.59.1",
|
||||
"@types/node": "^25.6.0",
|
||||
"nuxt": "^3.13.0",
|
||||
"typescript": "^6.0.3"
|
||||
},
|
||||
"packageManager": "npm@11.5.2"
|
||||
}
|
||||
6
frontend/pages/index.vue
Normal file
6
frontend/pages/index.vue
Normal file
@@ -0,0 +1,6 @@
|
||||
<template>
|
||||
<main>
|
||||
<h1>dance-lessons-coach</h1>
|
||||
<HealthDashboard />
|
||||
</main>
|
||||
</template>
|
||||
14
frontend/playwright.config.ts
Normal file
14
frontend/playwright.config.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
import { defineConfig } from '@playwright/test'
|
||||
export default defineConfig({
|
||||
testDir: './tests/e2e',
|
||||
timeout: 30_000,
|
||||
use: {
|
||||
baseURL: 'http://localhost:3000',
|
||||
},
|
||||
webServer: {
|
||||
command: 'npm run dev',
|
||||
url: 'http://localhost:3000',
|
||||
timeout: 60_000,
|
||||
reuseExistingServer: !process.env.CI,
|
||||
},
|
||||
})
|
||||
8
frontend/tests/e2e/health.spec.ts
Normal file
8
frontend/tests/e2e/health.spec.ts
Normal file
@@ -0,0 +1,8 @@
|
||||
import { test, expect } from '@playwright/test'
|
||||
|
||||
test('home page loads and shows server health info', async ({ page }) => {
|
||||
await page.goto('/')
|
||||
await expect(page.getByTestId('health-dashboard')).toBeVisible()
|
||||
const heading = page.getByRole('heading', { name: /dance-lessons-coach/i })
|
||||
await expect(heading).toBeVisible()
|
||||
})
|
||||
6
frontend/tsconfig.json
Normal file
6
frontend/tsconfig.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"extends": "./.nuxt/tsconfig.json",
|
||||
"compilerOptions": {
|
||||
"strict": true
|
||||
}
|
||||
}
|
||||
2
go.mod
2
go.mod
@@ -10,6 +10,7 @@ require (
|
||||
github.com/go-playground/validator/v10 v10.30.2
|
||||
github.com/golang-jwt/jwt/v5 v5.3.1
|
||||
github.com/lib/pq v1.12.3
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible
|
||||
github.com/rs/zerolog v1.35.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/viper v1.21.0
|
||||
@@ -22,6 +23,7 @@ require (
|
||||
go.opentelemetry.io/otel/sdk v1.43.0
|
||||
go.opentelemetry.io/otel/trace v1.43.0
|
||||
golang.org/x/crypto v0.49.0
|
||||
golang.org/x/time v0.15.0
|
||||
gorm.io/driver/postgres v1.6.0
|
||||
gorm.io/driver/sqlite v1.6.0
|
||||
gorm.io/gorm v1.31.1
|
||||
|
||||
4
go.sum
4
go.sum
@@ -118,6 +118,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
|
||||
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
@@ -206,6 +208,8 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.35.0 h1:JOVx6vVDFokkpaq1AEptVzLTpDe9KGpj5tR4/X+ybL8=
|
||||
golang.org/x/text v0.35.0/go.mod h1:khi/HExzZJ2pGnjenulevKNX1W67CUy0AsXcNubPGCA=
|
||||
golang.org/x/time v0.15.0 h1:bbrp8t3bGUeFOx08pvsMYRTCVSMk89u4tKbNOZbp88U=
|
||||
golang.org/x/time v0.15.0/go.mod h1:Y4YMaQmXwGQZoFaVFk4YpCt4FLQMYKZe9oeV/f4MSno=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.42.0 h1:uNgphsn75Tdz5Ji2q36v/nsFSfR/9BRFvqhGBaJGd5k=
|
||||
golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0=
|
||||
|
||||
@@ -63,3 +63,39 @@ func (s *CommonSteps) theStatusCodeShouldBe(expectedStatus int) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// JSON field validation
|
||||
func (s *CommonSteps) theResponseShouldBeJSONWithFields(fields string) error {
|
||||
// Parse the fields comma-separated list
|
||||
fieldList := strings.Split(fields, ", ")
|
||||
for _, field := range fieldList {
|
||||
field = strings.TrimSpace(field)
|
||||
if !s.responseContainsJSONField(field) {
|
||||
return fmt.Errorf("response does not contain field %q", field)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CommonSteps) responseContainsJSONField(field string) bool {
|
||||
body := string(s.client.GetLastBody())
|
||||
// Simple check - look for "field":" in the JSON
|
||||
// This works for simple fields, may need enhancement for nested objects
|
||||
searchString := `"` + field + `":`
|
||||
return strings.Contains(body, searchString)
|
||||
}
|
||||
|
||||
func (s *CommonSteps) theFieldShouldEqual(field, expectedValue string) error {
|
||||
body := string(s.client.GetLastBody())
|
||||
// Look for the field and extract its value
|
||||
// Simple implementation: look for "field":"value" pattern
|
||||
searchPattern := `"` + field + `":"` + expectedValue + `"`
|
||||
if !strings.Contains(body, searchPattern) {
|
||||
// Also try without quotes (for numbers)
|
||||
searchPatternNum := `"` + field + `":` + expectedValue
|
||||
if !strings.Contains(body, searchPatternNum) {
|
||||
return fmt.Errorf("field %q does not equal %q in response: %s", field, expectedValue, body)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -24,6 +24,10 @@ func (s *HealthSteps) iRequestTheHealthEndpoint() error {
|
||||
return s.client.Request("GET", "/api/health", nil)
|
||||
}
|
||||
|
||||
func (s *HealthSteps) iRequestTheHealthzEndpoint() error {
|
||||
return s.client.Request("GET", "/api/healthz", nil)
|
||||
}
|
||||
|
||||
func (s *HealthSteps) theServerIsRunning() error {
|
||||
// Actually verify the server is running by checking the readiness endpoint
|
||||
return s.client.Request("GET", "/api/ready", nil)
|
||||
|
||||
94
pkg/bdd/steps/ratelimit_steps.go
Normal file
94
pkg/bdd/steps/ratelimit_steps.go
Normal file
@@ -0,0 +1,94 @@
|
||||
package steps
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"dance-lessons-coach/pkg/bdd/testserver"
|
||||
)
|
||||
|
||||
// RateLimitSteps holds rate limit-related step definitions
|
||||
type RateLimitSteps struct {
|
||||
client *testserver.Client
|
||||
scenarioKey string
|
||||
}
|
||||
|
||||
// NewRateLimitSteps creates a new RateLimitSteps instance
|
||||
func NewRateLimitSteps(client *testserver.Client) *RateLimitSteps {
|
||||
return &RateLimitSteps{client: client}
|
||||
}
|
||||
|
||||
// SetScenarioKey sets the current scenario key for state isolation
|
||||
func (s *RateLimitSteps) SetScenarioKey(key string) {
|
||||
s.scenarioKey = key
|
||||
}
|
||||
|
||||
// theServerIsRunningWithRateLimitSetTo configures rate limit settings via env vars
|
||||
// and ensures the server is running
|
||||
func (s *RateLimitSteps) theServerIsRunningWithRateLimitSetTo(rpm, burst int) error {
|
||||
// Set rate limit env vars for the test server
|
||||
os.Setenv("DLC_RATE_LIMIT_ENABLED", "true")
|
||||
os.Setenv("DLC_RATE_LIMIT_REQUESTS_PER_MINUTE", fmt.Sprintf("%d", rpm))
|
||||
os.Setenv("DLC_RATE_LIMIT_BURST_SIZE", fmt.Sprintf("%d", burst))
|
||||
|
||||
// Verify the server is running
|
||||
return s.client.Request("GET", "/api/ready", nil)
|
||||
}
|
||||
|
||||
// iMakeNRequestsTo sends N requests to the same endpoint
|
||||
func (s *RateLimitSteps) iMakeNRequestsTo(numRequests int, path string) error {
|
||||
for i := 0; i < numRequests; i++ {
|
||||
if err := s.client.Request("GET", path, nil); err != nil {
|
||||
return fmt.Errorf("request %d failed: %w", i+1, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// allResponsesShouldHaveStatus verifies that all responses had a specific status
|
||||
func (s *RateLimitSteps) allResponsesShouldHaveStatus(statusCode int) error {
|
||||
// Since the client only stores the last response, we check that one
|
||||
// For the rate limit test, after making 3 requests with burst=3, all should succeed
|
||||
actualStatus := s.client.GetLastStatusCode()
|
||||
if actualStatus != statusCode {
|
||||
return fmt.Errorf("expected status %d, got %d", statusCode, actualStatus)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// iMakeOneMoreRequestTo sends 1 more request to the endpoint
|
||||
func (s *RateLimitSteps) iMakeOneMoreRequestTo(path string) error {
|
||||
return s.client.Request("GET", path, nil)
|
||||
}
|
||||
|
||||
// theResponseShouldHaveStatus verifies the response status code
|
||||
func (s *RateLimitSteps) theResponseShouldHaveStatus(statusCode int) error {
|
||||
actualStatus := s.client.GetLastStatusCode()
|
||||
if actualStatus != statusCode {
|
||||
return fmt.Errorf("expected status %d, got %d", statusCode, actualStatus)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// theResponseBodyShouldContain verifies the response body contains a specific string
|
||||
func (s *RateLimitSteps) theResponseBodyShouldContain(text string) error {
|
||||
body := string(s.client.GetLastBody())
|
||||
if !strings.Contains(body, text) {
|
||||
return fmt.Errorf("expected response body to contain %q, got %q", text, body)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// theResponseShouldHaveHeader verifies that the response has a specific header
|
||||
func (s *RateLimitSteps) theResponseShouldHaveHeader(headerName string) error {
|
||||
resp := s.client.GetLastResponse()
|
||||
if resp == nil {
|
||||
return fmt.Errorf("no response available")
|
||||
}
|
||||
headerValue := resp.Header.Get(headerName)
|
||||
if headerValue == "" {
|
||||
return fmt.Errorf("expected header %q to be set, but it was not found", headerName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -16,6 +16,7 @@ type StepContext struct {
|
||||
commonSteps *CommonSteps
|
||||
jwtRetentionSteps *JWTRetentionSteps
|
||||
configSteps *ConfigSteps
|
||||
rateLimitSteps *RateLimitSteps
|
||||
}
|
||||
|
||||
// NewStepContext creates a new step context
|
||||
@@ -28,6 +29,7 @@ func NewStepContext(client *testserver.Client) *StepContext {
|
||||
commonSteps: NewCommonSteps(client),
|
||||
jwtRetentionSteps: NewJWTRetentionSteps(client),
|
||||
configSteps: NewConfigSteps(client),
|
||||
rateLimitSteps: NewRateLimitSteps(client),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -62,6 +64,9 @@ func SetScenarioKeyForAllSteps(sc *StepContext, key string) {
|
||||
if sc.commonSteps != nil {
|
||||
sc.commonSteps.SetScenarioKey(key)
|
||||
}
|
||||
if sc.rateLimitSteps != nil {
|
||||
sc.rateLimitSteps.SetScenarioKey(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -83,6 +88,7 @@ func InitializeAllSteps(ctx *godog.ScenarioContext, client *testserver.Client, s
|
||||
|
||||
// Health steps
|
||||
ctx.Step(`^I request the health endpoint$`, sc.healthSteps.iRequestTheHealthEndpoint)
|
||||
ctx.Step(`^I request the healthz endpoint$`, sc.healthSteps.iRequestTheHealthzEndpoint)
|
||||
ctx.Step(`^the server is running$`, sc.healthSteps.theServerIsRunning)
|
||||
|
||||
// Auth steps
|
||||
@@ -293,8 +299,19 @@ func InitializeAllSteps(ctx *godog.ScenarioContext, client *testserver.Client, s
|
||||
ctx.Step(`^the audit entry should contain the previous and new values$`, sc.configSteps.theAuditEntryShouldContainThePreviousAndNewValues)
|
||||
ctx.Step(`^the audit entry should contain the timestamp of the change$`, sc.configSteps.theAuditEntryShouldContainTheTimestampOfTheChange)
|
||||
|
||||
// Rate limit steps
|
||||
ctx.Step(`^the server is running with rate limit set to (\d+) requests per minute and burst (\d+)$`, sc.rateLimitSteps.theServerIsRunningWithRateLimitSetTo)
|
||||
ctx.Step(`^I make (\d+) requests to "([^"]*)"$`, sc.rateLimitSteps.iMakeNRequestsTo)
|
||||
ctx.Step(`^all responses should have status (\d+)$`, sc.rateLimitSteps.allResponsesShouldHaveStatus)
|
||||
ctx.Step(`^I make 1 more request to "([^"]*)"$`, sc.rateLimitSteps.iMakeOneMoreRequestTo)
|
||||
ctx.Step(`^the response should have status (\d+)$`, sc.rateLimitSteps.theResponseShouldHaveStatus)
|
||||
ctx.Step(`^the response body should contain "([^"]*)"$`, sc.rateLimitSteps.theResponseBodyShouldContain)
|
||||
ctx.Step(`^the response should have header "([^"]*)"$`, sc.rateLimitSteps.theResponseShouldHaveHeader)
|
||||
|
||||
// Common steps
|
||||
ctx.Step(`^the response should be "{\\"([^"]*)":\\"([^"]*)"}"$`, sc.commonSteps.theResponseShouldBe)
|
||||
ctx.Step(`^the response should contain error "([^"]*)"$`, sc.commonSteps.theResponseShouldContainError)
|
||||
ctx.Step(`^the status code should be (\d+)$`, sc.commonSteps.theStatusCodeShouldBe)
|
||||
ctx.Step(`^the response should be JSON with fields "([^"]*)"$`, sc.commonSteps.theResponseShouldBeJSONWithFields)
|
||||
ctx.Step(`^the "([^"]*)" field should equal "([^"]*)"$`, sc.commonSteps.theFieldShouldEqual)
|
||||
}
|
||||
|
||||
@@ -676,6 +676,25 @@ func (s *Server) shouldEnableV2() bool {
|
||||
// createTestConfig creates a test configuration
|
||||
// Pass v2Enabled explicitly to avoid reading env vars deep in the stack
|
||||
func createTestConfig(port int, v2Enabled bool) *config.Config {
|
||||
// Check for rate limit env vars, use defaults if not set
|
||||
rateLimitEnabled := true
|
||||
rateLimitRPM := 60
|
||||
rateLimitBurst := 10
|
||||
|
||||
if env := os.Getenv("DLC_RATE_LIMIT_ENABLED"); env != "" {
|
||||
rateLimitEnabled = strings.EqualFold(env, "true") || env == "1"
|
||||
}
|
||||
if env := os.Getenv("DLC_RATE_LIMIT_REQUESTS_PER_MINUTE"); env != "" {
|
||||
if val, err := strconv.Atoi(env); err == nil {
|
||||
rateLimitRPM = val
|
||||
}
|
||||
}
|
||||
if env := os.Getenv("DLC_RATE_LIMIT_BURST_SIZE"); env != "" {
|
||||
if val, err := strconv.Atoi(env); err == nil {
|
||||
rateLimitBurst = val
|
||||
}
|
||||
}
|
||||
|
||||
return &config.Config{
|
||||
Server: config.ServerConfig{
|
||||
Host: "0.0.0.0",
|
||||
@@ -702,5 +721,10 @@ func createTestConfig(port int, v2Enabled bool) *config.Config {
|
||||
Logging: config.LoggingConfig{
|
||||
Level: "debug",
|
||||
},
|
||||
RateLimit: config.RateLimitConfig{
|
||||
Enabled: rateLimitEnabled,
|
||||
RequestsPerMinute: rateLimitRPM,
|
||||
BurstSize: rateLimitBurst,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
56
pkg/cache/cache.go
vendored
Normal file
56
pkg/cache/cache.go
vendored
Normal file
@@ -0,0 +1,56 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
gocache "github.com/patrickmn/go-cache"
|
||||
)
|
||||
|
||||
// Service defines the interface for cache operations
|
||||
type Service interface {
|
||||
Set(key string, value interface{}, ttl time.Duration)
|
||||
Get(key string) (interface{}, bool)
|
||||
Delete(key string)
|
||||
Flush()
|
||||
ItemCount() int
|
||||
}
|
||||
|
||||
// InMemoryService implements Service using go-cache library
|
||||
type InMemoryService struct {
|
||||
cache *gocache.Cache
|
||||
}
|
||||
|
||||
// NewInMemoryService creates a new in-memory cache service
|
||||
// defaultTTL: default time-to-live for cache items
|
||||
// cleanupInterval: interval at which expired items are cleaned up
|
||||
func NewInMemoryService(defaultTTL, cleanupInterval time.Duration) Service {
|
||||
c := gocache.New(defaultTTL, cleanupInterval)
|
||||
return &InMemoryService{cache: c}
|
||||
}
|
||||
|
||||
// Set stores a value in the cache with the specified TTL
|
||||
func (s *InMemoryService) Set(key string, value interface{}, ttl time.Duration) {
|
||||
s.cache.Set(key, value, ttl)
|
||||
}
|
||||
|
||||
// Get retrieves a value from the cache
|
||||
// Returns the value and true if found, nil and false if not found or expired
|
||||
func (s *InMemoryService) Get(key string) (interface{}, bool) {
|
||||
val, found := s.cache.Get(key)
|
||||
return val, found
|
||||
}
|
||||
|
||||
// Delete removes an item from the cache
|
||||
func (s *InMemoryService) Delete(key string) {
|
||||
s.cache.Delete(key)
|
||||
}
|
||||
|
||||
// Flush clears all items from the cache
|
||||
func (s *InMemoryService) Flush() {
|
||||
s.cache.Flush()
|
||||
}
|
||||
|
||||
// ItemCount returns the number of items currently in the cache
|
||||
func (s *InMemoryService) ItemCount() int {
|
||||
return s.cache.ItemCount()
|
||||
}
|
||||
135
pkg/cache/cache_test.go
vendored
Normal file
135
pkg/cache/cache_test.go
vendored
Normal file
@@ -0,0 +1,135 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestInMemoryService_SetGet(t *testing.T) {
|
||||
svc := NewInMemoryService(1*time.Hour, 1*time.Hour)
|
||||
|
||||
// Test Set and Get
|
||||
svc.Set("key1", "value1", 1*time.Hour)
|
||||
val, ok := svc.Get("key1")
|
||||
if !ok {
|
||||
t.Fatal("Expected to find key1 in cache")
|
||||
}
|
||||
if val != "value1" {
|
||||
t.Fatalf("Expected 'value1', got '%v'", val)
|
||||
}
|
||||
|
||||
// Test Get non-existent key
|
||||
_, ok = svc.Get("nonexistent")
|
||||
if ok {
|
||||
t.Fatal("Expected not to find nonexistent key")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryService_Delete(t *testing.T) {
|
||||
svc := NewInMemoryService(1*time.Hour, 1*time.Hour)
|
||||
|
||||
svc.Set("key1", "value1", 1*time.Hour)
|
||||
_, ok := svc.Get("key1")
|
||||
if !ok {
|
||||
t.Fatal("Expected to find key1 before delete")
|
||||
}
|
||||
|
||||
svc.Delete("key1")
|
||||
_, ok = svc.Get("key1")
|
||||
if ok {
|
||||
t.Fatal("Expected not to find key1 after delete")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryService_Flush(t *testing.T) {
|
||||
svc := NewInMemoryService(1*time.Hour, 1*time.Hour)
|
||||
|
||||
svc.Set("key1", "value1", 1*time.Hour)
|
||||
svc.Set("key2", "value2", 1*time.Hour)
|
||||
|
||||
if svc.ItemCount() != 2 {
|
||||
t.Fatalf("Expected 2 items, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
svc.Flush()
|
||||
|
||||
if svc.ItemCount() != 0 {
|
||||
t.Fatalf("Expected 0 items after flush, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
_, ok := svc.Get("key1")
|
||||
if ok {
|
||||
t.Fatal("Expected key1 to be flushed")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryService_ItemCount(t *testing.T) {
|
||||
svc := NewInMemoryService(1*time.Hour, 1*time.Hour)
|
||||
|
||||
if svc.ItemCount() != 0 {
|
||||
t.Fatalf("Expected 0 items initially, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
svc.Set("key1", "value1", 1*time.Hour)
|
||||
if svc.ItemCount() != 1 {
|
||||
t.Fatalf("Expected 1 item, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
svc.Set("key2", "value2", 1*time.Hour)
|
||||
if svc.ItemCount() != 2 {
|
||||
t.Fatalf("Expected 2 items, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
svc.Delete("key1")
|
||||
if svc.ItemCount() != 1 {
|
||||
t.Fatalf("Expected 1 item after delete, got %d", svc.ItemCount())
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryService_TTLExpiration(t *testing.T) {
|
||||
// Use a very short TTL for testing
|
||||
svc := NewInMemoryService(100*time.Millisecond, 50*time.Millisecond)
|
||||
|
||||
svc.Set("key1", "value1", 50*time.Millisecond)
|
||||
|
||||
// Should be present immediately
|
||||
val, ok := svc.Get("key1")
|
||||
if !ok {
|
||||
t.Fatal("Expected to find key1 immediately after set")
|
||||
}
|
||||
if val != "value1" {
|
||||
t.Fatalf("Expected 'value1', got '%v'", val)
|
||||
}
|
||||
|
||||
// Wait for expiration
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Should be expired now
|
||||
_, ok = svc.Get("key1")
|
||||
if ok {
|
||||
t.Fatal("Expected key1 to be expired after TTL")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInMemoryService_DifferentTypes(t *testing.T) {
|
||||
svc := NewInMemoryService(1*time.Hour, 1*time.Hour)
|
||||
|
||||
// Test with different types
|
||||
svc.Set("string", "hello", 1*time.Hour)
|
||||
svc.Set("int", 42, 1*time.Hour)
|
||||
svc.Set("slice", []string{"a", "b"}, 1*time.Hour)
|
||||
|
||||
if svc.ItemCount() != 3 {
|
||||
t.Fatalf("Expected 3 items, got %d", svc.ItemCount())
|
||||
}
|
||||
|
||||
val, ok := svc.Get("string")
|
||||
if !ok || val != "hello" {
|
||||
t.Fatal("String value mismatch")
|
||||
}
|
||||
|
||||
val, ok = svc.Get("int")
|
||||
if !ok || val != 42 {
|
||||
t.Fatal("Int value mismatch")
|
||||
}
|
||||
}
|
||||
@@ -27,6 +27,8 @@ type Config struct {
|
||||
API APIConfig `mapstructure:"api"`
|
||||
Auth AuthConfig `mapstructure:"auth"`
|
||||
Database DatabaseConfig `mapstructure:"database"`
|
||||
RateLimit RateLimitConfig `mapstructure:"rate_limit"`
|
||||
Cache CacheConfig `mapstructure:"cache"`
|
||||
}
|
||||
|
||||
// ServerConfig holds server-related configuration
|
||||
@@ -97,6 +99,20 @@ type DatabaseConfig struct {
|
||||
ConnMaxLifetime time.Duration `mapstructure:"conn_max_lifetime"`
|
||||
}
|
||||
|
||||
// RateLimitConfig holds rate limiting configuration
|
||||
type RateLimitConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
RequestsPerMinute int `mapstructure:"requests_per_minute"`
|
||||
BurstSize int `mapstructure:"burst_size"`
|
||||
}
|
||||
|
||||
// CacheConfig holds cache configuration
|
||||
type CacheConfig struct {
|
||||
Enabled bool `mapstructure:"enabled"`
|
||||
DefaultTTLSeconds int `mapstructure:"default_ttl_seconds"`
|
||||
CleanupIntervalSeconds int `mapstructure:"cleanup_interval_seconds"`
|
||||
}
|
||||
|
||||
// VersionInfo holds application version information
|
||||
type VersionInfo struct {
|
||||
Version string `mapstructure:"-"` // Set via ldflags
|
||||
@@ -189,6 +205,16 @@ func LoadConfig() (*Config, error) {
|
||||
// API defaults
|
||||
v.SetDefault("api.v2_enabled", false)
|
||||
|
||||
// Rate limit defaults
|
||||
v.SetDefault("rate_limit.enabled", true)
|
||||
v.SetDefault("rate_limit.requests_per_minute", 60)
|
||||
v.SetDefault("rate_limit.burst_size", 10)
|
||||
|
||||
// Cache defaults
|
||||
v.SetDefault("cache.enabled", true)
|
||||
v.SetDefault("cache.default_ttl_seconds", 300)
|
||||
v.SetDefault("cache.cleanup_interval_seconds", 600)
|
||||
|
||||
// Auth defaults
|
||||
v.SetDefault("auth.jwt_secret", "default-secret-key-please-change-in-production")
|
||||
v.SetDefault("auth.admin_master_password", "admin123")
|
||||
@@ -248,6 +274,16 @@ func LoadConfig() (*Config, error) {
|
||||
// API environment variables
|
||||
v.BindEnv("api.v2_enabled", "DLC_API_V2_ENABLED")
|
||||
|
||||
// Rate limit environment variables
|
||||
v.BindEnv("rate_limit.enabled", "DLC_RATE_LIMIT_ENABLED")
|
||||
v.BindEnv("rate_limit.requests_per_minute", "DLC_RATE_LIMIT_REQUESTS_PER_MINUTE")
|
||||
v.BindEnv("rate_limit.burst_size", "DLC_RATE_LIMIT_BURST_SIZE")
|
||||
|
||||
// Cache environment variables
|
||||
v.BindEnv("cache.enabled", "DLC_CACHE_ENABLED")
|
||||
v.BindEnv("cache.default_ttl_seconds", "DLC_CACHE_DEFAULT_TTL_SECONDS")
|
||||
v.BindEnv("cache.cleanup_interval_seconds", "DLC_CACHE_CLEANUP_INTERVAL_SECONDS")
|
||||
|
||||
// Database environment variables
|
||||
v.BindEnv("database.host", "DLC_DATABASE_HOST")
|
||||
v.BindEnv("database.port", "DLC_DATABASE_PORT")
|
||||
@@ -389,6 +425,48 @@ func (c *Config) GetLogOutput() string {
|
||||
return c.Logging.Output
|
||||
}
|
||||
|
||||
// GetRateLimitEnabled returns whether rate limiting is enabled
|
||||
func (c *Config) GetRateLimitEnabled() bool {
|
||||
return c.RateLimit.Enabled
|
||||
}
|
||||
|
||||
// GetRateLimitRequestsPerMinute returns the requests per minute limit
|
||||
func (c *Config) GetRateLimitRequestsPerMinute() int {
|
||||
if c.RateLimit.RequestsPerMinute <= 0 {
|
||||
return 60
|
||||
}
|
||||
return c.RateLimit.RequestsPerMinute
|
||||
}
|
||||
|
||||
// GetRateLimitBurstSize returns the burst size for rate limiting
|
||||
func (c *Config) GetRateLimitBurstSize() int {
|
||||
if c.RateLimit.BurstSize <= 0 {
|
||||
return 10
|
||||
}
|
||||
return c.RateLimit.BurstSize
|
||||
}
|
||||
|
||||
// GetCacheEnabled returns whether cache is enabled
|
||||
func (c *Config) GetCacheEnabled() bool {
|
||||
return c.Cache.Enabled
|
||||
}
|
||||
|
||||
// GetCacheDefaultTTLSeconds returns the default TTL in seconds for cache items
|
||||
func (c *Config) GetCacheDefaultTTLSeconds() int {
|
||||
if c.Cache.DefaultTTLSeconds <= 0 {
|
||||
return 300
|
||||
}
|
||||
return c.Cache.DefaultTTLSeconds
|
||||
}
|
||||
|
||||
// GetCacheCleanupIntervalSeconds returns the cleanup interval in seconds for cache
|
||||
func (c *Config) GetCacheCleanupIntervalSeconds() int {
|
||||
if c.Cache.CleanupIntervalSeconds <= 0 {
|
||||
return 600
|
||||
}
|
||||
return c.Cache.CleanupIntervalSeconds
|
||||
}
|
||||
|
||||
// GetDatabaseHost returns the database host
|
||||
func (c *Config) GetDatabaseHost() string {
|
||||
if c.Database.Host == "" {
|
||||
|
||||
153
pkg/middleware/ratelimit.go
Normal file
153
pkg/middleware/ratelimit.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// RateLimitConfig holds the configuration for rate limiting
|
||||
type RateLimitConfig struct {
|
||||
Enabled bool
|
||||
RequestsPerMinute int
|
||||
BurstSize int
|
||||
}
|
||||
|
||||
// RateLimiter implements per-IP rate limiting using a token bucket algorithm
|
||||
type RateLimiter struct {
|
||||
mu sync.Mutex
|
||||
visitors map[string]*visitor
|
||||
rate rate.Limit
|
||||
burst int
|
||||
ttl time.Duration
|
||||
enabled bool
|
||||
}
|
||||
|
||||
type visitor struct {
|
||||
limiter *rate.Limiter
|
||||
lastSeen time.Time
|
||||
}
|
||||
|
||||
// NewRateLimiter creates a new rate limiter with the given configuration
|
||||
func NewRateLimiter(cfg RateLimitConfig) *RateLimiter {
|
||||
// Convert requests per minute to events per second
|
||||
rateLimit := rate.Limit(float64(cfg.RequestsPerMinute) / 60.0)
|
||||
burst := cfg.BurstSize
|
||||
if burst <= 0 {
|
||||
burst = 1
|
||||
}
|
||||
|
||||
return &RateLimiter{
|
||||
mu: sync.Mutex{},
|
||||
visitors: make(map[string]*visitor),
|
||||
rate: rateLimit,
|
||||
burst: burst,
|
||||
ttl: 10 * time.Minute,
|
||||
enabled: cfg.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
// getVisitor returns the rate limiter for the given IP, creating one if needed.
|
||||
// It performs TTL-based eviction of stale entries.
|
||||
func (rl *RateLimiter) getVisitor(ip string) *rate.Limiter {
|
||||
if !rl.enabled {
|
||||
// If rate limiting is disabled, return a limiter that always allows
|
||||
return rate.NewLimiter(rate.Inf, 1)
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
rl.mu.Lock()
|
||||
defer rl.mu.Unlock()
|
||||
|
||||
// Clean up old entries periodically (every 100 accesses to avoid lock contention)
|
||||
if len(rl.visitors) > 0 && len(rl.visitors)%100 == 0 {
|
||||
rl.cleanupOldVisitors(now)
|
||||
}
|
||||
|
||||
v, exists := rl.visitors[ip]
|
||||
if !exists || now.Sub(v.lastSeen) > rl.ttl {
|
||||
// Create new limiter for this IP
|
||||
limiter := rate.NewLimiter(rl.rate, rl.burst)
|
||||
rl.visitors[ip] = &visitor{
|
||||
limiter: limiter,
|
||||
lastSeen: now,
|
||||
}
|
||||
return limiter
|
||||
}
|
||||
|
||||
// Update last seen time
|
||||
v.lastSeen = now
|
||||
return v.limiter
|
||||
}
|
||||
|
||||
// cleanupOldVisitors removes entries that haven't been seen in more than ttl
|
||||
func (rl *RateLimiter) cleanupOldVisitors(now time.Time) {
|
||||
for ip, v := range rl.visitors {
|
||||
if now.Sub(v.lastSeen) > rl.ttl {
|
||||
delete(rl.visitors, ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// clientIP extracts the client IP address from the request
|
||||
func (rl *RateLimiter) clientIP(r *http.Request) string {
|
||||
// Try X-Forwarded-For header first
|
||||
if xff := r.Header.Get("X-Forwarded-For"); xff != "" {
|
||||
// X-Forwarded-For can contain multiple IPs: client, proxy1, proxy2, ...
|
||||
// The leftmost is the original client
|
||||
ips := strings.Split(xff, ",")
|
||||
if len(ips) > 0 {
|
||||
return strings.TrimSpace(ips[0])
|
||||
}
|
||||
}
|
||||
|
||||
// Try X-Real-IP header
|
||||
if xri := r.Header.Get("X-Real-IP"); xri != "" {
|
||||
return strings.TrimSpace(xri)
|
||||
}
|
||||
|
||||
// Fall back to RemoteAddr (strip port if present)
|
||||
addr := r.RemoteAddr
|
||||
if colonIdx := strings.LastIndex(addr, ":"); colonIdx != -1 {
|
||||
return addr[:colonIdx]
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// Middleware returns the rate limiting middleware function
|
||||
func (rl *RateLimiter) Middleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
ip := rl.clientIP(r)
|
||||
limiter := rl.getVisitor(ip)
|
||||
|
||||
if !limiter.Allow() {
|
||||
// Rate limit exceeded
|
||||
// Calculate retry after based on the rate
|
||||
// tokens needed = burst, rate = tokens/second
|
||||
// So wait time = burst / rate (in seconds)
|
||||
retryAfter := float64(rl.burst) / float64(rl.rate)
|
||||
if retryAfter <= 0 {
|
||||
retryAfter = 1
|
||||
}
|
||||
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
w.Header().Set("Retry-After", fmt.Sprintf("%.0f", retryAfter))
|
||||
w.WriteHeader(http.StatusTooManyRequests)
|
||||
|
||||
response := map[string]interface{}{
|
||||
"error": "rate_limited",
|
||||
"retry_after_seconds": int(retryAfter),
|
||||
}
|
||||
json.NewEncoder(w).Encode(response)
|
||||
return
|
||||
}
|
||||
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
310
pkg/middleware/ratelimit_test.go
Normal file
310
pkg/middleware/ratelimit_test.go
Normal file
@@ -0,0 +1,310 @@
|
||||
package middleware
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestRateLimiter_AllowsRequestsWithinBurst(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: true,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 5,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
// Create a simple handler that returns 200 OK
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
w.Write([]byte("OK"))
|
||||
}))
|
||||
|
||||
// Make 5 requests (equal to burst size) - all should succeed
|
||||
for i := 0; i < 5; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "192.168.1.1:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Request %d: expected status 200, got %d", i+1, rr.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_BlocksRequestsExceedingBurst(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: true,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 3,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
// Make 4 requests (exceeding burst of 3) - 4th should be rate limited
|
||||
for i := 0; i < 3; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "192.168.1.2:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Request %d: expected status 200, got %d", i+1, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// 4th request should be rate limited
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "192.168.1.2:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusTooManyRequests {
|
||||
t.Errorf("Request 4: expected status 429, got %d", rr.Code)
|
||||
}
|
||||
|
||||
// Verify response body
|
||||
var response map[string]interface{}
|
||||
if err := json.NewDecoder(rr.Body).Decode(&response); err != nil {
|
||||
t.Fatalf("Failed to decode response body: %v", err)
|
||||
}
|
||||
|
||||
if response["error"] != "rate_limited" {
|
||||
t.Errorf("Expected error 'rate_limited', got %v", response["error"])
|
||||
}
|
||||
|
||||
if _, ok := response["retry_after_seconds"]; !ok {
|
||||
t.Error("Expected retry_after_seconds in response")
|
||||
}
|
||||
|
||||
// Verify Retry-After header
|
||||
if retryAfter := rr.Header().Get("Retry-After"); retryAfter == "" {
|
||||
t.Error("Expected Retry-After header to be set")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_DifferentIPsIndependent(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: true,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 2,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
// IP1 makes 2 requests (fills its burst)
|
||||
for i := 0; i < 2; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "10.0.0.1:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("IP1 request %d: expected status 200, got %d", i+1, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// IP1's 3rd request should be rate limited
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "10.0.0.1:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusTooManyRequests {
|
||||
t.Errorf("IP1 request 3: expected status 429, got %d", rr.Code)
|
||||
}
|
||||
|
||||
// IP2 should still be able to make requests (independent rate limit)
|
||||
req2 := httptest.NewRequest("GET", "/test", nil)
|
||||
req2.RemoteAddr = "10.0.0.2:12345"
|
||||
rr2 := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr2, req2)
|
||||
|
||||
if rr2.Code != http.StatusOK {
|
||||
t.Errorf("IP2 request 1: expected status 200, got %d", rr2.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_Disabled(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: false,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 1,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
// Make many requests - all should succeed when disabled
|
||||
for i := 0; i < 100; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "192.168.1.100:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Request %d with disabled rate limiter: expected status 200, got %d", i+1, rr.Code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_TTLExpiration(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: true,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 2,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
// Manually set a short TTL for testing
|
||||
rl.ttl = 50 * time.Millisecond
|
||||
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
// IP makes 2 requests (fills burst)
|
||||
for i := 0; i < 2; i++ {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "10.0.0.50:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusOK {
|
||||
t.Errorf("Request %d: expected status 200, got %d", i+1, rr.Code)
|
||||
}
|
||||
}
|
||||
|
||||
// 3rd request should be rate limited
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "10.0.0.50:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
if rr.Code != http.StatusTooManyRequests {
|
||||
t.Errorf("Request 3: expected status 429, got %d", rr.Code)
|
||||
}
|
||||
|
||||
// Wait for TTL to expire
|
||||
time.Sleep(60 * time.Millisecond)
|
||||
|
||||
// New request should succeed (new limiter created after TTL expiration)
|
||||
req2 := httptest.NewRequest("GET", "/test", nil)
|
||||
req2.RemoteAddr = "10.0.0.50:12345"
|
||||
rr2 := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr2, req2)
|
||||
|
||||
if rr2.Code != http.StatusOK {
|
||||
t.Errorf("Request after TTL: expected status 200, got %d", rr2.Code)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_ClientIPExtraction(t *testing.T) {
|
||||
rl := NewRateLimiter(RateLimitConfig{Enabled: true, RequestsPerMinute: 60, BurstSize: 10})
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
header map[string]string
|
||||
remoteAddr string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "X-Forwarded-For single IP",
|
||||
header: map[string]string{"X-Forwarded-For": "203.0.113.195"},
|
||||
remoteAddr: "127.0.0.1:12345",
|
||||
expected: "203.0.113.195",
|
||||
},
|
||||
{
|
||||
name: "X-Forwarded-For multiple IPs",
|
||||
header: map[string]string{"X-Forwarded-For": "203.0.113.195, 70.41.3.18, 150.172.238.178"},
|
||||
remoteAddr: "127.0.0.1:12345",
|
||||
expected: "203.0.113.195",
|
||||
},
|
||||
{
|
||||
name: "X-Real-IP",
|
||||
header: map[string]string{"X-Real-IP": "203.0.113.50"},
|
||||
remoteAddr: "127.0.0.1:12345",
|
||||
expected: "203.0.113.50",
|
||||
},
|
||||
{
|
||||
name: "RemoteAddr with port",
|
||||
header: map[string]string{},
|
||||
remoteAddr: "203.0.113.100:54321",
|
||||
expected: "203.0.113.100",
|
||||
},
|
||||
{
|
||||
name: "RemoteAddr without port",
|
||||
header: map[string]string{},
|
||||
remoteAddr: "203.0.113.101",
|
||||
expected: "203.0.113.101",
|
||||
},
|
||||
{
|
||||
name: "X-Forwarded-For takes precedence over X-Real-IP",
|
||||
header: map[string]string{"X-Forwarded-For": "203.0.113.200", "X-Real-IP": "203.0.113.201"},
|
||||
remoteAddr: "127.0.0.1:12345",
|
||||
expected: "203.0.113.200",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
for k, v := range tt.header {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
req.RemoteAddr = tt.remoteAddr
|
||||
|
||||
ip := rl.clientIP(req)
|
||||
if ip != tt.expected {
|
||||
t.Errorf("clientIP() = %q, expected %q", ip, tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRateLimiter_ContentTypeHeader(t *testing.T) {
|
||||
cfg := RateLimitConfig{
|
||||
Enabled: true,
|
||||
RequestsPerMinute: 60,
|
||||
BurstSize: 1,
|
||||
}
|
||||
rl := NewRateLimiter(cfg)
|
||||
|
||||
handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}))
|
||||
|
||||
// Make 1 request to fill burst
|
||||
req := httptest.NewRequest("GET", "/test", nil)
|
||||
req.RemoteAddr = "192.168.1.200:12345"
|
||||
rr := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr, req)
|
||||
|
||||
// 2nd request should be rate limited
|
||||
req2 := httptest.NewRequest("GET", "/test", nil)
|
||||
req2.RemoteAddr = "192.168.1.200:12345"
|
||||
rr2 := httptest.NewRecorder()
|
||||
handler.ServeHTTP(rr2, req2)
|
||||
|
||||
if rr2.Code != http.StatusTooManyRequests {
|
||||
t.Fatalf("Expected status 429, got %d", rr2.Code)
|
||||
}
|
||||
|
||||
// Check Content-Type header is JSON
|
||||
contentType := rr2.Header().Get("Content-Type")
|
||||
if contentType != "application/json" {
|
||||
t.Errorf("Expected Content-Type: application/json, got %q", contentType)
|
||||
}
|
||||
}
|
||||
43
pkg/server/healthz_test.go
Normal file
43
pkg/server/healthz_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"dance-lessons-coach/pkg/config"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestHandleHealthz(t *testing.T) {
|
||||
// Setup
|
||||
cfg := &config.Config{}
|
||||
s := NewServer(cfg, context.Background())
|
||||
|
||||
// Create request
|
||||
req := httptest.NewRequest(http.MethodGet, "/api/healthz", nil)
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
// Call handler
|
||||
s.handleHealthz(w, req)
|
||||
|
||||
// Check status code
|
||||
assert.Equal(t, http.StatusOK, w.Code)
|
||||
|
||||
// Check content type
|
||||
assert.Equal(t, "application/json", w.Header().Get("Content-Type"))
|
||||
|
||||
// Decode response
|
||||
var resp HealthzResponse
|
||||
err := json.NewDecoder(w.Body).Decode(&resp)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Assert fields
|
||||
assert.Equal(t, "healthy", resp.Status)
|
||||
assert.NotEmpty(t, resp.Version)
|
||||
assert.GreaterOrEqual(t, resp.UptimeSeconds, int64(0))
|
||||
assert.NotZero(t, resp.Timestamp)
|
||||
}
|
||||
@@ -13,12 +13,14 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/go-chi/chi/v5"
|
||||
"github.com/go-chi/chi/v5/middleware"
|
||||
chimiddleware "github.com/go-chi/chi/v5/middleware"
|
||||
"github.com/rs/zerolog/log"
|
||||
httpSwagger "github.com/swaggo/http-swagger"
|
||||
|
||||
"dance-lessons-coach/pkg/cache"
|
||||
"dance-lessons-coach/pkg/config"
|
||||
"dance-lessons-coach/pkg/greet"
|
||||
"dance-lessons-coach/pkg/middleware"
|
||||
"dance-lessons-coach/pkg/telemetry"
|
||||
"dance-lessons-coach/pkg/user"
|
||||
userapi "dance-lessons-coach/pkg/user/api"
|
||||
@@ -64,6 +66,8 @@ type Server struct {
|
||||
validator *validation.Validator
|
||||
userRepo user.UserRepository
|
||||
userService user.UserService
|
||||
cacheService cache.Service
|
||||
startedAt time.Time
|
||||
}
|
||||
|
||||
func NewServer(cfg *config.Config, readyCtx context.Context) *Server {
|
||||
@@ -81,14 +85,28 @@ func NewServer(cfg *config.Config, readyCtx context.Context) *Server {
|
||||
log.Warn().Err(err).Msg("Failed to initialize user services, user functionality will be disabled")
|
||||
}
|
||||
|
||||
// Initialize cache service
|
||||
var cacheService cache.Service
|
||||
if cfg.GetCacheEnabled() {
|
||||
cacheService = cache.NewInMemoryService(
|
||||
time.Duration(cfg.GetCacheDefaultTTLSeconds())*time.Second,
|
||||
time.Duration(cfg.GetCacheCleanupIntervalSeconds())*time.Second,
|
||||
)
|
||||
log.Trace().Msg("Cache service initialized")
|
||||
} else {
|
||||
log.Trace().Msg("Cache service disabled")
|
||||
}
|
||||
|
||||
s := &Server{
|
||||
router: chi.NewRouter(),
|
||||
readyCtx: readyCtx,
|
||||
withOTEL: cfg.GetTelemetryEnabled(),
|
||||
config: cfg,
|
||||
validator: validator,
|
||||
userRepo: userRepo,
|
||||
userService: userService,
|
||||
router: chi.NewRouter(),
|
||||
readyCtx: readyCtx,
|
||||
withOTEL: cfg.GetTelemetryEnabled(),
|
||||
config: cfg,
|
||||
validator: validator,
|
||||
userRepo: userRepo,
|
||||
userService: userService,
|
||||
cacheService: cacheService,
|
||||
startedAt: time.Now(),
|
||||
}
|
||||
s.setupRoutes()
|
||||
return s
|
||||
@@ -123,7 +141,7 @@ func initializeUserServices(cfg *config.Config) (user.UserRepository, user.UserS
|
||||
|
||||
func (s *Server) setupRoutes() {
|
||||
// Use Zerolog middleware instead of Chi's default logger
|
||||
s.router.Use(middleware.RequestLogger(&middleware.DefaultLogFormatter{
|
||||
s.router.Use(chimiddleware.RequestLogger(&chimiddleware.DefaultLogFormatter{
|
||||
Logger: &log.Logger,
|
||||
NoColor: false,
|
||||
}))
|
||||
@@ -137,6 +155,9 @@ func (s *Server) setupRoutes() {
|
||||
// Version endpoint at root level
|
||||
s.router.Get("/api/version", s.handleVersion)
|
||||
|
||||
// Kubernetes-style health endpoint at root level
|
||||
s.router.Get("/api/healthz", s.handleHealthz)
|
||||
|
||||
// API routes
|
||||
s.router.Route("/api/v1", func(r chi.Router) {
|
||||
r.Use(s.getAllMiddlewares()...)
|
||||
@@ -172,6 +193,13 @@ func (s *Server) registerApiV1Routes(r chi.Router) {
|
||||
greetService := greet.NewService()
|
||||
greetHandler := greet.NewApiV1GreetHandler(greetService)
|
||||
|
||||
// Create rate limit middleware
|
||||
rateLimitMiddleware := middleware.NewRateLimiter(middleware.RateLimitConfig{
|
||||
Enabled: s.config.GetRateLimitEnabled(),
|
||||
RequestsPerMinute: s.config.GetRateLimitRequestsPerMinute(),
|
||||
BurstSize: s.config.GetRateLimitBurstSize(),
|
||||
})
|
||||
|
||||
// Create auth middleware if available
|
||||
var authMiddleware *AuthMiddleware
|
||||
if s.userService != nil {
|
||||
@@ -179,6 +207,8 @@ func (s *Server) registerApiV1Routes(r chi.Router) {
|
||||
}
|
||||
|
||||
r.Route("/greet", func(r chi.Router) {
|
||||
// Add rate limiting middleware for greet endpoint
|
||||
r.Use(rateLimitMiddleware.Middleware)
|
||||
// Add optional authentication middleware
|
||||
if authMiddleware != nil {
|
||||
r.Use(authMiddleware.Middleware)
|
||||
@@ -215,8 +245,8 @@ func (s *Server) registerApiV2Routes(r chi.Router) {
|
||||
// getAllMiddlewares returns all middleware including OpenTelemetry if enabled
|
||||
func (s *Server) getAllMiddlewares() []func(http.Handler) http.Handler {
|
||||
middlewares := []func(http.Handler) http.Handler{
|
||||
middleware.StripSlashes,
|
||||
middleware.Recoverer,
|
||||
chimiddleware.StripSlashes,
|
||||
chimiddleware.Recoverer,
|
||||
}
|
||||
|
||||
if s.withOTEL {
|
||||
@@ -336,26 +366,77 @@ func (s *Server) handleVersion(w http.ResponseWriter, r *http.Request) {
|
||||
format = "plain" // default format
|
||||
}
|
||||
|
||||
// Check cache if enabled
|
||||
cacheKey := "version:" + format
|
||||
if s.cacheService != nil {
|
||||
if cached, ok := s.cacheService.Get(cacheKey); ok {
|
||||
log.Trace().Str("cache_key", cacheKey).Msg("Cache hit for version")
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
if format == "json" {
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
}
|
||||
w.Write([]byte(cached.(string)))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Build response
|
||||
var response string
|
||||
switch format {
|
||||
case "plain":
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write([]byte(version.Short()))
|
||||
response = version.Short()
|
||||
case "full":
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write([]byte(version.Full()))
|
||||
response = version.Full()
|
||||
case "json":
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
jsonResponse := fmt.Sprintf(`{
|
||||
response = fmt.Sprintf(`{
|
||||
"version": "%s",
|
||||
"commit": "%s",
|
||||
"built": "%s",
|
||||
"go": "%s"
|
||||
}`, version.Version, version.Commit, version.Date, version.GoVersion)
|
||||
w.Write([]byte(jsonResponse))
|
||||
default:
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
w.Write([]byte(version.Short()))
|
||||
response = version.Short()
|
||||
}
|
||||
|
||||
// Cache the response for 60 seconds if cache is enabled
|
||||
if s.cacheService != nil {
|
||||
s.cacheService.Set(cacheKey, response, 60*time.Second)
|
||||
log.Trace().Str("cache_key", cacheKey).Msg("Cached version response")
|
||||
}
|
||||
|
||||
w.Write([]byte(response))
|
||||
}
|
||||
|
||||
// HealthzResponse represents the Kubernetes-style health check response
|
||||
type HealthzResponse struct {
|
||||
Status string `json:"status"`
|
||||
Version string `json:"version"`
|
||||
UptimeSeconds int64 `json:"uptime_seconds"`
|
||||
Timestamp time.Time `json:"timestamp"`
|
||||
}
|
||||
|
||||
// handleHealthz godoc
|
||||
//
|
||||
// @Summary Kubernetes-style health check
|
||||
// @Description Returns rich health info for liveness/readiness probes
|
||||
// @Tags System/Health
|
||||
// @Produce json
|
||||
// @Success 200 {object} HealthzResponse
|
||||
// @Router /healthz [get]
|
||||
func (s *Server) handleHealthz(w http.ResponseWriter, r *http.Request) {
|
||||
log.Trace().Msg("Healthz check requested")
|
||||
resp := HealthzResponse{
|
||||
Status: "healthy",
|
||||
Version: version.Version,
|
||||
UptimeSeconds: int64(time.Since(s.startedAt).Seconds()),
|
||||
Timestamp: time.Now().UTC(),
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
json.NewEncoder(w).Encode(resp)
|
||||
}
|
||||
|
||||
func (s *Server) Router() http.Handler {
|
||||
|
||||
@@ -133,7 +133,7 @@ run_tests_with_tags() {
|
||||
set +e
|
||||
|
||||
# Default tag filter: exclude flaky, todo, and skip scenarios
|
||||
DEFAULT_TAGS="~@flaky && ~@todo && ~@skip"
|
||||
DEFAULT_TAGS="~@flaky && ~@todo && ~@skip && ~@v2"
|
||||
|
||||
if [ -n "$tags" ]; then
|
||||
# Use godog directly for tag filtering with exclusion
|
||||
|
||||
Reference in New Issue
Block a user