diff --git a/scripts/run-tests-with-random-ports.sh b/scripts/run-tests-with-random-ports.sh deleted file mode 100755 index cac8ac7..0000000 --- a/scripts/run-tests-with-random-ports.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -# Script to run BDD tests with random ports to avoid port conflicts -# Usage: ./scripts/run-tests-with-random-ports.sh [feature] - -echo "🚀 Running BDD tests with random ports..." -echo " This prevents port conflicts in parallel test execution" - -# Set environment variable for random port selection -export RANDOM_TEST_PORT="true" - -# Run the specified feature tests, or all tests if no feature specified -if [ $# -eq 0 ]; then - echo "📋 Running all BDD tests..." - go test ./features/... -v -else - echo "📋 Running tests for feature: $1" - go test ./features/$1/... -v -fi - -# Check the exit status -if [ $? -eq 0 ]; then - echo "✅ All tests passed!" -else - echo "❌ Some tests failed" - exit 1 -fi \ No newline at end of file diff --git a/scripts/validate-test-suite.sh b/scripts/validate-test-suite.sh index 8472898..d0dc7f0 100755 --- a/scripts/validate-test-suite.sh +++ b/scripts/validate-test-suite.sh @@ -1,16 +1,14 @@ #!/bin/bash # Test Suite Validation Script -# Runs tests N times and collects failure metrics -# Usage: ./scripts/validate-test-suite.sh [N] [test_path] +# Runs tests N times with separate unit and BDD test phases +# Usage: ./scripts/validate-test-suite.sh [N] # N - Number of times to run tests (default: 20) -# test_path - Test path (default: ./...) 
set -e # Default values RUN_COUNT=${1:-20} -TEST_PATH=${2:-./...} SCRIPTS_DIR=$(dirname "$(realpath "${BASH_SOURCE[0]}")") # Colors for output @@ -21,26 +19,29 @@ BLUE='\033[0;34m' NC='\033[0m' # No Color # Temporary files -FAILURE_LOG=$(mktemp) -UNIQUE_FAILURES=$(mktemp) +UNIT_FAILURE_LOG=$(mktemp) +BDD_FAILURE_LOG=$(mktemp) SUMMARY_REPORT=$(mktemp) # Cleanup temporary files on exit cleanup() { - rm -f "$FAILURE_LOG" "$UNIQUE_FAILURES" "$SUMMARY_REPORT" + rm -f "$UNIT_FAILURE_LOG" "$BDD_FAILURE_LOG" "$SUMMARY_REPORT" } trap cleanup EXIT echo "🧪 Test Suite Validation Script" echo "==============================" echo "Runs: $RUN_COUNT" -echo "Tests: $TEST_PATH" +echo "Unit Tests: ./cmd/... ./pkg/..." +echo "BDD Tests: ./features/..." echo "Date: $(date)" echo # Initialize counters -SUCCESS_COUNT=0 -FAILURE_COUNT=0 +UNIT_SUCCESS=0 +UNIT_FAILURE=0 +BDD_SUCCESS=0 +BDD_FAILURE=0 START_TIME=$(date +%s) echo "Starting validation runs..." @@ -50,28 +51,48 @@ echo for (( run=1; run<=$RUN_COUNT; run++ )); do echo "Run $run/$RUN_COUNT..." - # Clean test cache for each run + # ===== UNIT TESTS ===== + echo " 🧪 Unit tests..." go clean -testcache > /dev/null 2>&1 - # Run tests and capture output set +e # Temporarily disable exit on error - TEST_OUTPUT=$(go test $TEST_PATH -v 2>&1) - TEST_EXIT_CODE=$? + UNIT_OUTPUT=$(go test ./cmd/... ./pkg/... -v 2>&1) + UNIT_EXIT_CODE=$? 
set -e # Re-enable exit on error - if [ $TEST_EXIT_CODE -eq 0 ]; then - echo " ✅ Passed" - ((SUCCESS_COUNT++)) + if [ $UNIT_EXIT_CODE -eq 0 ]; then - echo " ✅ Passed" + ((UNIT_SUCCESS++)) else - echo " ❌ Failed" - ((FAILURE_COUNT++)) + echo " ❌ Failed" + ((UNIT_FAILURE++)) - # Extract failing test names and errors - echo "$TEST_OUTPUT" | grep -E "^(FAIL|--- FAIL)" | sed 's/^\*\*\* //' >> "$FAILURE_LOG" + # Extract detailed unit test failures + echo "$UNIT_OUTPUT" | grep -E "^(FAIL|--- FAIL)" | sed 's/^\*\*\* //' >> "$UNIT_FAILURE_LOG" + echo "$UNIT_OUTPUT" | grep -A 10 "FAIL.*\.go" >> "$UNIT_FAILURE_LOG" + echo "---" >> "$UNIT_FAILURE_LOG" + fi + + # ===== BDD TESTS ===== + echo " 🧪 BDD tests..." + go clean -testcache > /dev/null 2>&1 + + set +e # Temporarily disable exit on error + BDD_OUTPUT=$(go test ./features/... -v 2>&1) + BDD_EXIT_CODE=$? + set -e # Re-enable exit on error + + if [ $BDD_EXIT_CODE -eq 0 ]; then + echo " ✅ Passed" + ((BDD_SUCCESS++)) + else + echo " ❌ Failed" + ((BDD_FAILURE++)) - # Extract specific test failures with errors - echo "$TEST_OUTPUT" | grep -A 5 "FAIL.*\.go" | head -6 >> "$FAILURE_LOG" - echo "---" >> "$FAILURE_LOG" + # Extract detailed BDD test failures with actual test names + echo "$BDD_OUTPUT" | grep -E "^(FAIL|--- FAIL)" | sed 's/^\*\*\* //' >> "$BDD_FAILURE_LOG" + echo "$BDD_OUTPUT" | grep -A 10 "FAIL.*Test" >> "$BDD_FAILURE_LOG" + echo "---" >> "$BDD_FAILURE_LOG" fi done @@ -82,54 +103,83 @@ DURATION=$((END_TIME - START_TIME)) echo "Validation Complete" echo "==================" echo "Total Runs: $RUN_COUNT" -echo "Success: ${GREEN}$SUCCESS_COUNT${NC}" -echo "Failures: ${RED}$FAILURE_COUNT${NC}" +echo "Unit Tests:" +echo -e " Success: ${GREEN}$UNIT_SUCCESS${NC}" +echo -e " Failures: ${RED}$UNIT_FAILURE${NC}" +echo -e "BDD Tests:" +echo -e " Success: ${GREEN}$BDD_SUCCESS${NC}" +echo -e " Failures: ${RED}$BDD_FAILURE${NC}" echo "Duration: $DURATION seconds" echo -# Check if there were any failures -if [ 
$FAILURE_COUNT -eq 0 ]; then - echo "${GREEN}✅ All tests passed successfully!${NC}" +# Check overall success +TOTAL_FAILURES=$((UNIT_FAILURE + BDD_FAILURE)) + +if [ $TOTAL_FAILURES -eq 0 ]; then + echo -e "${GREEN}✅ All tests passed successfully!${NC}" echo "Test suite is stable and ready for production" exit 0 else - echo "${RED}❌ Some tests failed during validation${NC}" + echo -e "${RED}❌ Some tests failed during validation${NC}" echo - # Process failure log to get unique failures with counts - if [ -s "$FAILURE_LOG" ]; then - echo "Failure Analysis" + # Process unit test failures + if [ -s "$UNIT_FAILURE_LOG" ]; then + echo "Unit Test Failures:" + echo "==================" + + # Count unit test failures + UNIT_FAILURES=$(grep "FAIL" "$UNIT_FAILURE_LOG" | sort | uniq -c | sort -rn) + if [ -n "$UNIT_FAILURES" ]; then + echo "$UNIT_FAILURES" + else + echo " None (check log for details)" + fi + + echo + fi + + # Process BDD test failures + if [ -s "$BDD_FAILURE_LOG" ]; then + echo "BDD Test Failures:" echo "================" - # Count occurrences of each failing test - echo "Failing Test Summary:" - grep "FAIL" "$FAILURE_LOG" | sort | uniq -c | sort -rn | while read count test; do - test_name=$(echo "$test" | sed 's/FAIL[[:space:]]*//') - echo " $count × $test_name" - done + # Count BDD test failures with granularity + BDD_FAILURES=$(grep "FAIL" "$BDD_FAILURE_LOG" | \ grep -v "dance-lessons-coach/features" | \ grep -v "^[0-9].*FAIL" | \ grep "/" | \ sort | uniq -c | sort -rn) + if [ -n "$BDD_FAILURES" ]; then + echo "Summary:" + while IFS= read -r line; do + count=$(echo "$line" | awk '{print $1}') + test=$(echo "$line" | sed 's/^[0-9]*[[:space:]]*//') + echo " $count × $test" + done <<< "$BDD_FAILURES" + else + echo " None (check log for details)" + fi echo - echo "Unique Failure Patterns:" - - # Extract unique failure patterns - grep -E "^(FAIL|---)" "$FAILURE_LOG" | sort | uniq | while read line; do - if [[ "$line" == FAIL* ]]; then - echo " 
• $line" - fi - done - - echo - echo "Detailed Failure Log:" - echo "======================" - cat "$FAILURE_LOG" - - echo - echo "Recommendations:" - echo " 1. Mark flaky tests with @flaky tag" - echo " 2. Investigate and fix failing tests" - echo " 3. Run with FIXED_TEST_PORT=true to debug port issues" - echo " 4. Check for race conditions in failing tests" + echo "Detailed BDD Failure Log (first 20 lines):" + echo "==========================================" + # Show only the relevant failure lines with actual test names + # Filter out non-specific failures and test suite lines + grep -E "(FAIL.*Test|--- FAIL)" "$BDD_FAILURE_LOG" | \ grep -v "dance-lessons-coach/features" | \ grep -v "^[0-9].*FAIL" | \ grep "/" | \ head -20 fi + echo + echo "Recommendations:" + echo " 1. Mark flaky BDD tests with @flaky tag" + echo " 2. Investigate unit test failures first (faster to fix)" + echo " 3. Check for race conditions in failing tests" + echo " 4. Run with FIXED_TEST_PORT=true for debugging" + echo " 5. Use ./scripts/run-bdd-tests.sh list-tags to see available tags" + exit 1 fi \ No newline at end of file