diff --git a/cmd/run.go b/cmd/run.go
index c8a7736..4f724ca 100644
--- a/cmd/run.go
+++ b/cmd/run.go
@@ -56,6 +56,10 @@ var (
// Validation mode
validateSuiteIfDefaultBranch bool
validateSuite bool
+
+ // Coverage mode
+ showCoverage bool
+ coverageOutputPath string
)
//go:embed short_docs/drift/drift_run.md
@@ -116,6 +120,10 @@ func bindRunFlags(cmd *cobra.Command) {
cmd.Flags().BoolVar(&validateSuiteIfDefaultBranch, "validate-suite-if-default-branch", false, "[Cloud] Validate traces on default branch before adding to suite")
cmd.Flags().BoolVar(&validateSuite, "validate-suite", false, "[Cloud] Force validation mode regardless of branch")
+ // Coverage mode
+ cmd.Flags().BoolVar(&showCoverage, "show-coverage", false, "Collect and display code coverage during test execution")
+ cmd.Flags().StringVar(&coverageOutputPath, "coverage-output", "", "Write coverage data to file (LCOV by default, JSON if path ends in .json)")
+
_ = cmd.Flags().MarkHidden("client-id")
cmd.Flags().SortFlags = false
}
@@ -239,11 +247,12 @@ func runTests(cmd *cobra.Command, args []string) error {
var req *backend.CreateDriftRunRequest
if isValidation {
+ commitSha = getCommitSHAFromEnv()
req = &backend.CreateDriftRunRequest{
ObservableServiceId: cfg.Service.ID,
CliVersion: version.Version,
IsValidationRun: true,
- CommitSha: stringPtr(getCommitSHAFromEnv()),
+ CommitSha: stringPtr(commitSha),
BranchName: stringPtr(getBranchFromEnv()),
}
} else {
@@ -314,6 +323,39 @@ func runTests(cmd *cobra.Command, args []string) error {
executor.SetEnableServiceLogs(enableServiceLogs || debug)
+ // Coverage activation:
+ // - Config-driven: coverage.enabled=true in config activates during validation runs (silent, for upload)
+ // - Flag-driven: --show-coverage or --coverage-output activates anytime (for local dev/debugging)
+ coverageFromConfig := getConfigErr == nil && cfg.Coverage.Enabled && isValidation
+ coverageFromFlags := showCoverage || coverageOutputPath != ""
+ coverageEnabled := coverageFromConfig || coverageFromFlags
+ if coverageEnabled {
+ executor.SetCoverageEnabled(true)
+ executor.SetShowCoverage(showCoverage)
+ if coverageOutputPath != "" {
+ executor.SetCoverageOutputPath(coverageOutputPath)
+ }
+ if getConfigErr == nil {
+ if len(cfg.Coverage.Include) > 0 {
+ executor.SetCoverageIncludePatterns(cfg.Coverage.Include)
+ }
+ if len(cfg.Coverage.Exclude) > 0 {
+ executor.SetCoverageExcludePatterns(cfg.Coverage.Exclude)
+ }
+ if cfg.Coverage.StripPathPrefix != "" {
+ executor.SetCoverageStripPrefix(cfg.Coverage.StripPathPrefix)
+ }
+ }
+ // Coverage requires serial execution (concurrency=1) because per-test
+ // snapshots rely on the SDK resetting counters between tests.
+ executor.SetConcurrency(1)
+ if showCoverage {
+ log.Stderrln("➤ Coverage collection enabled (concurrency forced to 1)")
+ } else {
+ log.Debug("Coverage collection enabled via config (concurrency forced to 1)")
+ }
+ }
+
// Initialize results saving (--save-results json|agent)
var agentWriter *runner.AgentWriter
var saveResultsDir string
@@ -454,6 +496,51 @@ func runTests(cmd *cobra.Command, args []string) error {
})
}
+ // Coverage: wrap the OnTestCompleted callback to take snapshots between tests.
+ // Snapshot runs BEFORE the existing callback (which uploads results) so that
+ // per-test coverage data is available when building the upload proto.
+ if coverageEnabled {
+ existingCallback := executor.OnTestCompleted
+ executor.SetOnTestCompleted(func(res runner.TestResult, test runner.Test) {
+ // Take coverage snapshot FIRST so data is available for upload.
+ // Always continue to existingCallback even on error so test results still upload.
+ lineCounts, err := executor.TakeCoverageSnapshot()
+ if err != nil {
+ log.Warn("Failed to take coverage snapshot", "testID", test.TraceID, "error", err)
+ }
+
+ if err == nil {
+ executor.AddCoverageRecord(runner.CoverageTestRecord{
+ TestID: test.TraceID,
+ TestName: test.DisplayName,
+ SuiteStatus: test.SuiteStatus,
+ Coverage: lineCounts,
+ })
+
+ // Store detail for TUI display
+ detail := runner.SnapshotToCoverageDetail(lineCounts)
+ executor.SetTestCoverageDetail(test.TraceID, detail)
+
+ // Print sub-line in --print mode when --show-coverage is active
+ if !interactive && showCoverage {
+ totalLines := 0
+ for _, fd := range detail {
+ totalLines += fd.CoveredCount
+ }
+ if totalLines > 0 {
+ log.UserProgress(fmt.Sprintf(" ↳ coverage: %d lines across %d files", totalLines, len(detail)))
+ }
+ }
+ }
+
+ // Now run the existing callback (which uploads results).
+ // Coverage data is available via GetTestCoverageDetail() for the upload.
+ if existingCallback != nil {
+ existingCallback(res, test)
+ }
+ })
+ }
+
var tests []runner.Test
var err error
@@ -781,7 +868,11 @@ func runTests(cmd *cobra.Command, args []string) error {
passed, failed := countPassedFailed(results)
statusMessage = fmt.Sprintf("Validation complete: %d passed, %d failed", passed, failed)
}
- if err := runner.ReportDriftRunSuccess(context.Background(), client, driftRunID, authOptions, results, statusMessage); err != nil {
+ var interactiveCoverageBaseline, interactiveCoverageOriginal runner.CoverageSnapshot
+ if coverageEnabled && isValidation {
+ interactiveCoverageBaseline, interactiveCoverageOriginal = executor.GetCoverageBaselineForUpload()
+ }
+ if err := runner.ReportDriftRunSuccess(context.Background(), client, driftRunID, authOptions, results, interactiveCoverageBaseline, interactiveCoverageOriginal, commitSha, statusMessage); err != nil {
log.Warn("Interactive: cloud finalize failed", "error", err)
}
mu.Lock()
@@ -896,6 +987,19 @@ func runTests(cmd *cobra.Command, args []string) error {
log.Stderrln(fmt.Sprintf("➤ Running %d tests (concurrency: %d)...\n", len(tests), executor.GetConcurrency()))
}
+ // Coverage: take baseline with ?baseline=true to capture ALL coverable lines
+ // (including uncovered at count=0) for the aggregate denominator.
+ // This also resets counters so the first test gets clean data.
+ if coverageEnabled {
+ baseline, err := executor.TakeCoverageBaseline()
+ if err != nil {
+ log.Warn("Failed to take baseline coverage snapshot", "error", err)
+ } else {
+ executor.SetCoverageBaseline(baseline)
+ log.Debug("Coverage baseline taken (counters reset, all coverable lines captured)")
+ }
+ }
+
results, err = executor.RunTests(tests)
if err != nil {
cmd.SilenceUsage = true
@@ -946,6 +1050,15 @@ func runTests(cmd *cobra.Command, args []string) error {
_ = os.Stdout.Sync()
time.Sleep(1 * time.Millisecond)
+ // Coverage: print summary and write output file
+ if coverageEnabled {
+ if records := executor.GetCoverageRecords(); len(records) > 0 {
+ if err := executor.ProcessCoverage(records); err != nil {
+ log.Warn("Failed to process coverage", "error", err)
+ }
+ }
+ }
+
var outputErr error
if !interactive {
// Results already streamed, just print summary
@@ -966,7 +1079,12 @@ func runTests(cmd *cobra.Command, args []string) error {
}
// streamed is always true here so this only updates the CI status
// Does NOT upload results to the backend as they are already uploaded via UploadSingleTestResult during the callback
- if err := runner.ReportDriftRunSuccess(context.Background(), client, driftRunID, authOptions, results, statusMessage); err != nil {
+ // Coverage baseline (if enabled) is piggybacked on this status update
+ var headlessCoverageBaseline, headlessCoverageOriginal runner.CoverageSnapshot
+ if coverageEnabled && isValidation {
+ headlessCoverageBaseline, headlessCoverageOriginal = executor.GetCoverageBaselineForUpload()
+ }
+ if err := runner.ReportDriftRunSuccess(context.Background(), client, driftRunID, authOptions, results, headlessCoverageBaseline, headlessCoverageOriginal, commitSha, statusMessage); err != nil {
log.Warn("Headless: cloud finalize failed", "error", err)
}
if isValidation {
diff --git a/docs/drift/configuration.md b/docs/drift/configuration.md
index 652458b..284be0b 100644
--- a/docs/drift/configuration.md
+++ b/docs/drift/configuration.md
@@ -392,6 +392,47 @@ This will not affect CLI behavior. See SDK for more details:
+## Coverage
+
+Configuration for code coverage collection. See [`docs/drift/coverage.md`](coverage.md) for full documentation.
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| `coverage.enabled` | bool | `false` | When true, automatically collect coverage during suite validation runs on the default branch. No CI changes needed. |
+| `coverage.include` | string[] | (all files) | Only include files matching at least one pattern. Supports `**` for recursive matching. Paths are git-relative. |
+| `coverage.exclude` | string[] | (none) | Exclude files matching any pattern. Applied after include. Supports `**` for recursive matching. |
+| `coverage.strip_path_prefix` | string | (none) | Strip this prefix from coverage file paths. Required for Docker Compose — set to the container mount point (e.g., `/app`). |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
## Config overrides
### Flags that override config
diff --git a/docs/drift/coverage.md b/docs/drift/coverage.md
new file mode 100644
index 0000000..e5180cb
--- /dev/null
+++ b/docs/drift/coverage.md
@@ -0,0 +1,228 @@
+# Code Coverage
+
+Tusk Drift can collect code coverage during test replay, showing which lines of your service code each trace test exercises.
+
+Coverage works with Node.js and Python.
+
+## Enabling Coverage
+
+There are two ways to enable coverage:
+
+### Config-driven (for CI)
+
+Add `coverage.enabled: true` to `.tusk/config.yaml`. Coverage is automatically collected during validation runs on the default branch. No CI changes needed.
+
+```yaml
+coverage:
+ enabled: true
+```
+
+Config-driven coverage is silent (no console output). Data is collected for backend upload during suite validation.
+
+### Flag-driven (for local dev)
+
+```bash
+# Show coverage in console
+tusk drift run --show-coverage --print
+
+# Export to file (implies coverage collection)
+tusk drift run --coverage-output coverage.lcov --print
+```
+
+## CLI Flags
+
+| Flag | Description |
+|------|-------------|
+| `--show-coverage` | Collect and display code coverage. Forces concurrency to 1. |
+| `--coverage-output <path>` | Write coverage data to a file. LCOV format by default; JSON if path ends in `.json`. Implies coverage collection. |
+
+### When coverage activates
+
+| Scenario | Coverage collected? | Shown in console? |
+|---|---|---|
+| `coverage.enabled: true` + validation run (CI) | Yes | No (silent) |
+| `coverage.enabled: true` + local/PR run | No | No |
+| `--show-coverage` (any context) | Yes | Yes |
+| `--coverage-output` (any context) | Yes | Only if `--show-coverage` also set |
+
+## Configuration
+
+Optional include/exclude patterns in `.tusk/config.yaml`:
+
+```yaml
+coverage:
+ include:
+ - "backend/src/**" # only report on your service's code
+ exclude:
+ - "**/migrations/**" # exclude database migrations
+ - "**/generated/**" # exclude generated code
+ - "**/*.test.ts" # exclude test files loaded at startup
+```
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| `coverage.include` | string[] | (all files) | If set, only files matching at least one pattern are included in coverage reports. Useful for monorepos. |
+| `coverage.exclude` | string[] | (none) | Files matching any pattern are excluded from coverage reports. Applied after include. |
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### Pattern syntax
+
+Patterns use glob matching with `**` for recursive directory matching. File paths are **relative to the git root** (e.g., `backend/src/db/migrations/1700-Init.ts`).
+
+| Pattern | Matches |
+|---------|---------|
+| `**/migrations/**` | Any file in any `migrations/` directory |
+| `backend/src/**` | All files under `backend/src/` |
+| `**/*.test.ts` | Any `.test.ts` file |
+| `backend/src/db/migrations/**` | Specific subdirectory |
+| `migrations/**` | **Won't match** — paths include the full git-relative prefix |
+
+## Output
+
+### Console output
+
+Coverage is displayed in two places during a run:
+
+**Per-test (inline):** After each test completes, a single line shows how many lines that specific test covered:
+```
+NO DEVIATION - dc14ba0733bdba8b65c11f14c6407320 (63ms)
+ ↳ coverage: 59 lines across 10 files
+```
+
+**Aggregate (end of run):** After all tests complete, the full summary shows:
+```
+📊 Coverage: 85.9% lines (55/64), 42.9% branches (6/14) across 2 files
+
+ Per-file:
+ server.js 85.2% (52/61)
+ tuskDriftInit.js 100.0% (3/3)
+
+ Per-test:
+ GET /api/random-user 4 lines across 1 files
+ POST /api/create-post 5 lines across 1 files
+```
+
+In TUI mode, the aggregate summary appears in the service logs panel after all tests complete. Per-test detail is shown in each test's log panel.
+
+### LCOV export
+
+```bash
+tusk drift run --cloud --show-coverage --coverage-output coverage.lcov --print
+```
+
+Compatible with Codecov, Coveralls, SonarQube, VS Code, and most coverage tools.
+
+**Note on validation runs:**
+- **In-suite tests** are always included in coverage output, even if they fail (a failing test still exercises code paths).
+- **Draft tests** are excluded from coverage output. Draft coverage data is uploaded to the backend for promotion decisions ("does this draft add unique coverage?").
+- **After promotion**, the Tusk Cloud dashboard may show slightly higher coverage than the LCOV file (newly promoted drafts are included). The LCOV catches up on the next validation run.
+
+### JSON export
+
+```bash
+tusk drift run --cloud --show-coverage --coverage-output coverage.json --print
+```
+
+JSON includes three top-level fields:
+
+- `summary` — aggregate stats, per-file percentages, per-test line counts
+- `aggregate` — line-level hit counts and branch data for every file
+- `per_test` — per-test per-file covered lines
+
+```json
+{
+ "summary": {
+ "aggregate": { "coverage_pct": 85.9, "total_covered_lines": 55, "total_coverable_lines": 64 },
+ "per_file": { "server.js": { "coverage_pct": 85.2, "covered_lines": 52, "coverable_lines": 61 } },
+ "per_test": [{ "test_name": "GET /api/random-user", "covered_lines": 4, "files_touched": 1 }]
+ },
+ "aggregate": {
+ "server.js": {
+ "lines": { "1": 1, "5": 3, "12": 0 },
+ "total_branches": 14,
+ "covered_branches": 6,
+ "branches": { "25": { "total": 2, "covered": 1 } }
+ }
+ },
+ "per_test": {
+ "trace-id-abc": {
+ "server.js": { "covered_lines": [5, 15, 22], "covered_count": 3, "files_touched": 1 }
+ }
+ }
+}
+```
+
+## How It Works
+
+1. CLI starts your service with coverage env vars (`NODE_V8_COVERAGE` for Node, `TUSK_COVERAGE` for Python)
+2. After the service is ready, CLI takes a **baseline snapshot** — all coverable lines (including uncovered) for the denominator
+3. After each test, CLI takes a **per-test snapshot** — only lines executed since the last snapshot (counters auto-reset)
+4. CLI merges per-test data with baseline to compute the aggregate
+
+Coverage data flows via the existing CLI-SDK protobuf channel. No extra HTTP servers or ports.
+
+**Node.js:** Uses V8's built-in precise coverage. No external dependencies. TypeScript source maps handled automatically (`sourceMap: true` in tsconfig required). See the [Node SDK coverage docs](https://github.com/Use-Tusk/drift-node-sdk/blob/main/docs/coverage.md) for internals.
+
+**Python:** Uses `coverage.py` with `branch=True`. Requires `pip install coverage`. See the [Python SDK coverage docs](https://github.com/Use-Tusk/drift-python-sdk/blob/main/docs/coverage.md) for internals.
+
+## Docker Compose
+
+For services running in Docker Compose, two things are needed:
+
+### 1. Pass coverage env vars to the container
+
+Add to `docker-compose.tusk-override.yml`:
+
+```yaml
+services:
+ your-service:
+ environment:
+ - TUSK_COVERAGE=${TUSK_COVERAGE:-} # pass through from CLI
+ - NODE_V8_COVERAGE=/tmp/tusk-v8-coverage # Node.js only: fixed container path
+```
+
+`TUSK_COVERAGE` is passed through from the CLI using `${TUSK_COVERAGE:-}`. `NODE_V8_COVERAGE` must be a **fixed container path** — not `${NODE_V8_COVERAGE:-}` — because the CLI creates a host temp directory that doesn't exist inside the container.
+
+**Python containers:** Add `coverage>=7.0` to your `requirements.txt`. No `NODE_V8_COVERAGE` needed.
+
+### 2. Strip container path prefix
+
+Coverage paths from Docker are container-absolute (e.g., `/app/app/api/views.py`). Use `strip_path_prefix` to convert them to repo-relative paths:
+
+```yaml
+coverage:
+ enabled: true
+ strip_path_prefix: "/app" # your Docker volume mount point
+```
+
+This strips `/app` from all paths, so `/app/app/api/views.py` becomes `app/api/views.py` — matching the file path in your git repo. Set this to whatever your `docker-compose.yaml` volume mount maps your project root to (e.g., `- .:/app` → use `/app`).
+
+## Limitations
+
+- **Concurrency forced to 1.** Per-test snapshots rely on counter resets between tests.
+- **Only loaded files tracked.** Files never imported by the server (standalone scripts, test files, unused utils) don't appear in coverage. The denominator only includes files V8/Python actually loaded.
+- **Startup code inflates coverage.** Module loading, decorator execution, and DI registration all count as "covered lines." A single test may show 20%+ coverage on a large app from startup alone.
+- **TypeScript compiled output.** If using `tsc`, ensure a clean build (`rm -rf dist && tsc`) to avoid stale artifacts with broken imports.
+- **Multi-process servers.** Node cluster mode and gunicorn with multiple workers need single-process mode for coverage.
+- **Python overhead.** `coverage.py` adds 10-30% execution overhead via `sys.settrace()`. V8 coverage is near-zero overhead.
+- **Python branch coverage** uses a private coverage.py API (`_analyze()`). May break on major coverage.py upgrades.
+- **Docker paths.** Coverage paths are container-absolute by default. Use `coverage.strip_path_prefix` to convert to repo-relative paths (see Docker Compose section above).
diff --git a/go.mod b/go.mod
index 17536ed..72a46ac 100644
--- a/go.mod
+++ b/go.mod
@@ -4,9 +4,10 @@ go 1.25.0
require (
github.com/Use-Tusk/fence v0.1.36
- github.com/Use-Tusk/tusk-drift-schemas v0.1.33
+ github.com/Use-Tusk/tusk-drift-schemas v0.1.34
github.com/agnivade/levenshtein v1.0.3
github.com/aymanbagabas/go-osc52/v2 v2.0.1
+ github.com/bmatcuk/doublestar/v4 v4.10.0
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7
github.com/charmbracelet/bubbletea v1.3.9
github.com/charmbracelet/glamour v0.10.0
@@ -44,7 +45,6 @@ require (
github.com/andybalholm/brotli v1.2.0 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
- github.com/bmatcuk/doublestar/v4 v4.10.0 // indirect
github.com/bodgit/plumbing v1.3.0 // indirect
github.com/bodgit/sevenzip v1.6.1 // indirect
github.com/bodgit/windows v1.0.1 // indirect
diff --git a/go.sum b/go.sum
index 170ceff..5f5769c 100644
--- a/go.sum
+++ b/go.sum
@@ -33,8 +33,8 @@ github.com/STARRY-S/zip v0.2.3 h1:luE4dMvRPDOWQdeDdUxUoZkzUIpTccdKdhHHsQJ1fm4=
github.com/STARRY-S/zip v0.2.3/go.mod h1:lqJ9JdeRipyOQJrYSOtpNAiaesFO6zVDsE8GIGFaoSk=
github.com/Use-Tusk/fence v0.1.36 h1:8S15y8cp3X+xXukx6AN0Ky/aX9/dZyW3fLw5XOQ8YtE=
github.com/Use-Tusk/fence v0.1.36/go.mod h1:YkowBDzXioVKJE16vg9z3gSVC6vhzkIZZw2dFf7MW/o=
-github.com/Use-Tusk/tusk-drift-schemas v0.1.33 h1:P9iyTgCpFz5rcAEMhbQq3TTAPftMBGvBh33bdYPQEYQ=
-github.com/Use-Tusk/tusk-drift-schemas v0.1.33/go.mod h1:pa3EvTj9kKxl9f904RVFkj9YK1zB75QogboKi70zalM=
+github.com/Use-Tusk/tusk-drift-schemas v0.1.34 h1:OUXsA4sfBMA/HCuPqYdfl5EP9+Jq+hYenAmw4wwrEVo=
+github.com/Use-Tusk/tusk-drift-schemas v0.1.34/go.mod h1:pa3EvTj9kKxl9f904RVFkj9YK1zB75QogboKi70zalM=
github.com/agnivade/levenshtein v1.0.3 h1:M5ZnqLOoZR8ygVq0FfkXsNOKzMCk0xRiow0R5+5VkQ0=
github.com/agnivade/levenshtein v1.0.3/go.mod h1:4SFRZbbXWLF4MU1T9Qg0pGgH3Pjs+t6ie5efyrwRJXs=
github.com/alecthomas/assert/v2 v2.7.0 h1:QtqSACNS3tF7oasA8CU6A6sXZSBDqnm7RfpLl9bZqbE=
diff --git a/internal/config/config.go b/internal/config/config.go
index 9cffb2f..be1dc91 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -36,6 +36,7 @@ type Config struct {
Replay ReplayConfig `koanf:"replay"`
Traces TracesConfig `koanf:"traces"`
Results ResultsConfig `koanf:"results"`
+ Coverage CoverageConfig `koanf:"coverage"`
}
type ServiceConfig struct {
@@ -115,6 +116,13 @@ type ResultsConfig struct {
Dir string `koanf:"dir"`
}
+type CoverageConfig struct {
+ Enabled bool `koanf:"enabled"`
+ Include []string `koanf:"include"`
+ Exclude []string `koanf:"exclude"`
+ StripPathPrefix string `koanf:"strip_path_prefix"`
+}
+
// Load loads the config file and applies environment overrides.
// This function is idempotent - calling it multiple times will only load once.
func Load(configFile string) error {
diff --git a/internal/runner/coverage.go b/internal/runner/coverage.go
new file mode 100644
index 0000000..d04954b
--- /dev/null
+++ b/internal/runner/coverage.go
@@ -0,0 +1,794 @@
+package runner
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Use-Tusk/tusk-cli/internal/log"
+ "github.com/Use-Tusk/tusk-cli/internal/utils"
+ "github.com/bmatcuk/doublestar/v4"
+)
+
+const (
+ coverageBaselineMaxRetries = 4
+ coverageBaselineRetryDelay = 200 * time.Millisecond
+ coverageSnapshotTimeout = 60 * time.Second
+ coverageBaselineDeadline = 90 * time.Second
+)
+
+// TakeCoverageSnapshot calls the SDK's coverage snapshot endpoint.
+// Returns per-file coverage data for this test only (counters auto-reset).
+func (e *Executor) TakeCoverageSnapshot() (CoverageSnapshot, error) {
+ return e.callCoverageEndpoint(false)
+}
+
+// TakeCoverageBaseline calls the SDK's coverage snapshot endpoint with ?baseline=true.
+// Returns ALL coverable lines (including uncovered at count=0) for the aggregate denominator.
+// Retries briefly since the coverage server may not be ready immediately after service start.
+// In practice, the SDK initializes coverage before the HTTP server starts, so the baseline
+// should succeed on the first attempt.
+func (e *Executor) TakeCoverageBaseline() (CoverageSnapshot, error) {
+ ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(coverageBaselineDeadline))
+ defer cancel()
+
+ var lastErr error
+ for attempt := 0; attempt < coverageBaselineMaxRetries; attempt++ {
+ if ctx.Err() != nil {
+ break
+ }
+ result, err := e.callCoverageEndpoint(true)
+ if err == nil {
+ return result, nil
+ }
+ lastErr = err
+
+ select {
+ case <-ctx.Done():
+ case <-time.After(coverageBaselineRetryDelay):
+ }
+ }
+ return nil, fmt.Errorf("coverage baseline failed after retries: %w", lastErr)
+}
+
+func (e *Executor) callCoverageEndpoint(baseline bool) (CoverageSnapshot, error) {
+ if !e.coverageEnabled || e.server == nil {
+ return nil, nil
+ }
+
+ resp, err := e.server.SendCoverageSnapshot(baseline)
+ if err != nil {
+ return nil, fmt.Errorf("coverage snapshot failed: %w", err)
+ }
+
+ // Convert protobuf response to our internal format
+ snapshot := make(CoverageSnapshot)
+ for filePath, fileData := range resp.Coverage {
+ branches := make(map[string]BranchInfo)
+ for line, branchProto := range fileData.Branches {
+ branches[line] = BranchInfo{
+ Total: int(branchProto.Total),
+ Covered: int(branchProto.Covered),
+ }
+ }
+
+ lines := make(map[string]int)
+ for line, count := range fileData.Lines {
+ lines[line] = int(count)
+ }
+
+ snapshot[filePath] = FileCoverageData{
+ Lines: lines,
+ TotalBranches: int(fileData.TotalBranches),
+ CoveredBranches: int(fileData.CoveredBranches),
+ Branches: branches,
+ }
+ }
+
+ return normalizeCoveragePaths(snapshot, e.coverageStripPrefix), nil
+}
+
+// BranchInfo tracks branch coverage at a specific line.
+type BranchInfo struct {
+ Total int `json:"total"`
+ Covered int `json:"covered"`
+}
+
+// FileCoverageData is the internal representation of per-file coverage.
+type FileCoverageData struct {
+ Lines map[string]int `json:"lines"`
+ TotalBranches int `json:"total_branches"`
+ CoveredBranches int `json:"covered_branches"`
+ Branches map[string]BranchInfo `json:"branches,omitempty"`
+}
+
+// CoverageSnapshot is the full coverage data for a snapshot.
+type CoverageSnapshot map[string]FileCoverageData
+
+// CoverageTestRecord holds per-test coverage data.
+type CoverageTestRecord struct {
+ TestID string
+ TestName string
+ SuiteStatus string // "draft", "in_suite", or "" (local)
+ Coverage CoverageSnapshot
+}
+
+// CoverageFileDiff represents per-test coverage for a single file.
+type CoverageFileDiff struct {
+ CoveredLines []int `json:"covered_lines"`
+ CoverableLines int `json:"coverable_lines"`
+ CoveredCount int `json:"covered_count"`
+ TotalBranches int `json:"total_branches"`
+ CoveredBranches int `json:"covered_branches"`
+ Branches map[string]BranchInfo `json:"branches,omitempty"`
+}
+
+// SnapshotToCoverageDetail converts a CoverageSnapshot to per-file CoverageFileDiff format.
+func SnapshotToCoverageDetail(snapshot CoverageSnapshot) map[string]CoverageFileDiff {
+ result := make(map[string]CoverageFileDiff)
+ for filePath, fileData := range snapshot {
+ var covered []int
+ for lineStr, count := range fileData.Lines {
+ if count > 0 {
+ line, err := strconv.Atoi(lineStr)
+ if err != nil || line <= 0 {
+ log.Debug("Skipping invalid line number in coverage data", "line", lineStr, "file", filePath)
+ continue
+ }
+ covered = append(covered, line)
+ }
+ }
+ if len(covered) > 0 {
+ sort.Ints(covered)
+ covered = dedup(covered)
+ // Deep-copy branches to avoid shared references
+ branchesCopy := make(map[string]BranchInfo, len(fileData.Branches))
+ for line, info := range fileData.Branches {
+ branchesCopy[line] = info
+ }
+ result[filePath] = CoverageFileDiff{
+ CoveredLines: covered,
+ CoverableLines: len(fileData.Lines),
+ CoveredCount: len(covered),
+ TotalBranches: fileData.TotalBranches,
+ CoveredBranches: fileData.CoveredBranches,
+ Branches: branchesCopy,
+ }
+ }
+ }
+ return result
+}
+
+// CoverageReportView is a pre-computed view of coverage data, built once and
+// passed to all consumers (print, JSON export, TUI) for consistency.
+type CoverageReportView struct {
+ SuiteRecords []CoverageTestRecord
+ Aggregate CoverageSnapshot
+ PerTest map[string]map[string]CoverageFileDiff
+ Summary CoverageSummary
+}
+
+// BuildCoverageReportView constructs a CoverageReportView by applying suite filtering,
+// include/exclude patterns, and computing the summary — all exactly once.
+func (e *Executor) BuildCoverageReportView(records []CoverageTestRecord) *CoverageReportView {
+ suiteRecords := filterInSuiteRecords(records)
+
+ e.coverageBaselineMu.Lock()
+ baseline := e.coverageBaseline
+ e.coverageBaselineMu.Unlock()
+
+ aggregate := mergeWithBaseline(baseline, suiteRecords)
+ aggregate = filterCoverageByPatterns(aggregate, e.coverageIncludePatterns, e.coverageExcludePatterns)
+ perTest := e.GetCoveragePerTestSnapshot()
+ summary := ComputeCoverageSummary(aggregate, perTest, suiteRecords)
+
+ return &CoverageReportView{
+ SuiteRecords: suiteRecords,
+ Aggregate: aggregate,
+ PerTest: perTest,
+ Summary: summary,
+ }
+}
+
+// ProcessCoverage computes aggregate coverage, optionally prints summary, writes file, and prepares for upload.
+// During validation runs, the aggregate and output files only include IN_SUITE tests (not drafts).
+// All per-test data (including drafts) is retained for backend upload — the backend needs draft
+// coverage for promotion decisions ("does this draft add unique coverage?").
+func (e *Executor) ProcessCoverage(records []CoverageTestRecord) error {
+ return e.ProcessCoverageWithAggregate(records, nil)
+}
+
+// ProcessCoverageWithAggregate processes coverage with an optional pre-computed view.
+// If precomputed is nil, it will be computed from the records and baseline.
+func (e *Executor) ProcessCoverageWithAggregate(records []CoverageTestRecord, precomputed CoverageSnapshot) error {
+ if !e.coverageEnabled || len(records) == 0 {
+ return nil
+ }
+
+ // Use pre-computed aggregate if provided, otherwise build the view.
+ var aggregate CoverageSnapshot
+ var suiteRecords []CoverageTestRecord
+ if precomputed != nil {
+ aggregate = precomputed
+ suiteRecords = filterInSuiteRecords(records)
+ } else {
+ view := e.BuildCoverageReportView(records)
+ aggregate = view.Aggregate
+ suiteRecords = view.SuiteRecords
+ }
+
+ // Print summary if --show-coverage was passed (not in silent config-driven mode)
+ if e.coverageShowOutput {
+ log.Stderrln("\n➤ Processing coverage data...")
+ summary := ComputeCoverageSummary(aggregate, e.GetCoveragePerTestSnapshot(), suiteRecords)
+ e.printCoverageSummary(summary)
+ }
+
+ // Write coverage file if requested.
+ // During validation runs, aggregate and output only include IN_SUITE tests.
+ // Draft coverage is excluded from the file but retained for backend upload.
+ if e.coverageOutputPath != "" {
+ outPath := e.coverageOutputPath
+ if !filepath.IsAbs(outPath) {
+ if cwd, err := os.Getwd(); err == nil {
+ outPath = filepath.Join(cwd, outPath)
+ }
+ }
+ if err := os.MkdirAll(filepath.Dir(outPath), 0o750); err != nil {
+ return fmt.Errorf("failed to create coverage output directory: %w", err)
+ }
+
+ if strings.HasSuffix(strings.ToLower(outPath), ".json") {
+ if err := WriteCoverageJSON(outPath, aggregate, e.GetCoveragePerTestSnapshot(), suiteRecords); err != nil {
+ return fmt.Errorf("failed to write coverage JSON: %w", err)
+ }
+ } else {
+ if err := WriteCoverageLCOV(outPath, aggregate); err != nil {
+ return fmt.Errorf("failed to write coverage LCOV: %w", err)
+ }
+ }
+ if e.coverageShowOutput {
+ log.Stderrln(fmt.Sprintf("\n📄 Coverage written to %s", e.coverageOutputPath))
+ }
+ }
+
+ return nil
+}
+
+// mergeWithBaseline creates aggregate coverage by starting from the baseline
+// (all coverable lines including count=0) and unioning per-test data.
+//
+// Branch merging uses UNION semantics: if test A covers branch path 1 and test B
+// covers branch path 2, the aggregate shows both paths as covered. This is done
+// by summing covered counts per line (clamped to total) rather than taking max.
func mergeWithBaseline(baseline CoverageSnapshot, records []CoverageTestRecord) CoverageSnapshot {
	merged := make(CoverageSnapshot)

	// Deep-copy baseline (don't mutate the original).
	// Baseline lines include startup-covered counts (count > 0 for lines executed
	// during module loading). These count toward "covered" in the aggregate,
	// matching industry standard behavior (Istanbul, NYC, coverage.py, etc.).
	for filePath, fileData := range baseline {
		lines := make(map[string]int, len(fileData.Lines))
		for line, count := range fileData.Lines {
			lines[line] = count
		}
		branches := make(map[string]BranchInfo, len(fileData.Branches))
		for line, info := range fileData.Branches {
			branches[line] = info // BranchInfo is a value type, safe to copy
		}
		merged[filePath] = FileCoverageData{
			Lines:           lines,
			TotalBranches:   fileData.TotalBranches,
			CoveredBranches: fileData.CoveredBranches,
			Branches:        branches,
		}
	}

	// Union per-test coverage into the merged result
	for _, record := range records {
		for filePath, fileData := range record.Coverage {
			existing, ok := merged[filePath]
			if !ok {
				existing = FileCoverageData{
					Lines:    make(map[string]int),
					Branches: make(map[string]BranchInfo),
				}
			}
			// Add line counts. Mutating existing.Lines in place is safe: these
			// maps were freshly allocated above (deep copy of baseline or the
			// !ok branch), never aliased to caller-owned data.
			for line, count := range fileData.Lines {
				existing.Lines[line] += count
			}
			// Union branch data: sum covered counts, clamped to total.
			// This is the same approach Istanbul/NYC use when merging reports.
			// Without per-arm tracking, sum+clamp is the best approximation
			// (optimistic when tests overlap on the same branches).
			for line, branchInfo := range fileData.Branches {
				if existing.Branches == nil {
					existing.Branches = make(map[string]BranchInfo)
				}
				eb := existing.Branches[line]
				// NOTE(review): Total is taken as the max seen so far; if a later
				// record reports a larger Total after an earlier clamp, counts
				// clamped away are not restored — assumes Total is stable across
				// records for a given line. TODO confirm.
				if branchInfo.Total > eb.Total {
					eb.Total = branchInfo.Total
				}
				// newCovered < 0 guards against integer-overflow wraparound from
				// corrupt/huge inputs; both cases collapse to "fully covered".
				newCovered := eb.Covered + branchInfo.Covered
				if newCovered > eb.Total || newCovered < 0 {
					eb.Covered = eb.Total
				} else {
					eb.Covered = newCovered
				}
				existing.Branches[line] = eb
			}
			// Recompute file-level branch totals from per-line data so the
			// file counters stay consistent with the merged Branches map.
			totalB, covB := 0, 0
			for _, b := range existing.Branches {
				totalB += b.Total
				covB += b.Covered
			}
			existing.TotalBranches = totalB
			existing.CoveredBranches = covB
			merged[filePath] = existing
		}
	}

	return merged
}
+
+// --- Summary output ---
+
// CoverageSummary is the computed, serializable view of a coverage run:
// run timestamp, aggregate totals, a per-file breakdown, and per-test totals.
type CoverageSummary struct {
	Timestamp string                         `json:"timestamp"`
	Aggregate CoverageAggregate              `json:"aggregate"`
	PerFile   map[string]CoverageFileSummary `json:"per_file"`
	PerTest   []CoverageTestSummary          `json:"per_test"`
}

// CoverageAggregate holds whole-run line, branch, and file coverage totals.
type CoverageAggregate struct {
	TotalCoverableLines int     `json:"total_coverable_lines"`
	TotalCoveredLines   int     `json:"total_covered_lines"`
	CoveragePct         float64 `json:"coverage_pct"`
	TotalFiles          int     `json:"total_files"`
	CoveredFiles        int     `json:"covered_files"` // files with at least one covered line
	TotalBranches       int     `json:"total_branches"`
	CoveredBranches     int     `json:"covered_branches"`
	BranchCoveragePct   float64 `json:"branch_coverage_pct"`
}

// CoverageFileSummary holds line and branch coverage totals for one file.
type CoverageFileSummary struct {
	CoveredLines   int     `json:"covered_lines"`
	CoverableLines int     `json:"coverable_lines"`
	CoveragePct    float64 `json:"coverage_pct"`
	TotalBranches  int     `json:"total_branches"`
	CoveredBranches int    `json:"covered_branches"`
}

// CoverageTestSummary holds per-test totals: covered lines and files touched.
type CoverageTestSummary struct {
	TestID       string `json:"test_id"`
	TestName     string `json:"test_name"`
	CoveredLines int    `json:"covered_lines"`
	FilesTouched int    `json:"files_touched"`
}
+
+// ComputeCoverageSummary builds a CoverageSummary from aggregate coverage data
+// and per-test detail. This is a pure function (no side effects, no I/O).
+func ComputeCoverageSummary(
+ aggregate CoverageSnapshot,
+ perTestDetail map[string]map[string]CoverageFileDiff,
+ records []CoverageTestRecord,
+) CoverageSummary {
+ summary := CoverageSummary{
+ Timestamp: time.Now().Format(time.RFC3339),
+ PerFile: make(map[string]CoverageFileSummary),
+ }
+
+ totalCoverable := 0
+ totalCovered := 0
+ totalBranches := 0
+ totalCoveredBranches := 0
+ coveredFiles := 0
+
+ for filePath, fileData := range aggregate {
+ coverable := len(fileData.Lines)
+ covered := 0
+ for _, count := range fileData.Lines {
+ if count > 0 {
+ covered++
+ }
+ }
+ totalCoverable += coverable
+ totalCovered += covered
+ totalBranches += fileData.TotalBranches
+ totalCoveredBranches += fileData.CoveredBranches
+ if covered > 0 {
+ coveredFiles++
+ }
+ pct := 0.0
+ if coverable > 0 {
+ pct = float64(covered) / float64(coverable) * 100
+ }
+ summary.PerFile[filePath] = CoverageFileSummary{
+ CoveredLines: covered,
+ CoverableLines: coverable,
+ CoveragePct: pct,
+ TotalBranches: fileData.TotalBranches,
+ CoveredBranches: fileData.CoveredBranches,
+ }
+ }
+
+ aggPct := 0.0
+ if totalCoverable > 0 {
+ aggPct = float64(totalCovered) / float64(totalCoverable) * 100
+ }
+ branchPct := 0.0
+ if totalBranches > 0 {
+ branchPct = float64(totalCoveredBranches) / float64(totalBranches) * 100
+ }
+
+ summary.Aggregate = CoverageAggregate{
+ TotalCoverableLines: totalCoverable,
+ TotalCoveredLines: totalCovered,
+ CoveragePct: aggPct,
+ TotalFiles: len(aggregate),
+ CoveredFiles: coveredFiles,
+ TotalBranches: totalBranches,
+ CoveredBranches: totalCoveredBranches,
+ BranchCoveragePct: branchPct,
+ }
+
+ for _, record := range records {
+ ts := CoverageTestSummary{TestID: record.TestID, TestName: record.TestName}
+ if detail, ok := perTestDetail[record.TestID]; ok {
+ for _, fd := range detail {
+ ts.CoveredLines += fd.CoveredCount
+ }
+ ts.FilesTouched = len(detail)
+ }
+ summary.PerTest = append(summary.PerTest, ts)
+ }
+
+ return summary
+}
+
+// formatCoverageSummary formats coverage summary as lines of text.
+func (e *Executor) formatCoverageSummary(summary CoverageSummary) []string {
+ var lines []string
+
+ // Aggregate line
+ coverageMsg := fmt.Sprintf("📊 Coverage: %.1f%% lines (%d/%d)",
+ summary.Aggregate.CoveragePct, summary.Aggregate.TotalCoveredLines, summary.Aggregate.TotalCoverableLines)
+ if summary.Aggregate.TotalBranches > 0 {
+ coverageMsg += fmt.Sprintf(", %.1f%% branches (%d/%d)",
+ summary.Aggregate.BranchCoveragePct, summary.Aggregate.CoveredBranches, summary.Aggregate.TotalBranches)
+ }
+ coverageMsg += fmt.Sprintf(" across %d files", summary.Aggregate.TotalFiles)
+ e.coverageBaselineMu.Lock()
+ baselineNil := e.coverageBaseline == nil
+ e.coverageBaselineMu.Unlock()
+ if baselineNil {
+ coverageMsg += " ⚠️ baseline failed - denominator may be incomplete"
+ }
+ lines = append(lines, coverageMsg)
+
+ // Per-file breakdown sorted alphabetically
+ type fileStat struct {
+ path string
+ pct float64
+ cov, tot int
+ }
+ var stats []fileStat
+ for fp, fs := range summary.PerFile {
+ if fs.CoverableLines > 0 {
+ stats = append(stats, fileStat{fp, fs.CoveragePct, fs.CoveredLines, fs.CoverableLines})
+ }
+ }
+ sort.Slice(stats, func(i, j int) bool { return stats[i].path < stats[j].path })
+
+ lines = append(lines, "")
+ lines = append(lines, " Per-file:")
+ for _, s := range stats {
+ lines = append(lines, fmt.Sprintf(" %-40s %5.1f%% (%d/%d)", s.path, s.pct, s.cov, s.tot))
+ }
+
+ return lines
+}
+
+// printCoverageSummary prints the coverage summary to stderr.
+func (e *Executor) printCoverageSummary(summary CoverageSummary) {
+ for _, line := range e.formatCoverageSummary(summary) {
+ log.Stderrln(line)
+ }
+
+ // Per-test breakdown
+ log.Stderrln("\n Per-test:")
+ for _, ts := range summary.PerTest {
+ name := ts.TestName
+ if name == "" {
+ name = ts.TestID
+ }
+ log.Stderrln(fmt.Sprintf(" %-40s %d lines across %d files", name, ts.CoveredLines, ts.FilesTouched))
+ }
+}
+
+// FormatCoverageSummaryLines builds a CoverageReportView and returns formatted summary lines
+// for the TUI service log panel (aggregate + per-file, no per-test).
+// Also returns the computed aggregate so callers can reuse it (avoiding redundant computation).
+func (e *Executor) FormatCoverageSummaryLines(records []CoverageTestRecord) ([]string, CoverageSnapshot) {
+ if !e.coverageEnabled || len(records) == 0 {
+ return nil, nil
+ }
+
+ view := e.BuildCoverageReportView(records)
+ return e.formatCoverageSummary(view.Summary), view.Aggregate
+}
+
+// filterCoverageByPatterns applies include/exclude glob patterns to a snapshot.
+// Include (if set): only keep files matching at least one include pattern.
+// Exclude: remove files matching any exclude pattern.
+// Include is applied first, then exclude.
+// Supports ** for recursive directory matching:
+// - "**/migrations/**" matches any file in any migrations/ directory
+// - "backend/src/db/**" matches everything under backend/src/db/
+// - "**/*.test.ts" matches any .test.ts file
+// - "backend/src/db/migrations/**" matches specific path
+func filterCoverageByPatterns(snapshot CoverageSnapshot, include, exclude []string) CoverageSnapshot {
+ if len(include) == 0 && len(exclude) == 0 {
+ return snapshot
+ }
+ filtered := make(CoverageSnapshot, len(snapshot))
+ for filePath, data := range snapshot {
+ // Include filter: if patterns are set, file must match at least one
+ if len(include) > 0 && !matchesAnyPattern(filePath, include) {
+ continue
+ }
+ // Exclude filter: file must not match any
+ if len(exclude) > 0 && matchesAnyPattern(filePath, exclude) {
+ continue
+ }
+ filtered[filePath] = data
+ }
+ return filtered
+}
+
+// matchesAnyPattern checks if a file path matches any of the glob patterns.
+// Uses doublestar for proper ** support.
+func matchesAnyPattern(filePath string, patterns []string) bool {
+ filePath = strings.ReplaceAll(filePath, "\\", "/")
+ for _, pattern := range patterns {
+ if matched, _ := doublestar.Match(pattern, filePath); matched {
+ return true
+ }
+ }
+ return false
+}
+
+// matchGlob matches a path against a glob pattern supporting **.
+// Exported for testing.
+func matchGlob(filePath, pattern string) bool {
+ filePath = strings.ReplaceAll(filePath, "\\", "/")
+ matched, _ := doublestar.Match(pattern, filePath)
+ return matched
+}
+
+// --- Coverage Export ---
+
// CoverageExport is the top-level JSON export structure: the computed summary,
// the raw merged aggregate snapshot, and per-test file diffs keyed by test ID.
type CoverageExport struct {
	Summary   CoverageSummary                        `json:"summary"`
	Aggregate CoverageSnapshot                       `json:"aggregate"`
	PerTest   map[string]map[string]CoverageFileDiff `json:"per_test"`
}
+
+// WriteCoverageJSON writes aggregate + per-test coverage as JSON.
+func WriteCoverageJSON(path string, aggregate CoverageSnapshot, perTest map[string]map[string]CoverageFileDiff, records []CoverageTestRecord) error {
+ // Build set of allowed test IDs from the filtered in-suite records
+ allowedTestIDs := make(map[string]struct{}, len(records))
+ for _, r := range records {
+ allowedTestIDs[r.TestID] = struct{}{}
+ }
+
+ // Filter per-test data to only include in-suite tests and files present in the (filtered) aggregate
+ filteredPerTest := make(map[string]map[string]CoverageFileDiff, len(perTest))
+ for testID, testDetail := range perTest {
+ if _, ok := allowedTestIDs[testID]; !ok {
+ continue
+ }
+ filtered := make(map[string]CoverageFileDiff)
+ for fp, fd := range testDetail {
+ if _, ok := aggregate[fp]; ok {
+ filtered[fp] = fd
+ }
+ }
+ if len(filtered) > 0 {
+ filteredPerTest[testID] = filtered
+ }
+ }
+
+ // Compute summary from the filtered per-test data so it matches the exported data
+ summary := ComputeCoverageSummary(aggregate, filteredPerTest, records)
+
+ export := CoverageExport{
+ Summary: summary,
+ Aggregate: aggregate,
+ PerTest: filteredPerTest,
+ }
+
+ data, err := json.MarshalIndent(export, "", " ")
+ if err != nil {
+ return err
+ }
+ return os.WriteFile(path, data, 0o600)
+}
+
+// WriteCoverageLCOV writes aggregate coverage data in LCOV format.
+func WriteCoverageLCOV(path string, aggregate CoverageSnapshot) error {
+ var b strings.Builder
+
+ // Sort file paths for deterministic output
+ filePaths := make([]string, 0, len(aggregate))
+ for fp := range aggregate {
+ filePaths = append(filePaths, fp)
+ }
+ sort.Strings(filePaths)
+
+ for _, filePath := range filePaths {
+ fileData := aggregate[filePath]
+ b.WriteString("SF:")
+ b.WriteString(filePath)
+ b.WriteByte('\n')
+
+ // Line data (DA:line,count)
+ lineNums := make([]int, 0, len(fileData.Lines))
+ for lineStr := range fileData.Lines {
+ if n, err := strconv.Atoi(lineStr); err == nil {
+ lineNums = append(lineNums, n)
+ }
+ }
+ sort.Ints(lineNums)
+
+ linesFound := 0
+ linesHit := 0
+ for _, line := range lineNums {
+ count := fileData.Lines[strconv.Itoa(line)]
+ b.WriteString(fmt.Sprintf("DA:%d,%d\n", line, count))
+ linesFound++
+ if count > 0 {
+ linesHit++
+ }
+ }
+
+ // Branch data (BRDA:line,block,branch,count)
+ branchLines := make([]int, 0, len(fileData.Branches))
+ for lineStr := range fileData.Branches {
+ if n, err := strconv.Atoi(lineStr); err == nil {
+ branchLines = append(branchLines, n)
+ }
+ }
+ sort.Ints(branchLines)
+
+ branchesFound := 0
+ branchesHit := 0
+ for _, line := range branchLines {
+ info := fileData.Branches[strconv.Itoa(line)]
+ for i := 0; i < info.Total; i++ {
+ count := 0
+ if i < info.Covered {
+ count = 1
+ }
+ b.WriteString(fmt.Sprintf("BRDA:%d,0,%d,%d\n", line, i, count))
+ branchesFound++
+ if count > 0 {
+ branchesHit++
+ }
+ }
+ }
+
+ b.WriteString(fmt.Sprintf("LF:%d\n", linesFound))
+ b.WriteString(fmt.Sprintf("LH:%d\n", linesHit))
+ if branchesFound > 0 {
+ b.WriteString(fmt.Sprintf("BRF:%d\n", branchesFound))
+ b.WriteString(fmt.Sprintf("BRH:%d\n", branchesHit))
+ }
+ b.WriteString("end_of_record\n")
+ }
+
+ return os.WriteFile(path, []byte(b.String()), 0o600)
+}
+
+// filterInSuiteRecords returns only records from in-suite tests.
+// If no tests have suite status set (local run, no cloud), returns all records.
+func filterInSuiteRecords(records []CoverageTestRecord) []CoverageTestRecord {
+ hasSuiteStatus := false
+ for _, r := range records {
+ if r.SuiteStatus != "" {
+ hasSuiteStatus = true
+ break
+ }
+ }
+ if !hasSuiteStatus {
+ return records
+ }
+
+ var filtered []CoverageTestRecord
+ for _, r := range records {
+ if r.SuiteStatus != "draft" {
+ filtered = append(filtered, r)
+ }
+ }
+ return filtered
+}
+
+// --- Helpers ---
+
// dedup collapses runs of equal adjacent values in an already-sorted int
// slice, returning the unique values in order. Empty or nil input is returned
// as-is; the input slice itself is never modified.
func dedup(sorted []int) []int {
	if len(sorted) == 0 {
		return sorted
	}
	out := make([]int, 0, len(sorted))
	out = append(out, sorted[0])
	for _, v := range sorted[1:] {
		if v != out[len(out)-1] {
			out = append(out, v)
		}
	}
	return out
}
+
+// normalizeCoveragePaths converts absolute file paths to repo-relative paths.
+//
+// For non-Docker: uses git root as the base (handles monorepos, cd into subdirs).
+// For Docker: coverage.strip_path_prefix strips the container mount point first
+// (e.g., "/app"), then git root normalization converts the rest to repo-relative.
+func normalizeCoveragePaths(snapshot CoverageSnapshot, stripPrefix string) CoverageSnapshot {
+ if len(snapshot) == 0 {
+ return snapshot
+ }
+
+ // Step 1: Strip container path prefix if configured (Docker Compose)
+ if stripPrefix != "" {
+ stripPrefix = strings.TrimRight(stripPrefix, "/")
+ stripped := make(CoverageSnapshot, len(snapshot))
+ for absPath, fileData := range snapshot {
+ newPath := absPath
+ if strings.HasPrefix(absPath, stripPrefix+"/") {
+ newPath = absPath[len(stripPrefix)+1:]
+ } else if absPath == stripPrefix {
+ newPath = "."
+ }
+ stripped[newPath] = fileData
+ }
+ snapshot = stripped
+ }
+
+ // Step 2: Normalize to git-root-relative paths
+ base := getPathNormalizationBase()
+ if base == "" {
+ return snapshot
+ }
+
+ normalized := make(CoverageSnapshot, len(snapshot))
+ for absPath, fileData := range snapshot {
+ relPath, err := filepath.Rel(base, absPath)
+ if err != nil || strings.HasPrefix(relPath, "..") {
+ // Already relative (from strip_prefix) or outside git root — keep as-is
+ relPath = absPath
+ }
+ normalized[relPath] = fileData
+ }
+ return normalized
+}
+
+// getPathNormalizationBase returns the git root, falling back to cwd.
+func getPathNormalizationBase() string {
+ if root, err := utils.GetGitRootDir(); err == nil {
+ return root
+ }
+ if cwd, err := os.Getwd(); err == nil {
+ return cwd
+ }
+ return ""
+}
diff --git a/internal/runner/coverage_test.go b/internal/runner/coverage_test.go
new file mode 100644
index 0000000..33f56a7
--- /dev/null
+++ b/internal/runner/coverage_test.go
@@ -0,0 +1,552 @@
+package runner
+
+import (
+ "encoding/json"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestDedup verifies dedup collapses adjacent duplicates in already-sorted
// input, covering empty, singleton, all-duplicate, and boundary cases.
func TestDedup(t *testing.T) {
	tests := []struct {
		name     string
		input    []int
		expected []int
	}{
		{name: "empty", input: []int{}, expected: []int{}},
		{name: "single element", input: []int{1}, expected: []int{1}},
		{name: "no duplicates", input: []int{1, 2, 3}, expected: []int{1, 2, 3}},
		{name: "all duplicates", input: []int{5, 5, 5}, expected: []int{5}},
		{name: "some duplicates", input: []int{1, 1, 2, 3, 3, 4}, expected: []int{1, 2, 3, 4}},
		{name: "duplicates at end", input: []int{1, 2, 3, 3}, expected: []int{1, 2, 3}},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := dedup(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}
+
// makeFileData builds a FileCoverageData fixture containing only per-line hit
// counts (no branch data) — convenience for line-coverage-only tests.
func makeFileData(lines map[string]int) FileCoverageData {
	return FileCoverageData{Lines: lines}
}
+
// makeFileDataWithBranches builds a FileCoverageData fixture with both line
// counts and branch data: file-level totals plus per-line BranchInfo.
func makeFileDataWithBranches(lines map[string]int, totalB, covB int, branches map[string]BranchInfo) FileCoverageData {
	return FileCoverageData{
		Lines:           lines,
		TotalBranches:   totalB,
		CoveredBranches: covB,
		Branches:        branches,
	}
}
+
// TestSnapshotToCoverageDetail checks the snapshot→per-file-detail conversion:
// covered-line extraction and counting, files with only zero counts being
// dropped, and branch data passing through unchanged.
func TestSnapshotToCoverageDetail(t *testing.T) {
	t.Run("empty input", func(t *testing.T) {
		result := SnapshotToCoverageDetail(nil)
		assert.Empty(t, result)
	})

	t.Run("single file with covered lines", func(t *testing.T) {
		input := CoverageSnapshot{
			"/app/main.go": makeFileData(map[string]int{"1": 1, "2": 3, "5": 0, "10": 1}),
		}
		result := SnapshotToCoverageDetail(input)
		require.Contains(t, result, "/app/main.go")
		fd := result["/app/main.go"]
		// Line 5 has count 0, so it is coverable (4 lines) but not covered (3).
		assert.Equal(t, []int{1, 2, 10}, fd.CoveredLines)
		assert.Equal(t, 3, fd.CoveredCount)
		assert.Equal(t, 4, fd.CoverableLines)
	})

	t.Run("file with only zero counts is excluded", func(t *testing.T) {
		input := CoverageSnapshot{
			"/app/unused.go": makeFileData(map[string]int{"1": 0, "2": 0}),
		}
		result := SnapshotToCoverageDetail(input)
		assert.Empty(t, result)
	})

	t.Run("includes branch data", func(t *testing.T) {
		input := CoverageSnapshot{
			"/app/main.go": makeFileDataWithBranches(
				map[string]int{"1": 1, "5": 1},
				4, 2,
				map[string]BranchInfo{"5": {Total: 2, Covered: 1}},
			),
		}
		result := SnapshotToCoverageDetail(input)
		fd := result["/app/main.go"]
		assert.Equal(t, 4, fd.TotalBranches)
		assert.Equal(t, 2, fd.CoveredBranches)
		assert.Equal(t, 2, fd.Branches["5"].Total)
		assert.Equal(t, 1, fd.Branches["5"].Covered)
	})
}
+
// TestMergeWithBaseline covers the baseline/per-test merge: nil inputs,
// count summation, new files appearing only in records, non-mutation of the
// baseline (lines and branches), and sum+clamp branch union semantics.
func TestMergeWithBaseline(t *testing.T) {
	t.Run("nil baseline nil records", func(t *testing.T) {
		result := mergeWithBaseline(nil, nil)
		assert.Empty(t, result)
	})

	t.Run("nil baseline with records", func(t *testing.T) {
		records := []CoverageTestRecord{
			{
				TestID:   "test-1",
				Coverage: CoverageSnapshot{"/app/main.go": makeFileData(map[string]int{"1": 1, "2": 3})},
			},
		}
		result := mergeWithBaseline(nil, records)
		require.Contains(t, result, "/app/main.go")
		assert.Equal(t, 1, result["/app/main.go"].Lines["1"])
		assert.Equal(t, 3, result["/app/main.go"].Lines["2"])
	})

	t.Run("baseline with no records", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileData(map[string]int{"1": 0, "2": 0, "3": 0}),
		}
		result := mergeWithBaseline(baseline, nil)
		require.Contains(t, result, "/app/main.go")
		assert.Equal(t, 0, result["/app/main.go"].Lines["1"])
		assert.Equal(t, 0, result["/app/main.go"].Lines["3"])
	})

	t.Run("baseline merged with records adds counts", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileData(map[string]int{"1": 0, "2": 0, "3": 0, "4": 0}),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{"/app/main.go": makeFileData(map[string]int{"1": 1, "3": 2})}},
			{TestID: "test-2", Coverage: CoverageSnapshot{"/app/main.go": makeFileData(map[string]int{"1": 1, "4": 1})}},
		}
		result := mergeWithBaseline(baseline, records)
		require.Contains(t, result, "/app/main.go")
		assert.Equal(t, 2, result["/app/main.go"].Lines["1"]) // 0+1+1
		assert.Equal(t, 0, result["/app/main.go"].Lines["2"]) // baseline 0, no test
		assert.Equal(t, 2, result["/app/main.go"].Lines["3"]) // 0+2
		assert.Equal(t, 1, result["/app/main.go"].Lines["4"]) // 0+1
	})

	t.Run("records can add new files not in baseline", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileData(map[string]int{"1": 0}),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{"/app/new.go": makeFileData(map[string]int{"10": 5})}},
		}
		result := mergeWithBaseline(baseline, records)
		assert.Len(t, result, 2)
		assert.Equal(t, 5, result["/app/new.go"].Lines["10"])
	})

	t.Run("baseline is not mutated", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileData(map[string]int{"1": 0}),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{"/app/main.go": makeFileData(map[string]int{"1": 5})}},
		}
		_ = mergeWithBaseline(baseline, records)
		assert.Equal(t, 0, baseline["/app/main.go"].Lines["1"])
	})

	t.Run("merges branch data", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileDataWithBranches(
				map[string]int{"1": 0},
				4, 0,
				map[string]BranchInfo{"5": {Total: 2, Covered: 0}},
			),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{
				"/app/main.go": makeFileDataWithBranches(
					map[string]int{"1": 1},
					2, 1,
					map[string]BranchInfo{"5": {Total: 2, Covered: 1}},
				),
			}},
		}
		result := mergeWithBaseline(baseline, records)
		assert.Equal(t, 1, result["/app/main.go"].Branches["5"].Covered)
		assert.Equal(t, 2, result["/app/main.go"].Branches["5"].Total)
	})

	t.Run("branch union semantics: two tests cover different branches", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileDataWithBranches(
				map[string]int{"1": 0},
				2, 0,
				map[string]BranchInfo{"5": {Total: 2, Covered: 0}},
			),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{
				"/app/main.go": makeFileDataWithBranches(
					map[string]int{"1": 1},
					2, 1,
					map[string]BranchInfo{"5": {Total: 2, Covered: 1}}, // test 1 covers 1 branch
				),
			}},
			{TestID: "test-2", Coverage: CoverageSnapshot{
				"/app/main.go": makeFileDataWithBranches(
					map[string]int{"1": 1},
					2, 1,
					map[string]BranchInfo{"5": {Total: 2, Covered: 1}}, // test 2 covers 1 branch
				),
			}},
		}
		result := mergeWithBaseline(baseline, records)
		// Union: 1 + 1 = 2, clamped to total 2
		assert.Equal(t, 2, result["/app/main.go"].Branches["5"].Covered)
		assert.Equal(t, 2, result["/app/main.go"].Branches["5"].Total)
	})

	t.Run("baseline branches not mutated", func(t *testing.T) {
		baseline := CoverageSnapshot{
			"/app/main.go": makeFileDataWithBranches(
				map[string]int{"1": 0},
				2, 0,
				map[string]BranchInfo{"5": {Total: 2, Covered: 0}},
			),
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", Coverage: CoverageSnapshot{
				"/app/main.go": makeFileDataWithBranches(
					map[string]int{"1": 1},
					2, 1,
					map[string]BranchInfo{"5": {Total: 2, Covered: 1}},
				),
			}},
		}
		_ = mergeWithBaseline(baseline, records)
		// Original baseline branches should be untouched
		assert.Equal(t, 0, baseline["/app/main.go"].Branches["5"].Covered)
	})
}
+
// TestComputeCoverageSummary checks the summary math: empty input yields
// zeros, aggregate and per-file line percentages, branch percentages, and the
// per-test rollup (lines summed from detail, record order preserved).
func TestComputeCoverageSummary(t *testing.T) {
	t.Run("empty aggregate", func(t *testing.T) {
		summary := ComputeCoverageSummary(nil, nil, nil)
		assert.Equal(t, 0, summary.Aggregate.TotalCoverableLines)
		assert.Equal(t, 0.0, summary.Aggregate.CoveragePct)
	})

	t.Run("computes aggregate percentages", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"main.go": makeFileData(map[string]int{"1": 1, "2": 1, "3": 0, "4": 0}),
		}
		summary := ComputeCoverageSummary(aggregate, nil, nil)
		assert.Equal(t, 4, summary.Aggregate.TotalCoverableLines)
		assert.Equal(t, 2, summary.Aggregate.TotalCoveredLines)
		assert.Equal(t, 50.0, summary.Aggregate.CoveragePct)
		assert.Equal(t, 1, summary.Aggregate.TotalFiles)
		assert.Equal(t, 1, summary.Aggregate.CoveredFiles)
	})

	t.Run("computes per-file summaries", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"a.go": makeFileData(map[string]int{"1": 1, "2": 0}),
			"b.go": makeFileData(map[string]int{"1": 1, "2": 1}),
		}
		summary := ComputeCoverageSummary(aggregate, nil, nil)
		assert.Equal(t, 50.0, summary.PerFile["a.go"].CoveragePct)
		assert.Equal(t, 100.0, summary.PerFile["b.go"].CoveragePct)
	})

	t.Run("includes branch coverage", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"main.go": makeFileDataWithBranches(
				map[string]int{"1": 1},
				4, 2,
				map[string]BranchInfo{"5": {Total: 2, Covered: 1}},
			),
		}
		summary := ComputeCoverageSummary(aggregate, nil, nil)
		assert.Equal(t, 4, summary.Aggregate.TotalBranches)
		assert.Equal(t, 2, summary.Aggregate.CoveredBranches)
		assert.Equal(t, 50.0, summary.Aggregate.BranchCoveragePct)
	})

	t.Run("includes per-test summaries", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"main.go": makeFileData(map[string]int{"1": 1}),
		}
		perTest := map[string]map[string]CoverageFileDiff{
			"test-1": {"main.go": {CoveredCount: 5, CoverableLines: 10}},
			"test-2": {"main.go": {CoveredCount: 3, CoverableLines: 10}},
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", TestName: "GET /api"},
			{TestID: "test-2", TestName: "POST /api"},
		}
		summary := ComputeCoverageSummary(aggregate, perTest, records)
		require.Len(t, summary.PerTest, 2)
		assert.Equal(t, 5, summary.PerTest[0].CoveredLines)
		assert.Equal(t, "GET /api", summary.PerTest[0].TestName)
		assert.Equal(t, 3, summary.PerTest[1].CoveredLines)
	})
}
+
// TestNormalizeCoveragePaths exercises the strip_path_prefix step of path
// normalization: empty inputs, mount-point stripping (with and without a
// trailing slash), and nested subdirectory layouts. Git-root relativization
// is environment-dependent and is covered E2E instead.
func TestNormalizeCoveragePaths(t *testing.T) {
	t.Run("nil input returns empty", func(t *testing.T) {
		result := normalizeCoveragePaths(nil, "")
		assert.Len(t, result, 0)
	})

	t.Run("empty input returns empty", func(t *testing.T) {
		result := normalizeCoveragePaths(CoverageSnapshot{}, "")
		assert.Empty(t, result)
	})

	t.Run("strip_path_prefix strips container mount point", func(t *testing.T) {
		snapshot := CoverageSnapshot{
			"/app/app/api/views.py":   FileCoverageData{Lines: map[string]int{"1": 1}},
			"/app/app/settings.py":    FileCoverageData{Lines: map[string]int{"1": 1}},
			"/app/tusk_drift_init.py": FileCoverageData{Lines: map[string]int{"1": 1}},
		}
		result := normalizeCoveragePaths(snapshot, "/app")
		assert.Contains(t, result, "app/api/views.py")
		assert.Contains(t, result, "app/settings.py")
		assert.Contains(t, result, "tusk_drift_init.py")
	})

	t.Run("strip_path_prefix with trailing slash", func(t *testing.T) {
		snapshot := CoverageSnapshot{
			"/app/server.py": FileCoverageData{Lines: map[string]int{"1": 1}},
		}
		result := normalizeCoveragePaths(snapshot, "/app/")
		assert.Contains(t, result, "server.py")
	})

	t.Run("strip_path_prefix with cd backend", func(t *testing.T) {
		snapshot := CoverageSnapshot{
			"/app/backend/src/server.py": FileCoverageData{Lines: map[string]int{"1": 1}},
		}
		result := normalizeCoveragePaths(snapshot, "/app")
		assert.Contains(t, result, "backend/src/server.py")
	})

	// Note: full git root normalization depends on git root which is environment-specific.
	// We test the function handles edge cases; full integration is tested E2E.
}
+
// TestMatchGlob is a table test over glob semantics: leading/trailing/infix
// ** patterns, specific subtree patterns, and plain (non-**) globs.
func TestMatchGlob(t *testing.T) {
	tests := []struct {
		path    string
		pattern string
		want    bool
	}{
		// ** patterns
		{"backend/src/db/migrations/1700-Init.ts", "**/migrations/**", true},
		{"backend/src/db/migrations/foo/bar.ts", "**/migrations/**", true},
		{"backend/src/services/ResourceService.ts", "**/migrations/**", false},

		// Leading **
		{"backend/src/utils/test.test.ts", "**/*.test.ts", true},
		{"foo.test.ts", "**/*.test.ts", true},
		{"backend/src/utils/test.ts", "**/*.test.ts", false},

		// Trailing **
		{"backend/src/db/migrations/1700-Init.ts", "backend/src/db/**", true},
		{"backend/src/db/config.ts", "backend/src/db/**", true},
		{"backend/src/services/foo.ts", "backend/src/db/**", false},

		// Specific path with **
		{"backend/src/db/migrations/1700-Init.ts", "backend/src/db/migrations/**", true},
		{"backend/src/db/config.ts", "backend/src/db/migrations/**", false},

		// No ** — standard glob
		{"server.js", "server.js", true},
		{"server.ts", "server.js", false},
		{"server.js", "*.js", true},
	}

	for _, tt := range tests {
		t.Run(tt.path+"_"+tt.pattern, func(t *testing.T) {
			got := matchGlob(tt.path, tt.pattern)
			assert.Equal(t, tt.want, got, "matchGlob(%q, %q)", tt.path, tt.pattern)
		})
	}
}
+
// TestFilterCoverageByPatterns checks include/exclude filtering over a shared
// six-file snapshot: exclude-only, include-only, both combined (include first,
// then exclude), and the no-pattern passthrough.
func TestFilterCoverageByPatterns(t *testing.T) {
	snapshot := CoverageSnapshot{
		"backend/src/db/migrations/1700-Init.ts":  FileCoverageData{Lines: map[string]int{"1": 1}},
		"backend/src/db/migrations/1701-Add.ts":   FileCoverageData{Lines: map[string]int{"1": 1}},
		"backend/src/services/ResourceService.ts": FileCoverageData{Lines: map[string]int{"1": 1}},
		"backend/src/scripts/runMigration.ts":     FileCoverageData{Lines: map[string]int{"1": 1}},
		"backend/src/utils/test.test.ts":          FileCoverageData{Lines: map[string]int{"1": 1}},
		"shared/utils/helpers.ts":                 FileCoverageData{Lines: map[string]int{"1": 1}},
	}

	t.Run("exclude only", func(t *testing.T) {
		result := filterCoverageByPatterns(snapshot, nil, []string{
			"**/migrations/**",
			"**/scripts/**",
		})
		assert.Len(t, result, 3)
		assert.Contains(t, result, "backend/src/services/ResourceService.ts")
		assert.Contains(t, result, "backend/src/utils/test.test.ts")
		assert.Contains(t, result, "shared/utils/helpers.ts")
	})

	t.Run("include only", func(t *testing.T) {
		result := filterCoverageByPatterns(snapshot, []string{
			"backend/src/**",
		}, nil)
		assert.Len(t, result, 5)
		assert.Contains(t, result, "backend/src/services/ResourceService.ts")
		assert.NotContains(t, result, "shared/utils/helpers.ts")
	})

	t.Run("include and exclude", func(t *testing.T) {
		result := filterCoverageByPatterns(snapshot, []string{
			"backend/src/**",
		}, []string{
			"**/migrations/**",
		})
		assert.Len(t, result, 3)
		assert.Contains(t, result, "backend/src/services/ResourceService.ts")
		assert.Contains(t, result, "backend/src/scripts/runMigration.ts")
		assert.Contains(t, result, "backend/src/utils/test.test.ts")
		assert.NotContains(t, result, "shared/utils/helpers.ts")
		assert.NotContains(t, result, "backend/src/db/migrations/1700-Init.ts")
	})

	t.Run("no patterns returns all", func(t *testing.T) {
		result := filterCoverageByPatterns(snapshot, nil, nil)
		assert.Len(t, result, 6)
	})
}
+
// TestWriteCoverageLCOV validates the LCOV writer: DA/LF/LH/BRF/BRH records
// for a file with line and branch data, an empty snapshot producing an empty
// file, and alphabetical SF record ordering for determinism.
func TestWriteCoverageLCOV(t *testing.T) {
	t.Run("writes valid LCOV format", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"src/server.js": FileCoverageData{
				Lines: map[string]int{
					"1": 1, "2": 3, "5": 0, "10": 1,
				},
				Branches: map[string]BranchInfo{
					"5": {Total: 2, Covered: 1},
				},
				TotalBranches:   2,
				CoveredBranches: 1,
			},
		}
		path := filepath.Join(t.TempDir(), "coverage.lcov")
		err := WriteCoverageLCOV(path, aggregate)
		require.NoError(t, err)

		data, err := os.ReadFile(path) //nolint:gosec // test file, path from t.TempDir()
		require.NoError(t, err)
		content := string(data)

		assert.Contains(t, content, "SF:src/server.js")
		assert.Contains(t, content, "DA:1,1")
		assert.Contains(t, content, "DA:5,0")
		assert.Contains(t, content, "LF:4")
		assert.Contains(t, content, "LH:3")
		assert.Contains(t, content, "BRF:2")
		assert.Contains(t, content, "BRH:1")
		assert.Contains(t, content, "end_of_record")
	})

	t.Run("empty snapshot writes empty file", func(t *testing.T) {
		path := filepath.Join(t.TempDir(), "coverage.lcov")
		err := WriteCoverageLCOV(path, CoverageSnapshot{})
		require.NoError(t, err)

		data, err := os.ReadFile(path) //nolint:gosec // test file, path from t.TempDir()
		require.NoError(t, err)
		assert.Empty(t, strings.TrimSpace(string(data)))
	})

	t.Run("multiple files sorted deterministically", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"z/last.js":  FileCoverageData{Lines: map[string]int{"1": 1}},
			"a/first.js": FileCoverageData{Lines: map[string]int{"1": 1}},
		}
		path := filepath.Join(t.TempDir(), "coverage.lcov")
		err := WriteCoverageLCOV(path, aggregate)
		require.NoError(t, err)

		data, err := os.ReadFile(path) //nolint:gosec // test file, path from t.TempDir()
		require.NoError(t, err)
		content := string(data)

		firstIdx := strings.Index(content, "SF:a/first.js")
		lastIdx := strings.Index(content, "SF:z/last.js")
		assert.True(t, firstIdx < lastIdx, "files should be sorted alphabetically")
	})
}
+
// TestWriteCoverageJSON validates the JSON export: top-level structure
// (summary / aggregate / per_test), computed summary totals, and that an
// empty snapshot still yields valid, parseable JSON.
func TestWriteCoverageJSON(t *testing.T) {
	t.Run("writes valid JSON with expected structure", func(t *testing.T) {
		aggregate := CoverageSnapshot{
			"src/server.js": FileCoverageData{
				Lines: map[string]int{
					"1": 1, "2": 3, "5": 0,
				},
				TotalBranches:   4,
				CoveredBranches: 2,
			},
		}
		perTest := map[string]map[string]CoverageFileDiff{
			"test-1": {
				"src/server.js": {CoveredLines: []int{1, 2}, CoveredCount: 2},
			},
		}
		records := []CoverageTestRecord{
			{TestID: "test-1", TestName: "GET /api"},
		}

		path := filepath.Join(t.TempDir(), "coverage.json")
		err := WriteCoverageJSON(path, aggregate, perTest, records)
		require.NoError(t, err)

		data, err := os.ReadFile(path) //nolint:gosec // test file, path from t.TempDir()
		require.NoError(t, err)

		var result map[string]interface{}
		err = json.Unmarshal(data, &result)
		require.NoError(t, err)

		// Top-level keys: aggregate (raw snapshot), per_test, summary (computed)
		assert.Contains(t, result, "aggregate")
		assert.Contains(t, result, "per_test")
		assert.Contains(t, result, "summary")

		summary := result["summary"].(map[string]interface{})
		assert.Contains(t, summary, "aggregate")
		assert.Contains(t, summary, "per_file")
		assert.Contains(t, summary, "timestamp")

		// Three line keys, two with count > 0.
		agg := summary["aggregate"].(map[string]interface{})
		assert.Equal(t, float64(3), agg["total_coverable_lines"])
		assert.Equal(t, float64(2), agg["total_covered_lines"])
	})

	t.Run("empty snapshot writes valid JSON", func(t *testing.T) {
		path := filepath.Join(t.TempDir(), "coverage.json")
		err := WriteCoverageJSON(path, CoverageSnapshot{}, nil, nil)
		require.NoError(t, err)

		data, err := os.ReadFile(path) //nolint:gosec // test file, path from t.TempDir()
		require.NoError(t, err)

		var result map[string]interface{}
		err = json.Unmarshal(data, &result)
		require.NoError(t, err)
		assert.Contains(t, result, "aggregate")
	})
}
diff --git a/internal/runner/environment_replay.go b/internal/runner/environment_replay.go
index 15e121c..ebf814f 100644
--- a/internal/runner/environment_replay.go
+++ b/internal/runner/environment_replay.go
@@ -62,6 +62,17 @@ func ReplayTestsByEnvironment(
log.ServiceLog(fmt.Sprintf("✓ Environment ready (%.1fs)", envStartDuration))
log.Stderrln(fmt.Sprintf("✓ Environment ready (%.1fs)", envStartDuration))
+ // Coverage: take baseline snapshot to capture all coverable lines and reset counters
+ if executor.IsCoverageEnabled() {
+ baseline, err := executor.TakeCoverageBaseline()
+ if err != nil {
+ log.Warn("Failed to take baseline coverage snapshot", "error", err)
+ } else {
+ executor.SetCoverageBaseline(baseline)
+ log.Debug("Coverage baseline taken (counters reset, all coverable lines captured)")
+ }
+ }
+
// 3. Run tests for this environment
results, err := executor.RunTests(group.Tests)
if err != nil {
diff --git a/internal/runner/executor.go b/internal/runner/executor.go
index fa67598..2d2851d 100644
--- a/internal/runner/executor.go
+++ b/internal/runner/executor.go
@@ -92,6 +92,21 @@ type Executor struct {
replayComposeOverride string
replayEnvVars map[string]string
replaySandboxConfigPath string
+
+ // Coverage
+ coverageEnabled bool
+ coverageShowOutput bool
+ coverageOutputPath string
+ coverageTempDir string
+ coverageIncludePatterns []string
+ coverageExcludePatterns []string
+ coverageStripPrefix string
+ coveragePerTest map[string]map[string]CoverageFileDiff
+ coveragePerTestMu sync.Mutex
+ coverageBaseline CoverageSnapshot
+ coverageBaselineMu sync.Mutex
+ coverageRecords []CoverageTestRecord
+ coverageRecordsMu sync.Mutex
}
func NewExecutor() *Executor {
@@ -473,6 +488,179 @@ func (e *Executor) SetOnTestCompleted(callback func(TestResult, Test)) {
e.OnTestCompleted = callback
}
+func (e *Executor) SetCoverageEnabled(enabled bool) {
+ e.coverageEnabled = enabled
+}
+
+func (e *Executor) IsCoverageEnabled() bool {
+ return e.coverageEnabled
+}
+
+func (e *Executor) SetShowCoverage(show bool) {
+ e.coverageShowOutput = show
+}
+
+func (e *Executor) IsCoverageShowOutput() bool {
+ return e.coverageShowOutput
+}
+
+func (e *Executor) GetCoverageOutputPath() string {
+ return e.coverageOutputPath
+}
+
+// GetCoverageBaselineForUpload returns two snapshots:
+// - merged: baseline + all per-test records (complete denominator for coverable lines)
+// - originalBaseline: raw baseline only (for startup-covered lines attribution)
+//
+// The merged snapshot ensures the denominator includes lines discovered during test
+// execution that weren't in the initial baseline snapshot. The original baseline is
+// kept separate so startup coverage is not conflated with test-driven coverage.
+func (e *Executor) GetCoverageBaselineForUpload() (merged CoverageSnapshot, originalBaseline CoverageSnapshot) {
+ e.coverageBaselineMu.Lock()
+ baseline := e.coverageBaseline
+ e.coverageBaselineMu.Unlock()
+
+ // If no baseline was captured, skip the aggregate upload entirely.
+ // Without a baseline denominator, coverage % would be near 100% (misleading).
+ // Per-test coverage is still uploaded via TraceTestCoverageData independently.
+ if baseline == nil {
+ return nil, nil
+ }
+
+	records := e.GetCoverageRecords() // empty means no tests ran with coverage; skip the aggregate upload too
+ if len(records) == 0 {
+ return nil, nil
+ }
+
+ // Merge baseline with ALL per-test records (not filtered by suite status)
+ // to get the complete set of coverable lines for the denominator
+ aggregate := mergeWithBaseline(baseline, records)
+
+ // Apply include/exclude patterns to both
+ aggregate = filterCoverageByPatterns(aggregate, e.coverageIncludePatterns, e.coverageExcludePatterns)
+ filteredBaseline := filterCoverageByPatterns(baseline, e.coverageIncludePatterns, e.coverageExcludePatterns)
+
+ return aggregate, filteredBaseline
+}
+
+func (e *Executor) SetCoverageOutputPath(path string) {
+ e.coverageOutputPath = path
+}
+
+func (e *Executor) SetCoverageIncludePatterns(patterns []string) {
+ e.coverageIncludePatterns = patterns
+}
+
+func (e *Executor) SetCoverageExcludePatterns(patterns []string) {
+ e.coverageExcludePatterns = patterns
+}
+
+func (e *Executor) SetCoverageStripPrefix(prefix string) {
+ e.coverageStripPrefix = prefix
+}
+
+// SetCoverageBaseline merges new baseline data into the existing baseline.
+// Called per environment group - accumulates across service restarts.
+func (e *Executor) SetCoverageBaseline(baseline CoverageSnapshot) {
+ e.coverageBaselineMu.Lock()
+ defer e.coverageBaselineMu.Unlock()
+ if e.coverageBaseline == nil {
+ e.coverageBaseline = make(CoverageSnapshot)
+ }
+ for filePath, fileData := range baseline {
+ existing, ok := e.coverageBaseline[filePath]
+ if !ok {
+ existing = FileCoverageData{
+ Lines: make(map[string]int),
+ Branches: make(map[string]BranchInfo),
+ }
+ }
+ for line, count := range fileData.Lines {
+ if existingCount, ok := existing.Lines[line]; !ok || existingCount == 0 {
+ existing.Lines[line] = count
+ }
+ }
+ // Merge branch data (keep max per line)
+ for line, branchInfo := range fileData.Branches {
+ if existing.Branches == nil {
+ existing.Branches = make(map[string]BranchInfo)
+ }
+ if eb, ok := existing.Branches[line]; !ok || branchInfo.Total > eb.Total {
+ existing.Branches[line] = branchInfo
+ }
+ }
+ // Recompute file-level totals from merged per-line data
+ totalB, covB := 0, 0
+ for _, b := range existing.Branches {
+ totalB += b.Total
+ covB += b.Covered
+ }
+ existing.TotalBranches = totalB
+ existing.CoveredBranches = covB
+ e.coverageBaseline[filePath] = existing
+ }
+}
+
+// SetTestCoverageDetail stores per-test coverage diff for display in TUI/print.
+func (e *Executor) SetTestCoverageDetail(testID string, detail map[string]CoverageFileDiff) {
+ e.coveragePerTestMu.Lock()
+ defer e.coveragePerTestMu.Unlock()
+ if e.coveragePerTest == nil {
+ e.coveragePerTest = make(map[string]map[string]CoverageFileDiff)
+ }
+ e.coveragePerTest[testID] = detail
+}
+
+// GetTestCoverageDetail returns a copy of per-test coverage diff for a given test.
+func (e *Executor) GetTestCoverageDetail(testID string) map[string]CoverageFileDiff {
+ e.coveragePerTestMu.Lock()
+ defer e.coveragePerTestMu.Unlock()
+ if e.coveragePerTest == nil {
+ return nil
+ }
+ original := e.coveragePerTest[testID]
+ if original == nil {
+ return nil
+ }
+ // Return a copy to avoid concurrent map access from TUI goroutines
+ copied := make(map[string]CoverageFileDiff, len(original))
+ for k, v := range original {
+ copied[k] = v
+ }
+ return copied
+}
+
+// GetCoveragePerTestSnapshot returns a shallow copy of the entire per-test coverage map.
+// The outer map is copied so callers can iterate without holding the mutex.
+func (e *Executor) GetCoveragePerTestSnapshot() map[string]map[string]CoverageFileDiff {
+ e.coveragePerTestMu.Lock()
+ defer e.coveragePerTestMu.Unlock()
+ if e.coveragePerTest == nil {
+ return nil
+ }
+ copied := make(map[string]map[string]CoverageFileDiff, len(e.coveragePerTest))
+ for k, v := range e.coveragePerTest {
+ copied[k] = v
+ }
+ return copied
+}
+
+// AddCoverageRecord stores a per-test coverage record.
+func (e *Executor) AddCoverageRecord(record CoverageTestRecord) {
+ e.coverageRecordsMu.Lock()
+ defer e.coverageRecordsMu.Unlock()
+ e.coverageRecords = append(e.coverageRecords, record)
+}
+
+// GetCoverageRecords returns a copy of all coverage records.
+func (e *Executor) GetCoverageRecords() []CoverageTestRecord {
+ e.coverageRecordsMu.Lock()
+ defer e.coverageRecordsMu.Unlock()
+ records := make([]CoverageTestRecord, len(e.coverageRecords))
+ copy(records, e.coverageRecords)
+ return records
+}
+
func (e *Executor) SetSuiteSpans(spans []*core.Span) {
e.suiteSpans = spans
if e.server != nil && len(spans) > 0 {
diff --git a/internal/runner/results_upload.go b/internal/runner/results_upload.go
index 7d512e8..a23b48c 100644
--- a/internal/runner/results_upload.go
+++ b/internal/runner/results_upload.go
@@ -4,7 +4,10 @@ import (
"context"
"encoding/json"
"fmt"
+ "math"
"os"
+ "sort"
+ "strconv"
"strings"
"time"
@@ -105,6 +108,9 @@ func ReportDriftRunSuccess(
driftRunID string,
authOptions api.AuthOptions,
results []TestResult,
+ coverageBaseline CoverageSnapshot,
+ coverageOriginalBaseline CoverageSnapshot,
+ commitSha string,
statusMessageOverride ...string,
) error {
// Note: We always report SUCCESS status here unless there was an error executing tests.
@@ -120,9 +126,84 @@ func ReportDriftRunSuccess(
CiStatus: finalStatus,
CiStatusMessage: &statusMessage,
}
+
+ // Attach coverage baseline if available
+ if coverageBaseline != nil {
+ statusReq.CoverageBaseline = buildCoverageBaselineProto(coverageBaseline, coverageOriginalBaseline, commitSha)
+ }
+
return client.UpdateDriftRunCIStatus(ctx, statusReq, authOptions)
}
+// buildCoverageBaselineProto builds the proto from two snapshots:
+// - merged: all coverable lines (baseline + per-test, for denominator)
+// - originalBaseline: raw baseline only (for startup-covered lines)
+//
+// This separation ensures StartupCoveredLinesByFile only contains lines covered
+// during module loading, not lines covered by test execution.
+func buildCoverageBaselineProto(merged CoverageSnapshot, originalBaseline CoverageSnapshot, commitSha string) *backend.CoverageBaseline {
+ baseline := &backend.CoverageBaseline{
+ CommitSha: commitSha,
+ CoverableLinesByFile: make(map[string]*backend.FileLineRanges),
+ StartupCoveredLinesByFile: make(map[string]*backend.FileLineRanges),
+ }
+
+ // Coverable lines from the merged snapshot (complete denominator)
+	// Derive the total from the parsed lines so it always matches CoverableLinesByFile.
+	totalCoverable := int32(0)
+	for filePath, fileData := range merged {
+		var allLines []int32
+		for lineStr := range fileData.Lines {
+			if n, err := strconv.Atoi(lineStr); err == nil && n >= 0 && n <= math.MaxInt32 {
+				allLines = append(allLines, int32(n)) //nolint:gosec // bounds checked above
+			}
+		}
+		sort.Slice(allLines, func(i, j int) bool { return allLines[i] < allLines[j] })
+		totalCoverable += int32(len(allLines)) //nolint:gosec // line counts are safely within int32 range
+		baseline.CoverableLinesByFile[filePath] = toLineRangesProto(allLines)
+	}
+ baseline.TotalCoverableLines = totalCoverable
+
+ // Startup-covered lines from the original baseline only
+ for filePath, fileData := range originalBaseline {
+ var coveredLines []int32
+ for lineStr, count := range fileData.Lines {
+ if count > 0 {
+ if n, err := strconv.Atoi(lineStr); err == nil && n >= 0 && n <= math.MaxInt32 {
+ coveredLines = append(coveredLines, int32(n)) //nolint:gosec // bounds checked above
+ }
+ }
+ }
+ if len(coveredLines) > 0 {
+ sort.Slice(coveredLines, func(i, j int) bool { return coveredLines[i] < coveredLines[j] })
+ baseline.StartupCoveredLinesByFile[filePath] = toLineRangesProto(coveredLines)
+ }
+ }
+
+ return baseline
+}
+
+// toLineRangesProto compresses sorted int32s into LineRange protos.
+// [1,2,3,5,6,10] -> [{1,3},{5,6},{10,10}]
+func toLineRangesProto(sorted []int32) *backend.FileLineRanges {
+ if len(sorted) == 0 {
+ return &backend.FileLineRanges{}
+ }
+ var ranges []*backend.LineRange
+ start, end := sorted[0], sorted[0]
+ for i := 1; i < len(sorted); i++ {
+ if sorted[i] == end+1 {
+ end = sorted[i]
+ } else {
+ ranges = append(ranges, &backend.LineRange{Start: start, End: end})
+ start = sorted[i]
+ end = sorted[i]
+ }
+ }
+ ranges = append(ranges, &backend.LineRange{Start: start, End: end})
+ return &backend.FileLineRanges{Ranges: ranges}
+}
+
func BuildTraceTestResultsProto(e *Executor, results []TestResult, tests []Test) []*backend.TraceTestResult {
out := make([]*backend.TraceTestResult, 0, len(results))
@@ -246,7 +327,33 @@ func BuildTraceTestResultsProto(e *Executor, results []TestResult, tests []Test)
}
}
+ // Per-test coverage data (if coverage is enabled)
+ if e != nil && e.IsCoverageEnabled() {
+ detail := e.GetTestCoverageDetail(r.TestID)
+ if len(detail) > 0 {
+ covData := &backend.TraceTestCoverageData{
+ CoveredLinesByFile: make(map[string]*backend.FileLineRanges),
+ }
+ totalCovered := int32(0)
+ for filePath, fd := range detail {
+ totalCovered += int32(fd.CoveredCount) //nolint:gosec // coverage counts are safely within int32 range
+ sorted := toInt32Slice(fd.CoveredLines)
+ covData.CoveredLinesByFile[filePath] = toLineRangesProto(sorted)
+ }
+ covData.TotalCoveredLines = totalCovered
+ tr.CoverageData = covData
+ }
+ }
+
out = append(out, tr)
}
return out
}
+func toInt32Slice(ints []int) []int32 {
+	result := make([]int32, len(ints))
+	for i, v := range ints {
+		result[i] = int32(v) //nolint:gosec // line numbers are safely within int32 range
+	}
+	sort.Slice(result, func(i, j int) bool { return result[i] < result[j] }) // toLineRangesProto requires ascending input
+	return result
+}
diff --git a/internal/runner/server.go b/internal/runner/server.go
index c9f0b4f..cc5bde5 100644
--- a/internal/runner/server.go
+++ b/internal/runner/server.go
@@ -696,6 +696,9 @@ func (ms *Server) handleConnection(conn net.Conn) {
case core.MessageType_MESSAGE_TYPE_SET_TIME_TRAVEL:
// SDK is responding to our SetTimeTravel request
ms.handleSetTimeTravelResponse(&sdkMsg)
+ case core.MessageType_MESSAGE_TYPE_COVERAGE_SNAPSHOT:
+ log.Debug("Received coverage snapshot response", "requestId", sdkMsg.RequestId)
+ ms.handleCoverageSnapshotResponse(&sdkMsg)
default:
log.Debug("Unknown message type", "type", sdkMsg.Type)
}
@@ -982,6 +985,78 @@ func (ms *Server) waitForSDKResponse(requestID string, timeout time.Duration) (*
}
}
+// SendCoverageSnapshot sends a coverage snapshot request to the SDK and waits for the response.
+// Returns per-file coverage data. If baseline=true, includes all coverable lines (count=0 for uncovered).
+func (ms *Server) SendCoverageSnapshot(baseline bool) (*core.CoverageSnapshotResponse, error) {
+ ms.mu.RLock()
+ conn := ms.sdkConnection
+ ms.mu.RUnlock()
+
+ if conn == nil {
+ return nil, fmt.Errorf("no SDK connection available")
+ }
+
+ requestID := fmt.Sprintf("coverage-%d", time.Now().UnixNano())
+
+ msg := &core.CLIMessage{
+ Type: core.MessageType_MESSAGE_TYPE_COVERAGE_SNAPSHOT,
+ RequestId: requestID,
+ Payload: &core.CLIMessage_CoverageSnapshotRequest{
+ CoverageSnapshotRequest: &core.CoverageSnapshotRequest{
+ Baseline: baseline,
+ },
+ },
+ }
+
+ // Register the pending response channel BEFORE sending so we don't miss
+ // a fast SDK reply that arrives before the channel is registered.
+ respChan := make(chan *core.SDKMessage, 1)
+ ms.pendingMu.Lock()
+ ms.pendingRequests[requestID] = respChan
+ ms.pendingMu.Unlock()
+
+ defer func() {
+ ms.pendingMu.Lock()
+ delete(ms.pendingRequests, requestID)
+ ms.pendingMu.Unlock()
+ }()
+
+ if err := ms.sendProtobufResponse(conn, msg); err != nil {
+ return nil, fmt.Errorf("failed to send coverage snapshot request: %w", err)
+ }
+
+ var response *core.SDKMessage
+ select {
+ case response = <-respChan:
+ case <-time.After(coverageSnapshotTimeout):
+ return nil, fmt.Errorf("failed to receive coverage snapshot response: timeout waiting for SDK response")
+ }
+
+ coverageResp := response.GetCoverageSnapshotResponse()
+ if coverageResp == nil {
+ return nil, fmt.Errorf("unexpected response type for coverage snapshot")
+ }
+
+ if !coverageResp.Success {
+ return nil, fmt.Errorf("SDK coverage snapshot failed: %s", coverageResp.Error)
+ }
+
+ return coverageResp, nil
+}
+
+// handleCoverageSnapshotResponse routes coverage snapshot responses to pending request channels
+func (ms *Server) handleCoverageSnapshotResponse(msg *core.SDKMessage) {
+ ms.pendingMu.Lock()
+ respChan, ok := ms.pendingRequests[msg.RequestId]
+ ms.pendingMu.Unlock()
+
+ if ok {
+ respChan <- msg
+ } else {
+ log.Debug("Received coverage snapshot response with unknown request ID", "requestId", msg.RequestId)
+ }
+}
+
// handleSetTimeTravelResponse routes SetTimeTravel responses to pending request channels
func (ms *Server) handleSetTimeTravelResponse(msg *core.SDKMessage) {
ms.pendingMu.Lock()
diff --git a/internal/runner/service.go b/internal/runner/service.go
index e8bd65d..e65d6f1 100644
--- a/internal/runner/service.go
+++ b/internal/runner/service.go
@@ -43,8 +43,11 @@ func (e *Executor) StartService() error {
log.Debug("Starting service", "command", cfg.Service.Start.Command)
- // Wrap command with fence sandboxing (if supported and enabled)
command := cfg.Service.Start.Command
+
+	// Coverage: nothing to set here; env vars are injected later, after sandbox wrapping.
+
+ // Wrap command with fence sandboxing (if supported and enabled)
replayOverridePath := e.getReplayComposeOverride()
if replayOverridePath != "" && isComposeBasedStartCommand(command) {
commandWithReplayOverride, injected, injectErr := injectComposeOverrideFile(command, replayOverridePath)
@@ -146,6 +149,26 @@ func (e *Executor) StartService() error {
}
env = append(env, "TUSK_DRIFT_MODE=REPLAY")
+
+	// Coverage: inject the env vars that SDK coverage servers listen for.
+	// NODE_V8_COVERAGE is required by the Node SDK to enable V8 coverage collection.
+	// Env vars set here:
+	//   TUSK_COVERAGE=true - language-agnostic signal for both Node and Python SDKs
+	//   NODE_V8_COVERAGE= - Node-specific: directory where V8 writes coverage JSON
+	//   TS_NODE_EMIT=true - Node-specific: forces ts-node to write compiled JS to disk
+ if e.coverageEnabled {
+ env = append(env, "TUSK_COVERAGE=true")
+ // Node.js: V8 coverage needs a directory to write JSON files
+ v8CoverageDir, err := os.MkdirTemp("", "tusk-v8-coverage-*")
+ if err != nil {
+ return fmt.Errorf("failed to create temp dir for V8 coverage: %w", err)
+ }
+ e.coverageTempDir = v8CoverageDir
+ env = append(env, fmt.Sprintf("NODE_V8_COVERAGE=%s", v8CoverageDir))
+ env = append(env, "TS_NODE_EMIT=true")
+ log.Debug("Coverage enabled", "v8_dir", v8CoverageDir)
+ }
+
e.serviceCmd.Env = env
// Always capture service logs during startup.
@@ -311,6 +334,11 @@ func (e *Executor) StopService() error {
e.fenceManager.Cleanup()
e.fenceManager = nil
}
+ // Clean up V8 coverage temp directory
+ if e.coverageTempDir != "" {
+ _ = os.RemoveAll(e.coverageTempDir)
+ e.coverageTempDir = ""
+ }
log.ServiceLog("Service stopped")
}()
diff --git a/internal/tui/onboard-cloud/helpers.go b/internal/tui/onboard-cloud/helpers.go
index 3b80e77..ffcdab1 100644
--- a/internal/tui/onboard-cloud/helpers.go
+++ b/internal/tui/onboard-cloud/helpers.go
@@ -223,12 +223,7 @@ func saveSelectedClientToCLIConfig(clientID, clientName string) {
}
func getGitRootDir() (string, error) {
- cmd := exec.Command("git", "rev-parse", "--show-toplevel")
- out, err := cmd.Output()
- if err != nil {
- return "", fmt.Errorf("failed to get git root: %w", err)
- }
- return strings.TrimSpace(string(out)), nil
+ return utils.GetGitRootDir()
}
func detectGitHubIndicators() bool {
diff --git a/internal/tui/test_executor.go b/internal/tui/test_executor.go
index 1619ff2..62b3730 100644
--- a/internal/tui/test_executor.go
+++ b/internal/tui/test_executor.go
@@ -6,6 +6,7 @@ import (
"io"
"log/slog"
"os"
+ "path/filepath"
"slices"
"strings"
"time"
@@ -620,6 +621,40 @@ func (m *testExecutorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
if m.executor.OnTestCompleted != nil {
m.executor.OnTestCompleted(msg.result, test)
}
+
+ // Show per-file coverage breakdown in test log panel
+ if m.executor.IsCoverageEnabled() {
+		if detail := m.executor.GetTestCoverageDetail(test.TraceID); len(detail) > 0 { // NOTE(review): keyed by TraceID here but by TestID in results_upload — confirm these are the same value
+ totalLines := 0
+ for _, fd := range detail {
+ totalLines += fd.CoveredCount
+ }
+ m.addTestLog(test.TraceID, fmt.Sprintf(" 📊 Coverage: %d lines across %d files", totalLines, len(detail)))
+ // Sort file paths for deterministic display
+ filePaths := make([]string, 0, len(detail))
+ for fp := range detail {
+ filePaths = append(filePaths, fp)
+ }
+ slices.Sort(filePaths)
+ for _, filePath := range filePaths {
+ fd := detail[filePath]
+ // Paths are already git-relative from normalizeCoveragePaths.
+ // Only try Rel() on absolute paths (shouldn't happen, but defensive).
+ shortPath := filePath
+ if filepath.IsAbs(filePath) {
+ if cwd, err := os.Getwd(); err == nil {
+ if rel, err := filepath.Rel(cwd, filePath); err == nil {
+ shortPath = rel
+ }
+ }
+ }
+ m.addTestLog(test.TraceID, fmt.Sprintf(" %-40s %d lines", shortPath, fd.CoveredCount))
+ }
+ } else {
+ m.addTestLog(test.TraceID, " 📊 Coverage: 0 new lines")
+ }
+ }
+
if m.opts != nil && m.opts.OnTestCompleted != nil {
res := msg.result
go m.opts.OnTestCompleted(res, test, m.executor)
@@ -715,6 +750,29 @@ func (m *testExecutorModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) {
m.addServiceLog("\n" + strings.Repeat("=", 60))
m.addServiceLog("🏁 All tests completed!")
+ // Show aggregate coverage summary in service logs and write output file
+ if m.executor.IsCoverageEnabled() {
+ records := m.executor.GetCoverageRecords()
+ summaryLines, aggregate := m.executor.FormatCoverageSummaryLines(records)
+ if len(summaryLines) > 0 {
+ m.addServiceLog("")
+ for _, line := range summaryLines {
+ m.addServiceLog(line)
+ }
+ }
+ // Write coverage output file if requested. Suppress console display
+ // since we already showed the summary above via FormatCoverageSummaryLines.
+ // Pass pre-computed aggregate to avoid redundant computation.
+ savedShowOutput := m.executor.IsCoverageShowOutput()
+ m.executor.SetShowCoverage(false)
+ if err := m.executor.ProcessCoverageWithAggregate(records, aggregate); err != nil {
+ m.addServiceLog(fmt.Sprintf("⚠️ Failed to process coverage: %v", err))
+ } else if outputPath := m.executor.GetCoverageOutputPath(); outputPath != "" {
+ m.addServiceLog(fmt.Sprintf("📄 Coverage written to %s", outputPath))
+ }
+ m.executor.SetShowCoverage(savedShowOutput)
+ }
+
// All-tests completed upload (non-blocking)
if m.opts != nil && m.opts.OnAllCompleted != nil {
results := make([]runner.TestResult, len(m.results))
@@ -1043,6 +1101,17 @@ func (m *testExecutorModel) startNextEnvironmentGroup() tea.Cmd {
m.serviceStarted = true
m.addServiceLog("✅ Environment ready")
+ // Coverage: take baseline snapshot to capture all coverable lines and reset counters
+ if m.executor.IsCoverageEnabled() {
+ baseline, err := m.executor.TakeCoverageBaseline()
+ if err != nil {
+ m.addServiceLog("⚠️ Coverage baseline failed: " + err.Error())
+ } else {
+ m.executor.SetCoverageBaseline(baseline)
+ m.addServiceLog("✅ Coverage baseline captured")
+ }
+ }
+
// Build list of global test indices for this environment
envIdx := m.currentGroupIndex - 1 // We already incremented it above
m.currentEnvTestIndices = make([]int, 0, len(group.Tests))
diff --git a/internal/utils/filesystem.go b/internal/utils/filesystem.go
index 30b2f59..2336b59 100644
--- a/internal/utils/filesystem.go
+++ b/internal/utils/filesystem.go
@@ -3,6 +3,7 @@ package utils
import (
"fmt"
"os"
+ "os/exec"
"path/filepath"
"strings"
)
@@ -138,6 +139,17 @@ func EnsureDir(dir string) error {
return os.MkdirAll(dir, 0o750)
}
+// GetGitRootDir returns the root of the current git repository.
+// Returns empty string and an error if not in a git repo.
+func GetGitRootDir() (string, error) {
+ cmd := exec.Command("git", "rev-parse", "--show-toplevel")
+ out, err := cmd.Output()
+ if err != nil {
+ return "", fmt.Errorf("failed to get git root: %w", err)
+ }
+ return strings.TrimSpace(string(out)), nil
+}
+
// FindTraceFile searches for a JSONL trace file containing the given trace ID.
// If filename is provided, it tries that first before searching
func FindTraceFile(traceID string, filename string) (string, error) {