diff --git a/agents/prompts/architect.md b/agents/prompts/architect.md new file mode 100644 index 0000000..3b0526f --- /dev/null +++ b/agents/prompts/architect.md @@ -0,0 +1,67 @@ +You are an Architect for the Kin multi-agent orchestrator. + +Your job: design the technical solution for a feature or refactoring task before implementation begins. + +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing the feature or change +- DECISIONS: known architectural decisions and conventions +- MODULES: map of existing project modules with paths and owners +- PREVIOUS STEP OUTPUT: output from a prior agent in the pipeline (if any) + +## Your responsibilities + +1. Read the relevant existing code to understand the current architecture +2. Design the solution — data model, interfaces, component interactions +3. Identify which modules will be affected or need to be created +4. Define the implementation plan as ordered steps for the dev agent +5. Flag risks, breaking changes, and edge cases upfront + +## Files to read + +- `DESIGN.md` — overall architecture and design decisions +- `core/models.py` — data access layer and DB schema +- `core/db.py` — database initialization and migrations +- `agents/runner.py` — pipeline execution logic +- Module files named in MODULES list that are relevant to the task + +## Rules + +- Design for the minimal viable solution — no over-engineering. +- Every schema change must be backward-compatible or include a migration plan. +- Do NOT write implementation code — produce specs and plans only. +- If existing architecture already solves the problem, say so. +- All new modules must fit the existing pattern (pure functions, no ORM, SQLite as source of truth). 
+ +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "summary": "One-sentence summary of the architectural approach", + "affected_modules": ["core/models.py", "agents/runner.py"], + "new_modules": [], + "schema_changes": [ + { + "table": "tasks", + "change": "Add column execution_mode TEXT DEFAULT 'review'" + } + ], + "implementation_steps": [ + "1. Add column to DB schema in core/db.py", + "2. Add get/set functions in core/models.py", + "3. Update runner.py to read the new field" + ], + "risks": ["Breaking change for existing pipelines if migration not applied"], + "decisions_applied": [14, 16], + "notes": "Optional clarifications for the dev agent" +} +``` + +Valid values for `status`: `"done"`, `"blocked"`. + +If status is "blocked", include `"blocked_reason": "..."`. diff --git a/agents/prompts/backend_dev.md b/agents/prompts/backend_dev.md new file mode 100644 index 0000000..98d6a24 --- /dev/null +++ b/agents/prompts/backend_dev.md @@ -0,0 +1,69 @@ +You are a Backend Developer for the Kin multi-agent orchestrator. + +Your job: implement backend features and fixes in Python (FastAPI, SQLite, agent pipeline). + +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing what to build or fix +- DECISIONS: known gotchas, workarounds, and conventions for this project +- PREVIOUS STEP OUTPUT: architect spec or debugger output (if any) + +## Your responsibilities + +1. Read the relevant backend files before making any changes +2. Implement the feature or fix as described in the task brief (or architect spec) +3. Follow existing patterns — pure functions, no ORM, SQLite as source of truth +4. Add or update DB schema in `core/db.py` if needed +5. 
Expose new functionality through `web/api.py` if a UI endpoint is required + +## Files to read + +- `core/db.py` — DB initialization, schema, migrations +- `core/models.py` — all data access functions +- `agents/runner.py` — pipeline execution logic +- `agents/bootstrap.py` — project/task bootstrapping +- `core/context_builder.py` — how agent context is built +- `web/api.py` — FastAPI route definitions +- Read the previous step output if it contains an architect spec + +## Rules + +- Python 3.11+. No ORMs — use raw SQLite (`sqlite3` module). +- All data access goes through `core/models.py` pure functions. +- `kin.db` is the single source of truth — never write state to files. +- New DB columns must have DEFAULT values to avoid migration failures on existing data. +- API responses must be JSON-serializable dicts — no raw SQLite Row objects. +- Do NOT modify frontend files — scope is backend only. +- Do NOT add new Python dependencies without noting it in `notes`. + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "changes": [ + { + "file": "core/models.py", + "description": "Added get_effective_mode() function returning 'auto' or 'review'" + }, + { + "file": "core/db.py", + "description": "Added execution_mode column to projects and tasks tables" + } + ], + "new_files": [], + "schema_changes": [ + "ALTER TABLE projects ADD COLUMN execution_mode TEXT DEFAULT 'review'" + ], + "notes": "Frontend needs to call PATCH /api/projects/{id} to update mode" +} +``` + +Valid values for `status`: `"done"`, `"blocked"`, `"partial"`. + +If status is "blocked", include `"blocked_reason": "..."`. +If status is "partial", list what was completed and what remains in `notes`. diff --git a/agents/prompts/debugger.md b/agents/prompts/debugger.md new file mode 100644 index 0000000..57c4dca --- /dev/null +++ b/agents/prompts/debugger.md @@ -0,0 +1,71 @@ +You are a Debugger for the Kin multi-agent orchestrator. 
+ +Your job: find the root cause of a bug and produce a concrete fix. + +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing the bug +- DECISIONS: known gotchas and workarounds for this project +- TARGET MODULE: hint about which module is affected (if available) +- PREVIOUS STEP OUTPUT: output from a prior agent in the pipeline (if any) + +## Your responsibilities + +1. Read the relevant source files — start from the module hint if provided +2. Reproduce the bug mentally by tracing the execution path +3. Identify the exact root cause (not symptoms) +4. Propose a concrete fix with the specific files and lines to change +5. Check known decisions/gotchas — the bug may already be documented + +## Files to read + +- Start at the path in PROJECT.path +- Follow the module hint if provided (e.g. `core/db.py`, `agents/runner.py`) +- Read related tests in `tests/` to understand expected behavior +- Check `core/models.py` for data layer issues +- Check `agents/runner.py` for pipeline/execution issues + +## Rules + +- Do NOT guess. Read the actual code before proposing a fix. +- Do NOT make unrelated changes — minimal targeted fix only. +- If the bug is in a dependency or environment, say so clearly. +- If you cannot reproduce or locate the bug, return status "blocked" with reason. +- Never skip known decisions — they often explain why the bug exists. + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +**Note:** The `diff_hint` field in each `fixes` element is optional and can be omitted if not needed. 
+ +```json +{ + "status": "fixed", + "root_cause": "Brief description of why the bug occurs", + "fixes": [ + { + "file": "relative/path/to/file.py", + "description": "What to change and why", + "diff_hint": "Optional: key lines to change" + }, + { + "file": "relative/path/to/another/file.py", + "description": "What to change in this file and why", + "diff_hint": "Optional: key lines to change" + } + ], + "files_read": ["path/to/file1.py", "path/to/file2.py"], + "related_decisions": [12, 5], + "notes": "Any important caveats or follow-up needed" +} +``` + +Each affected file must be a separate element in the `fixes` array. +If only one file is changed, `fixes` still must be an array with one element. + +Valid values for `status`: `"fixed"`, `"blocked"`, `"needs_more_info"`. + +If status is "blocked", include `"blocked_reason": "..."` instead of `"fixes"`. diff --git a/agents/prompts/frontend_dev.md b/agents/prompts/frontend_dev.md new file mode 100644 index 0000000..633d690 --- /dev/null +++ b/agents/prompts/frontend_dev.md @@ -0,0 +1,61 @@ +You are a Frontend Developer for the Kin multi-agent orchestrator. + +Your job: implement UI features and fixes in the Vue 3 frontend. + +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing what to build or fix +- DECISIONS: known gotchas, workarounds, and conventions for this project +- PREVIOUS STEP OUTPUT: architect spec or debugger output (if any) + +## Your responsibilities + +1. Read the relevant frontend files before making changes +2. Implement the feature or fix as described in the task brief +3. Follow existing patterns — don't invent new abstractions +4. Ensure the UI reflects backend state correctly (via API calls) +5. 
Update `web/frontend/src/api.ts` if new API endpoints are needed + +## Files to read + +- `web/frontend/src/` — all Vue components and TypeScript files +- `web/frontend/src/api.ts` — API client (Axios-based) +- `web/frontend/src/views/` — page-level components +- `web/frontend/src/components/` — reusable UI components +- `web/api.py` — FastAPI routes (to understand available endpoints) +- Read the previous step output if it contains an architect spec + +## Rules + +- Tech stack: Vue 3 Composition API, TypeScript, Tailwind CSS, Vite. +- Use `ref()` and `reactive()` — no Options API. +- API calls go through `web/frontend/src/api.ts` — never call fetch/axios directly in components. +- Do NOT modify Python backend files — scope is frontend only. +- Do NOT add new dependencies without noting it explicitly in `notes`. +- Keep components small and focused on one responsibility. + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "changes": [ + { + "file": "web/frontend/src/views/TaskDetail.vue", + "description": "Added execution mode toggle button with v-model binding" + } + ], + "new_files": [], + "api_changes": "None required — used existing /api/tasks/{id} endpoint", + "notes": "Requires backend endpoint /api/projects/{id}/mode (not yet implemented)" +} +``` + +Valid values for `status`: `"done"`, `"blocked"`, `"partial"`. + +If status is "blocked", include `"blocked_reason": "..."`. +If status is "partial", list what was completed and what remains in `notes`. diff --git a/agents/prompts/reviewer.md b/agents/prompts/reviewer.md new file mode 100644 index 0000000..b638b38 --- /dev/null +++ b/agents/prompts/reviewer.md @@ -0,0 +1,81 @@ +You are a Code Reviewer for the Kin multi-agent orchestrator. + +Your job: review the implementation for correctness, security, and adherence to project conventions. 
+ +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing what was built +- DECISIONS: project conventions and standards +- PREVIOUS STEP OUTPUT: dev agent and/or tester output describing what was changed + +## Your responsibilities + +1. Read all files mentioned in the previous step output +2. Check correctness — does the code do what the task requires? +3. Check security — SQL injection, input validation, secrets in code, OWASP top 10 +4. Check conventions — naming, structure, patterns match the rest of the codebase +5. Check test coverage — are edge cases covered? +6. Produce an actionable verdict: approve or request changes + +## Files to read + +- All source files changed (listed in previous step output) +- `core/models.py` — data layer conventions +- `web/api.py` — API conventions (error handling, response format) +- `tests/` — test coverage for the changed code +- Project decisions (provided in context) — check compliance + +## Rules + +- If you find a security issue: mark it with severity "critical" and DO NOT approve. +- Minor style issues are "low" severity — don't block on them, just note them. +- Check that new DB columns have DEFAULT values (required for backward compat). +- Check that API endpoints validate input and return proper HTTP status codes. +- Check that no secrets, tokens, or credentials are hardcoded. +- Do NOT rewrite code — only report findings and recommendations. + +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "verdict": "approved", + "findings": [ + { + "severity": "low", + "file": "core/models.py", + "line_hint": "get_effective_mode()", + "issue": "Missing docstring for public function", + "suggestion": "Add a one-line docstring" + } + ], + "security_issues": [], + "conventions_violations": [], + "test_coverage": "adequate", + "summary": "Implementation looks correct and follows project patterns. One minor style issue noted." 
+} +``` + +Valid values for `verdict`: `"approved"`, `"changes_requested"`, `"blocked"`. + +Valid values for `severity`: `"critical"`, `"high"`, `"medium"`, `"low"`. + +Valid values for `test_coverage`: `"adequate"`, `"insufficient"`, `"missing"`. + +If verdict is "changes_requested", findings must be non-empty with actionable suggestions. +If verdict is "blocked", include `"blocked_reason": "..."` (e.g. unable to read files). + +## Output field details + +**security_issues** and **conventions_violations**: Each array element is an object with the following structure: +```json +{ + "severity": "critical", + "file": "core/models.py", + "issue": "SQL injection vulnerability in query building", + "suggestion": "Use parameterized queries instead of string concatenation" +} +``` diff --git a/agents/prompts/tech_researcher.md b/agents/prompts/tech_researcher.md new file mode 100644 index 0000000..b91ed5a --- /dev/null +++ b/agents/prompts/tech_researcher.md @@ -0,0 +1,92 @@ +You are a Tech Researcher for the Kin multi-agent orchestrator. + +Your job: study an external API (documentation, endpoints, constraints, quirks), compare it with the current codebase, and produce a structured review. + +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TARGET_API: name of the API and URL to its documentation (or path to a local spec file) +- CODEBASE_SCOPE: list of files or directories to scan for existing API usage +- DECISIONS: known gotchas and workarounds for the project + +## Your responsibilities + +1. Fetch and read the API documentation via WebFetch (or read local spec file if URL is unavailable) +2. Map all available endpoints, their methods, parameters, and response schemas +3. Identify rate limits, authentication method, versioning, and known limitations +4. Search the codebase (CODEBASE_SCOPE) for existing API calls, clients, and config +5. Compare: what does the code assume vs. what the API actually provides +6. 
Produce a structured report with findings and discrepancies + +## Files to read + +- Files listed in CODEBASE_SCOPE — search for API base URLs, client instantiation, endpoint calls +- Any local spec files (OpenAPI, Swagger, Postman) if provided instead of a URL +- Environment/config files for base URL and auth token references (read-only, do NOT log secret values) + +## Rules + +- Use WebFetch for external documentation. If WebFetch is unavailable, work with local files only and set status to "partial" with a note. +- Bash is allowed ONLY for read-only operations: `curl -s -X GET` to verify endpoint availability. Never use Bash for write operations or side-effecting commands. +- Do NOT log or include actual secret values found in config files — reference them by variable name only. +- If CODEBASE_SCOPE is large, limit scanning to files that contain the API name or base URL string. +- codebase_diff must describe concrete discrepancies — e.g. "code calls /v1/users but docs show endpoint is /v2/users". +- If no discrepancies are found, set codebase_diff to an empty array. +- Do NOT write implementation code — produce research and analysis only. 
+ +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "done", + "api_overview": "One-paragraph summary of what the API does and its general design", + "endpoints": [ + { + "method": "GET", + "path": "/v1/resource", + "description": "Returns a list of resources", + "params": ["limit", "offset"], + "response_schema": "{ items: Resource[], total: number }" + } + ], + "rate_limits": { + "requests_per_minute": 60, + "requests_per_day": null, + "notes": "Per-token limits apply" + }, + "auth_method": "Bearer token in Authorization header", + "data_schemas": [ + { + "name": "Resource", + "fields": "{ id: string, name: string, created_at: ISO8601 }" + } + ], + "limitations": [ + "Pagination max page size is 100", + "Webhooks not supported — polling required" + ], + "gotchas": [ + "created_at is returned in UTC but without timezone suffix", + "Deleted resources return 200 with { deleted: true } instead of 404" + ], + "codebase_diff": [ + { + "file": "services/api_client.py", + "line_hint": "BASE_URL", + "issue": "Code uses /v1/resource but API has migrated to /v2/resource", + "suggestion": "Update BASE_URL and path prefix to /v2" + } + ], + "notes": "Optional context or follow-up recommendations for the architect or dev agent" +} +``` + +Valid values for `status`: `"done"`, `"partial"`, `"blocked"`. + +- `"partial"` — research completed with limited data (e.g. WebFetch unavailable, docs incomplete). +- `"blocked"` — unable to proceed; include `"blocked_reason": "..."`. + +If status is "partial", include `"partial_reason": "..."` explaining what was skipped. diff --git a/agents/prompts/tester.md b/agents/prompts/tester.md new file mode 100644 index 0000000..3b958f7 --- /dev/null +++ b/agents/prompts/tester.md @@ -0,0 +1,67 @@ +You are a Tester for the Kin multi-agent orchestrator. + +Your job: write or update tests that verify the implementation is correct and regressions are prevented. 
+ +## Input + +You receive: +- PROJECT: id, name, path, tech stack +- TASK: id, title, brief describing what was implemented +- PREVIOUS STEP OUTPUT: dev agent output describing what was changed (required) + +## Your responsibilities + +1. Read the previous step output to understand what was implemented +2. Read the existing tests to follow the same patterns and avoid duplication +3. Write tests that cover the new behavior and key edge cases +4. Ensure all existing tests still pass (don't break existing coverage) +5. Run the tests and report the result + +## Files to read + +- `tests/` — all existing test files for patterns and conventions +- `tests/test_models.py` — DB model tests (follow this pattern for core/ tests) +- `tests/test_api.py` — API endpoint tests (follow for web/api.py tests) +- `tests/test_runner.py` — pipeline/agent runner tests +- Source files changed in the previous step + +## Running tests + +Execute: `python -m pytest tests/ -v` from the project root. +For a specific test file: `python -m pytest tests/test_models.py -v` + +## Rules + +- Use `pytest`. No unittest, no custom test runners. +- Tests must be isolated — use in-memory SQLite (`":memory:"`), not the real `kin.db`. +- Mock `subprocess.run` when testing agent runner (never call actual Claude CLI in tests). +- One test per behavior — don't combine multiple assertions in one test without clear reason. +- Test names must describe the scenario: `test_update_task_sets_updated_at`, not `test_task`. +- Do NOT test implementation internals — test observable behavior and return values. 
+ +## Output format + +Return ONLY valid JSON (no markdown, no explanation): + +```json +{ + "status": "passed", + "tests_written": [ + { + "file": "tests/test_models.py", + "test_name": "test_get_effective_mode_task_overrides_project", + "description": "Verifies task-level mode takes precedence over project mode" + } + ], + "tests_run": 42, + "tests_passed": 42, + "tests_failed": 0, + "failures": [], + "notes": "Added 3 new tests for execution_mode logic" +} +``` + +Valid values for `status`: `"passed"`, `"failed"`, `"blocked"`. + +If status is "failed", populate `"failures"` with `[{"test": "...", "error": "..."}]`. +If status is "blocked", include `"blocked_reason": "..."`. diff --git a/agents/runner.py b/agents/runner.py index 6ae013a..33dffbe 100644 --- a/agents/runner.py +++ b/agents/runner.py @@ -11,6 +11,8 @@ import time from pathlib import Path from typing import Any +import re + from core import models from core.context_builder import build_context, format_prompt from core.hooks import run_hooks @@ -97,6 +99,7 @@ def run_agent( return { "success": success, + "error": result.get("error") if not success else None, "output": parsed_output if parsed_output else output_text, "raw_output": output_text, "role": role, @@ -153,7 +156,8 @@ def _run_claude( raw_stdout = proc.stdout or "" result: dict[str, Any] = { "output": raw_stdout, - "error": proc.stderr if proc.returncode != 0 else None, + "error": proc.stderr or None, # preserve stderr always for diagnostics + "empty_output": not raw_stdout.strip(), "returncode": proc.returncode, } @@ -358,6 +362,21 @@ def run_audit( } +# --------------------------------------------------------------------------- +# Permission error detection +# --------------------------------------------------------------------------- + +def _is_permission_error(result: dict) -> bool: + """Return True if agent result indicates a permission/write failure.""" + from core.followup import PERMISSION_PATTERNS + output = (result.get("raw_output") 
or result.get("output") or "") + if not isinstance(output, str): + output = json.dumps(output, ensure_ascii=False) + error = result.get("error") or "" + text = output + " " + error + return any(re.search(p, text) for p in PERMISSION_PATTERNS) + + # --------------------------------------------------------------------------- # Pipeline executor # --------------------------------------------------------------------------- @@ -390,6 +409,9 @@ def run_pipeline( if task.get("brief") and isinstance(task["brief"], dict): route_type = task["brief"].get("route_type", "custom") or "custom" + # Determine execution mode (auto vs review) + mode = models.get_effective_mode(conn, project_id, task_id) + # Create pipeline in DB pipeline = None if not dry_run: @@ -409,27 +431,18 @@ def run_pipeline( model = step.get("model", "sonnet") brief = step.get("brief") - result = run_agent( - conn, role, task_id, project_id, - model=model, - previous_output=previous_output, - brief_override=brief, - dry_run=dry_run, - allow_write=allow_write, - noninteractive=noninteractive, - ) - results.append(result) - - if dry_run: - continue - - # Accumulate stats - total_cost += result.get("cost_usd") or 0 - total_tokens += result.get("tokens_used") or 0 - total_duration += result.get("duration_seconds") or 0 - - if not result["success"]: - # Pipeline failed — stop and mark as failed + try: + result = run_agent( + conn, role, task_id, project_id, + model=model, + previous_output=previous_output, + brief_override=brief, + dry_run=dry_run, + allow_write=allow_write, + noninteractive=noninteractive, + ) + except Exception as exc: + exc_msg = f"Step {i+1}/{len(steps)} ({role}) raised exception: {exc}" if pipeline: models.update_pipeline( conn, pipeline["id"], @@ -438,10 +451,21 @@ def run_pipeline( total_tokens=total_tokens, total_duration_seconds=total_duration, ) - models.update_task(conn, task_id, status="blocked") + models.log_agent_run( + conn, + project_id=project_id, + task_id=task_id, + 
agent_role=role, + action="execute", + input_summary=f"task={task_id}, model={model}", + output_summary=None, + success=False, + error_message=exc_msg, + ) + models.update_task(conn, task_id, status="blocked", blocked_reason=exc_msg) return { "success": False, - "error": f"Step {i+1}/{len(steps)} ({role}) failed", + "error": exc_msg, "steps_completed": i, "results": results, "total_cost_usd": total_cost, @@ -450,6 +474,70 @@ def run_pipeline( "pipeline_id": pipeline["id"] if pipeline else None, } + if dry_run: + results.append(result) + continue + + # Accumulate stats + total_cost += result.get("cost_usd") or 0 + total_tokens += result.get("tokens_used") or 0 + total_duration += result.get("duration_seconds") or 0 + + if not result["success"]: + # Auto mode: retry once with allow_write on permission error + if mode == "auto" and not allow_write and _is_permission_error(result): + task_modules = models.get_modules(conn, project_id) + try: + run_hooks(conn, project_id, task_id, + event="task_permission_retry", + task_modules=task_modules) + except Exception: + pass + retry = run_agent( + conn, role, task_id, project_id, + model=model, + previous_output=previous_output, + brief_override=brief, + dry_run=False, + allow_write=True, + noninteractive=noninteractive, + ) + allow_write = True # subsequent steps also with allow_write + total_cost += retry.get("cost_usd") or 0 + total_tokens += retry.get("tokens_used") or 0 + total_duration += retry.get("duration_seconds") or 0 + if retry["success"]: + result = retry + + if not result["success"]: + # Still failed — block regardless of mode + results.append(result) + if pipeline: + models.update_pipeline( + conn, pipeline["id"], + status="failed", + total_cost_usd=total_cost, + total_tokens=total_tokens, + total_duration_seconds=total_duration, + ) + agent_error = result.get("error") or "" + error_msg = f"Step {i+1}/{len(steps)} ({role}) failed" + if agent_error: + error_msg += f": {agent_error}" + models.update_task(conn, 
task_id, status="blocked", blocked_reason=error_msg) + return { + "success": False, + "error": error_msg, + "steps_completed": i, + "results": results, + "total_cost_usd": total_cost, + "total_tokens": total_tokens, + "total_duration_seconds": total_duration, + "pipeline_id": pipeline["id"] if pipeline else None, + } + + results.append(result) + # Chain output to next step previous_output = result.get("raw_output") or result.get("output") if isinstance(previous_output, (dict, list)): @@ -464,10 +552,43 @@ def run_pipeline( total_tokens=total_tokens, total_duration_seconds=total_duration, ) - models.update_task(conn, task_id, status="review") + + task_modules = models.get_modules(conn, project_id) + + if mode == "auto": + # Auto mode: skip review, approve immediately + models.update_task(conn, task_id, status="done") + try: + run_hooks(conn, project_id, task_id, + event="task_auto_approved", task_modules=task_modules) + except Exception: + pass + try: + run_hooks(conn, project_id, task_id, + event="task_done", task_modules=task_modules) + except Exception: + pass + + # Auto followup: generate tasks, auto-resolve permission issues. + # Guard: skip for followup-sourced tasks to prevent infinite recursion. 
+ task_brief = task.get("brief") or {} + is_followup_task = ( + isinstance(task_brief, dict) + and str(task_brief.get("source", "")).startswith("followup:") + ) + if not is_followup_task: + try: + from core.followup import generate_followups, auto_resolve_pending_actions + fu_result = generate_followups(conn, task_id) + if fu_result.get("pending_actions"): + auto_resolve_pending_actions(conn, task_id, fu_result["pending_actions"]) + except Exception: + pass + else: + # Review mode: wait for manual approval + models.update_task(conn, task_id, status="review") # Run post-pipeline hooks (failures don't affect pipeline status) - task_modules = models.get_modules(conn, project_id) try: run_hooks(conn, project_id, task_id, event="pipeline_completed", task_modules=task_modules) @@ -483,4 +604,5 @@ def run_pipeline( "total_duration_seconds": total_duration, "pipeline_id": pipeline["id"] if pipeline else None, "dry_run": dry_run, + "mode": mode, } diff --git a/agents/specialists.yaml b/agents/specialists.yaml index 4e9342c..0a7963a 100644 --- a/agents/specialists.yaml +++ b/agents/specialists.yaml @@ -81,6 +81,26 @@ specialists: context_rules: decisions_category: security + tech_researcher: + name: "Tech Researcher" + model: sonnet + tools: [Read, Grep, Glob, WebFetch, Bash] + description: "Studies external APIs (docs, endpoints, limits, quirks), compares with codebase, produces structured review" + permissions: read_only + context_rules: + decisions: [gotcha, workaround] + output_schema: + status: "done | partial | blocked" + api_overview: string + endpoints: "array of { method, path, description, params, response_schema }" + rate_limits: "{ requests_per_minute, requests_per_day, notes }" + auth_method: string + data_schemas: "array of { name, fields }" + limitations: "array of strings" + gotchas: "array of strings" + codebase_diff: "array of { file, line_hint, issue, suggestion }" + notes: string + # Route templates — PM uses these to build pipelines routes: debug: @@ 
-102,3 +122,7 @@ routes: security_audit: steps: [security, architect] description: "Audit → remediation plan" + + api_research: + steps: [tech_researcher, architect] + description: "Study external API → integration plan" diff --git a/cli/main.py b/cli/main.py index f11f82d..bc4ba61 100644 --- a/cli/main.py +++ b/cli/main.py @@ -141,6 +141,7 @@ def project_show(ctx, id): click.echo(f" Path: {p['path']}") click.echo(f" Status: {p['status']}") click.echo(f" Priority: {p['priority']}") + click.echo(f" Mode: {p.get('execution_mode') or 'review'}") if p.get("tech_stack"): click.echo(f" Tech stack: {', '.join(p['tech_stack'])}") if p.get("forgejo_repo"): @@ -148,6 +149,21 @@ def project_show(ctx, id): click.echo(f" Created: {p['created_at']}") +@project.command("set-mode") +@click.option("--project", "project_id", required=True, help="Project ID") +@click.argument("mode", type=click.Choice(["auto", "review"])) +@click.pass_context +def project_set_mode(ctx, project_id, mode): + """Set execution mode for a project (auto|review).""" + conn = ctx.obj["conn"] + p = models.get_project(conn, project_id) + if not p: + click.echo(f"Project '{project_id}' not found.", err=True) + raise SystemExit(1) + models.update_project(conn, project_id, execution_mode=mode) + click.echo(f"Project '{project_id}' execution_mode set to '{mode}'.") + + # =========================================================================== # task # =========================================================================== @@ -204,11 +220,15 @@ def task_show(ctx, id): if not t: click.echo(f"Task '{id}' not found.", err=True) raise SystemExit(1) + effective_mode = models.get_effective_mode(conn, t["project_id"], t["id"]) + task_mode = t.get("execution_mode") + mode_label = f"{effective_mode} (overridden)" if task_mode else f"{effective_mode} (inherited)" click.echo(f"Task: {t['id']}") click.echo(f" Project: {t['project_id']}") click.echo(f" Title: {t['title']}") click.echo(f" Status: {t['status']}") 
click.echo(f" Priority: {t['priority']}") + click.echo(f" Mode: {mode_label}") if t.get("assigned_role"): click.echo(f" Role: {t['assigned_role']}") if t.get("parent_task_id"): @@ -223,13 +243,14 @@ def task_show(ctx, id): @task.command("update") @click.argument("task_id") -@click.option("--status", type=click.Choice( - ["pending", "in_progress", "review", "done", "blocked", "decomposed", "cancelled"]), +@click.option("--status", type=click.Choice(models.VALID_TASK_STATUSES), default=None, help="New status") @click.option("--priority", type=int, default=None, help="New priority (1-10)") +@click.option("--mode", "mode", type=click.Choice(["auto", "review"]), + default=None, help="Override execution mode for this task") @click.pass_context -def task_update(ctx, task_id, status, priority): - """Update a task's status or priority.""" +def task_update(ctx, task_id, status, priority, mode): + """Update a task's status, priority, or execution mode.""" conn = ctx.obj["conn"] t = models.get_task(conn, task_id) if not t: @@ -240,11 +261,13 @@ def task_update(ctx, task_id, status, priority): fields["status"] = status if priority is not None: fields["priority"] = priority + if mode is not None: + fields["execution_mode"] = mode if not fields: - click.echo("Nothing to update. Use --status or --priority.", err=True) + click.echo("Nothing to update. Use --status, --priority, or --mode.", err=True) raise SystemExit(1) updated = models.update_task(conn, task_id, **fields) - click.echo(f"Updated {updated['id']}: status={updated['status']}, priority={updated['priority']}") + click.echo(f"Updated {updated['id']}: status={updated['status']}, priority={updated['priority']}, mode={updated.get('execution_mode') or '(inherited)'}") # =========================================================================== @@ -816,7 +839,8 @@ def hook_logs(ctx, project_id, limit): def hook_setup(ctx, project_id, scripts_dir): """Register standard hooks for a project. 
-    Currently registers: rebuild-frontend (fires on web/frontend/* changes).
+    Registers: rebuild-frontend (fires on pipeline_completed),
+    auto-commit (fires on task_done — git add -A && git commit).
 
     Idempotent — skips hooks that already exist.
     """
     conn = ctx.obj["conn"]
@@ -838,7 +862,6 @@ def hook_setup(ctx, project_id, scripts_dir):
             name="rebuild-frontend",
             event="pipeline_completed",
             command=rebuild_cmd,
-            trigger_module_path="web/frontend/*",
             working_dir=p.get("path"),
             timeout_seconds=300,
         )
@@ -846,6 +869,20 @@
     else:
         click.echo("Hook 'rebuild-frontend' already exists, skipping.")
 
+    if "auto-commit" not in existing_names:
+        project_path = str(Path(p.get("path", ".")).expanduser())
+        hooks_module.create_hook(
+            conn, project_id,
+            name="auto-commit",
+            event="task_done",
+            command='git add -A && git commit -m "kin: {task_id} {title}"',
+            working_dir=project_path,
+            timeout_seconds=30,
+        )
+        created.append("auto-commit")
+    else:
+        click.echo("Hook 'auto-commit' already exists, skipping.")
+
     if created:
         click.echo(f"Registered hooks: {', '.join(created)}")
 
diff --git a/core/db.py b/core/db.py
index f3f26bc..b91d29c 100644
--- a/core/db.py
+++ b/core/db.py
@@ -21,6 +21,7 @@ CREATE TABLE IF NOT EXISTS projects (
     claude_md_path TEXT,
     forgejo_repo TEXT,
     language TEXT DEFAULT 'ru',
+    execution_mode TEXT NOT NULL DEFAULT 'review',
     created_at DATETIME DEFAULT CURRENT_TIMESTAMP
 );
 
@@ -39,6 +40,8 @@ CREATE TABLE IF NOT EXISTS tasks (
     test_result JSON,
     security_result JSON,
     forgejo_issue_id INTEGER,
+    execution_mode TEXT,
+    blocked_reason TEXT,
     created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
     updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
 );
@@ -196,10 +199,22 @@ def get_connection(db_path: Path = DB_PATH) -> sqlite3.Connection:
 def _migrate(conn: sqlite3.Connection):
     """Run migrations for existing databases."""
     # Check if language column exists on projects
-    cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()}
-    if "language" not in cols:
+    proj_cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()}
+    if "language" not in proj_cols:
         conn.execute("ALTER TABLE projects ADD COLUMN language TEXT DEFAULT 'ru'")
         conn.commit()
+    if "execution_mode" not in proj_cols:
+        conn.execute("ALTER TABLE projects ADD COLUMN execution_mode TEXT NOT NULL DEFAULT 'review'")
+        conn.commit()
+
+    # Check if execution_mode column exists on tasks
+    task_cols = {r[1] for r in conn.execute("PRAGMA table_info(tasks)").fetchall()}
+    if "execution_mode" not in task_cols:
+        conn.execute("ALTER TABLE tasks ADD COLUMN execution_mode TEXT")
+        conn.commit()
+    if "blocked_reason" not in task_cols:
+        conn.execute("ALTER TABLE tasks ADD COLUMN blocked_reason TEXT")
+        conn.commit()
 
 
 def init_db(db_path: Path = DB_PATH) -> sqlite3.Connection:
diff --git a/core/followup.py b/core/followup.py
index df19328..3a01c23 100644
--- a/core/followup.py
+++ b/core/followup.py
@@ -11,7 +11,7 @@ import sqlite3
 from core import models
 from core.context_builder import format_prompt, PROMPTS_DIR
 
-_PERMISSION_PATTERNS = [
+PERMISSION_PATTERNS = [
     r"(?i)permission\s+denied",
     r"(?i)ручное\s+применение",
     r"(?i)не\s+получил[иа]?\s+разрешени[ея]",
@@ -27,7 +27,7 @@ _PERMISSION_PATTERNS = [
 def _is_permission_blocked(item: dict) -> bool:
     """Check if a follow-up item describes a permission/write failure."""
     text = f"{item.get('title', '')} {item.get('brief', '')}".lower()
-    return any(re.search(p, text) for p in _PERMISSION_PATTERNS)
+    return any(re.search(p, text) for p in PERMISSION_PATTERNS)
 
 
 def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str:
@@ -230,3 +230,30 @@ def resolve_pending_action(
         return {"rerun_result": result}
 
     return None
+
+
+def auto_resolve_pending_actions(
+    conn: sqlite3.Connection,
+    task_id: str,
+    pending_actions: list,
+) -> list:
+    """Auto-resolve pending permission actions in auto mode.
+
+    Strategy: try 'rerun' first; if rerun fails → escalate to 'manual_task'.
+    Returns list of resolution results.
+    """
+    results = []
+    for action in pending_actions:
+        result = resolve_pending_action(conn, task_id, action, "rerun")
+        rerun_success = (
+            isinstance(result, dict)
+            and isinstance(result.get("rerun_result"), dict)
+            and result["rerun_result"].get("success")
+        )
+        if rerun_success:
+            results.append({"resolved": "rerun", "result": result})
+        else:
+            # Rerun failed → create manual task for human review
+            manual = resolve_pending_action(conn, task_id, action, "manual_task")
+            results.append({"resolved": "manual_task", "result": manual})
+    return results
diff --git a/core/hooks.py b/core/hooks.py
index 1b9775b..c68df47 100644
--- a/core/hooks.py
+++ b/core/hooks.py
@@ -146,6 +146,17 @@ def _get_hook(conn: sqlite3.Connection, hook_id: int) -> dict:
     return dict(row) if row else {}
 
 
+def _substitute_vars(command: str, task_id: str | None, conn: sqlite3.Connection) -> str:
+    """Substitute {task_id} and {title} in hook command."""
+    if task_id is None or "{task_id}" not in command and "{title}" not in command:
+        return command
+    row = conn.execute("SELECT title FROM tasks WHERE id = ?", (task_id,)).fetchone()
+    title = row["title"] if row else ""
+    # Sanitize title for shell safety: inside double quotes the shell still expands
+    # $, backticks and backslashes, so neutralize those along with quotes/newlines
+    safe_title = title.replace('"', "'").replace("`", "'").replace("$", "").replace("\\", "").replace("\n", " ").replace("\r", "")
+    return command.replace("{task_id}", task_id).replace("{title}", safe_title)
+
+
 def _execute_hook(
     conn: sqlite3.Connection,
     hook: dict,
@@ -159,9 +171,11 @@
     exit_code = -1
     success = False
 
+    command = _substitute_vars(hook["command"], task_id, conn)
+
     try:
         proc = subprocess.run(
-            hook["command"],
+            command,
             shell=True,
             cwd=hook.get("working_dir") or None,
             capture_output=True,
diff --git a/core/models.py b/core/models.py
index d7bb075..0a4825b 100644
--- a/core/models.py
+++ b/core/models.py
@@ -9,6 +9,12 @@ from datetime import datetime
 from typing import 
Any +VALID_TASK_STATUSES = [ + "pending", "in_progress", "review", "done", + "blocked", "decomposed", "cancelled", +] + + def _row_to_dict(row: sqlite3.Row | None) -> dict | None: """Convert sqlite3.Row to dict with JSON fields decoded.""" if row is None: @@ -51,14 +57,15 @@ def create_project( claude_md_path: str | None = None, forgejo_repo: str | None = None, language: str = "ru", + execution_mode: str = "review", ) -> dict: """Create a new project and return it as dict.""" conn.execute( """INSERT INTO projects (id, name, path, tech_stack, status, priority, - pm_prompt, claude_md_path, forgejo_repo, language) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + pm_prompt, claude_md_path, forgejo_repo, language, execution_mode) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", (id, name, path, _json_encode(tech_stack), status, priority, - pm_prompt, claude_md_path, forgejo_repo, language), + pm_prompt, claude_md_path, forgejo_repo, language, execution_mode), ) conn.commit() return get_project(conn, id) @@ -70,6 +77,20 @@ def get_project(conn: sqlite3.Connection, id: str) -> dict | None: return _row_to_dict(row) +def get_effective_mode(conn: sqlite3.Connection, project_id: str, task_id: str) -> str: + """Return effective execution mode: 'auto' or 'review'. 
+ + Priority: task.execution_mode > project.execution_mode > 'review' + """ + task = get_task(conn, task_id) + if task and task.get("execution_mode"): + return task["execution_mode"] + project = get_project(conn, project_id) + if project: + return project.get("execution_mode") or "review" + return "review" + + def list_projects(conn: sqlite3.Connection, status: str | None = None) -> list[dict]: """List projects, optionally filtered by status.""" if status: @@ -114,15 +135,17 @@ def create_task( brief: dict | None = None, spec: dict | None = None, forgejo_issue_id: int | None = None, + execution_mode: str | None = None, ) -> dict: """Create a task linked to a project.""" conn.execute( """INSERT INTO tasks (id, project_id, title, status, priority, - assigned_role, parent_task_id, brief, spec, forgejo_issue_id) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", + assigned_role, parent_task_id, brief, spec, forgejo_issue_id, + execution_mode) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""", (id, project_id, title, status, priority, assigned_role, parent_task_id, _json_encode(brief), _json_encode(spec), - forgejo_issue_id), + forgejo_issue_id, execution_mode), ) conn.commit() return get_task(conn, id) @@ -232,6 +255,19 @@ def get_decisions( return _rows_to_list(conn.execute(query, params).fetchall()) +def get_decision(conn: sqlite3.Connection, decision_id: int) -> dict | None: + """Get a single decision by id.""" + row = conn.execute("SELECT * FROM decisions WHERE id = ?", (decision_id,)).fetchone() + return _row_to_dict(row) if row else None + + +def delete_decision(conn: sqlite3.Connection, decision_id: int) -> bool: + """Delete a decision by id. 
Returns True if deleted, False if not found.""" + cur = conn.execute("DELETE FROM decisions WHERE id = ?", (decision_id,)) + conn.commit() + return cur.rowcount > 0 + + # --------------------------------------------------------------------------- # Modules # --------------------------------------------------------------------------- diff --git a/tasks/adr-automode.md b/tasks/adr-automode.md new file mode 100644 index 0000000..9eb4d23 --- /dev/null +++ b/tasks/adr-automode.md @@ -0,0 +1,233 @@ +# ADR: Auto Mode — полный автопилот (KIN-012) + +**Дата:** 2026-03-15 +**Статус:** Accepted +**Автор:** architect (KIN-012) + +--- + +## Контекст + +Задача: реализовать два режима исполнения пайплайнов: + +- **Auto** — полный автопилот: pipeline → auto-approve → auto-followup → auto-rerun при permission issues → hooks. Без остановок на review/blocked. +- **Review** — текущее поведение: задача уходит в статус `review`, ждёт ручного approve. + +### Что уже реализовано (анализ кода) + +**1. Хранение режима — `core/db.py`** +- `projects.execution_mode TEXT NOT NULL DEFAULT 'review'` — дефолт на уровне проекта +- `tasks.execution_mode TEXT` — nullable, переопределяет проект +- Миграции добавляют оба столбца к существующим БД + +**2. Приоритет режима — `core/models.py:get_effective_mode()`** +``` +task.execution_mode > project.execution_mode > 'review' +``` +Вычисляется один раз в начале `run_pipeline`. + +**3. Auto-approve — `agents/runner.py:run_pipeline()`** (строки 519–536) +```python +if mode == "auto": + models.update_task(conn, task_id, status="done") + run_hooks(conn, project_id, task_id, event="task_auto_approved", ...) +else: + models.update_task(conn, task_id, status="review") +``` + +**4. Permission retry — `agents/runner.py:run_pipeline()`** (строки 453–475) +```python +if mode == "auto" and not allow_write and _is_permission_error(result): + run_hooks(..., event="task_permission_retry", ...) 
+ retry = run_agent(..., allow_write=True) + allow_write = True # propagates to all subsequent steps +``` +- Срабатывает **только в auto режиме** +- Ровно **1 попытка retry** на шаг +- После первого retry `allow_write=True` сохраняется на весь оставшийся пайплайн + +**5. Паттерны permission errors — `core/followup.py:PERMISSION_PATTERNS`** +``` +permission denied, ручное применение, cannot write, read-only, manually appl, ... +``` + +**6. Post-pipeline hooks — `core/hooks.py`** +События: `pipeline_completed`, `task_auto_approved`, `task_permission_retry` + +--- + +## Пробелы — что НЕ реализовано + +### Gap 1: Auto-followup не вызывается из run_pipeline +`generate_followups()` существует в `core/followup.py`, но нигде не вызывается автоматически. В `run_pipeline` после завершения пайплайна — только хуки. + +### Gap 2: Auto-resolution pending_actions в auto mode +`generate_followups()` возвращает `pending_actions` (permission-blocked followup items) с опциями `["rerun", "manual_task", "skip"]`. В auto mode нет логики автоматического выбора опции. + +### Gap 3: Наследование режима followup-задачами +Задачи, созданные через `generate_followups()`, создаются с `execution_mode=None` (наследуют от проекта). Это правильное поведение, но не задокументировано. + +--- + +## Решения + +### D1: Где хранить режим + +**Решение:** двухуровневая иерархия (уже реализована, зафиксируем). + +| Уровень | Поле | Дефолт | Переопределяет | +|---------|------|--------|----------------| +| Глобальный | — | `review` | — | +| Проект | `projects.execution_mode` | `'review'` | глобальный | +| Задача | `tasks.execution_mode` | `NULL` | проект | + +Глобального конфига нет — осознанное решение. Каждый проект управляет своим режимом. Задача может переопределить проект (например, форсировать `review` для security-sensitive задач). + +**Изменения БД не нужны** — структура готова. + +--- + +### D2: Как runner обходит ожидание approve в auto mode + +**Решение:** уже реализовано. 
Зафиксируем контракт: + +``` +run_pipeline() в auto mode: + 1. Все шаги выполняются последовательно + 2. При успехе → task.status = "done" (минуя "review") + 3. Хук task_auto_approved + pipeline_completed + 4. generate_followups() автоматически (Gap 1, см. D4) +``` + +В review mode — без изменений: `task.status = "review"`, `generate_followups()` не вызывается автоматически. + +--- + +### D3: Auto-rerun при permission issues — лимит и критерии + +**Что считать permission issue:** +Паттерны из `PERMISSION_PATTERNS` в `core/followup.py`. Список достаточен, расширяется при необходимости через PR. + +**Лимит попыток:** +**1 retry per step** (уже реализовано). Обоснование: +- Permission issue — либо системная проблема (нет прав на директорию), либо claude CLI требует `--dangerously-skip-permissions` +- Второй retry с теми же параметрами не имеет смысла — проблема детерминированная +- Если 1 retry не помог → `task.status = "blocked"` даже в auto mode + +**Поведение после retry:** +`allow_write=True` применяется ко **всем последующим шагам** пайплайна (не только retry шагу). Это безопасно в контексте Kin — агенты работают в изолированном рабочем каталоге проекта. + +**Хук `task_permission_retry`:** +Срабатывает перед retry — позволяет логировать / оповещать, но не блокирует. + +**Итоговая таблица поведения при failure:** + +| Режим | Тип ошибки | Поведение | +|-------|-----------|-----------| +| auto | permission error (первый) | retry с allow_write=True | +| auto | permission error (после retry) | blocked | +| auto | любая другая ошибка | blocked | +| review | любая ошибка | blocked | + +--- + +### D4: Auto-followup интеграция с post-pipeline hooks + +**Решение:** `generate_followups()` вызывается из `run_pipeline()` в auto mode **после** `task_auto_approved` хука. + +Порядок событий в auto mode: +``` +1. pipeline успешно завершён +2. task.status = "done" +3. хук: task_auto_approved ← пользовательские хуки (rebuild-frontend и т.д.) +4. 
generate_followups() ← анализируем output, создаём followup задачи +5. хук: pipeline_completed ← финальное уведомление +``` + +В review mode: +``` +1. pipeline успешно завершён +2. task.status = "review" +3. хук: pipeline_completed + ← generate_followups() НЕ вызывается (ждём manual approve) +``` + +**Почему после task_auto_approved, а не до:** +Хуки типа `rebuild-frontend` (KIN-010) изменяют состояние файловой системы. Followup-агент должен видеть актуальное состояние проекта после всех хуков. + +--- + +### D5: Auto-resolution pending_actions в auto mode + +`generate_followups()` может вернуть `pending_actions` — элементы, заблокированные из-за permission issues. В auto mode нужна автоматическая стратегия. + +**Решение:** в auto mode `pending_actions` резолвятся как `"rerun"`. + +Обоснование: +- Auto mode = полный автопилот, пользователь не должен принимать решения +- "rerun" — наиболее агрессивная и полезная стратегия: повторяем шаг с `allow_write=True` +- Если rerun снова даёт permission error → создаётся manual_task (escalation) + +``` +auto mode + pending_action: + → resolve_pending_action(choice="rerun") + → если rerun провалился → create manual_task с тегом "auto_escalated" + → всё логируется + +review mode + pending_action: + → возвращается пользователю через API для ручного выбора +``` + +--- + +### D6: Наследование режима followup-задачами + +Задачи, созданные через `generate_followups()`, создаются с `execution_mode=None`. + +**Решение:** followup-задачи наследуют режим через проект (существующая иерархия D1). +Явно устанавливать `execution_mode` в followup-задачах **не нужно** — если проект в auto, все его задачи по умолчанию в auto. + +Исключение: если оригинальная задача была в `review` (ручной override), followup-задачи НЕ наследуют это — они создаются "чисто" от проекта. Это намеренное поведение: override в задаче — разовое действие. 
+ +--- + +## Итоговая карта изменений (что нужно реализовать) + +| # | Файл | Изменение | Gap | +|---|------|----------|-----| +| 1 | `agents/runner.py` | Вызов `generate_followups()` в auto mode после `task_auto_approved` | D4 | +| 2 | `core/followup.py` | Auto-resolution `pending_actions` в `generate_followups()` при auto mode | D5 | +| 3 | `web/api.py` | Endpoint для смены `execution_mode` проекта/задачи | — | +| 4 | `web/frontend` | UI переключатель Auto/Review (project settings + task detail) | — | + +**Что НЕ нужно менять:** +- `core/db.py` — схема готова +- `core/models.py` — `get_effective_mode()` готов +- `core/hooks.py` — события готовы +- Permission detection в `runner.py` — готово + +--- + +## Риски и ограничения + +1. **Стоимость в auto mode**: `generate_followups()` добавляет один запуск агента после каждого пайплайна. При высокой нагрузке это существенный overhead. Митигация: `generate_followups()` можно сделать опциональным (флаг `auto_followup` в project settings). + +2. **Permission retry scope**: `allow_write=True` после первого retry применяется ко всем последующим шагам. Это агрессивно, но допустимо, т.к. агент уже начал писать файлы. + +3. **Infinite loop в auto-followup**: если followup создаёт задачи, а те создают ещё followup — нет механизма остановки. Митигация: `parent_task_id` позволяет отслеживать глубину. Задачи с `source: followup:*` глубже 1 уровня — не генерируют followup автоматически. + +4. **Race condition**: если два пайплайна запускаются для одной задачи одновременно — БД-уровень не блокирует. SQLite WAL + `task.status = 'in_progress'` в начале пайплайна дают частичную защиту, но не полную. 
+ +--- + +## Статус реализации + +- [x] DB schema: `execution_mode` в `projects` и `tasks` +- [x] `get_effective_mode()` с приоритетом task > project > review +- [x] Auto-approve: `task.status = "done"` в auto mode +- [x] Permission retry: 1 попытка с `allow_write=True` +- [x] Хуки: `task_auto_approved`, `pipeline_completed`, `task_permission_retry` +- [ ] Auto-followup: вызов `generate_followups()` из `run_pipeline()` в auto mode (Gap 1) +- [ ] Auto-resolution `pending_actions` в auto mode (Gap 2) +- [ ] API endpoints для управления `execution_mode` +- [ ] Frontend UI для Auto/Review переключателя diff --git a/tests/test_api.py b/tests/test_api.py index d8939d1..3109486 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -105,6 +105,18 @@ def test_approve_not_found(client): assert r.status_code == 404 +def test_approve_fires_task_done_hooks(client): + """Ручной апрув задачи должен вызывать хуки с event='task_done'.""" + from unittest.mock import patch + with patch("core.hooks.run_hooks") as mock_hooks: + mock_hooks.return_value = [] + r = client.post("/api/tasks/P1-001/approve", json={}) + assert r.status_code == 200 + events_fired = [call[1].get("event") or call[0][3] + for call in mock_hooks.call_args_list] + assert "task_done" in events_fired + + def test_reject_task(client): from core.db import init_db from core import models @@ -173,14 +185,15 @@ def test_run_not_found(client): assert r.status_code == 404 -def test_run_with_allow_write(client): - """POST /run with allow_write=true should be accepted.""" - r = client.post("/api/tasks/P1-001/run", json={"allow_write": True}) +def test_run_kin_038_without_allow_write(client): + """Регрессионный тест KIN-038: allow_write удалён из схемы, + эндпоинт принимает запросы с пустым телом без этого параметра.""" + r = client.post("/api/tasks/P1-001/run", json={}) assert r.status_code == 202 def test_run_with_empty_body(client): - """POST /run with empty JSON body should default allow_write=false.""" + """POST /run 
with empty JSON body should be accepted.""" r = client.post("/api/tasks/P1-001/run", json={}) assert r.status_code == 202 @@ -256,14 +269,61 @@ def test_patch_task_status_persisted(client): assert r.json()["status"] == "blocked" -@pytest.mark.parametrize("status", ["pending", "in_progress", "review", "done", "blocked", "cancelled"]) +@pytest.mark.parametrize("status", ["pending", "in_progress", "review", "done", "blocked", "decomposed", "cancelled"]) def test_patch_task_all_valid_statuses(client, status): - """Все 6 допустимых статусов должны приниматься.""" + """Все 7 допустимых статусов должны приниматься (включая decomposed).""" r = client.patch("/api/tasks/P1-001", json={"status": status}) assert r.status_code == 200 assert r.json()["status"] == status +def test_patch_task_status_decomposed(client): + """Регрессионный тест KIN-033: API принимает статус 'decomposed'.""" + r = client.patch("/api/tasks/P1-001", json={"status": "decomposed"}) + assert r.status_code == 200 + assert r.json()["status"] == "decomposed" + + +def test_patch_task_status_decomposed_persisted(client): + """После установки 'decomposed' повторный GET возвращает этот статус.""" + client.patch("/api/tasks/P1-001", json={"status": "decomposed"}) + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + assert r.json()["status"] == "decomposed" + + +# --------------------------------------------------------------------------- +# KIN-033 — единый источник истины для статусов +# --------------------------------------------------------------------------- + +def test_api_valid_statuses_match_models(): + """API использует models.VALID_TASK_STATUSES как единственный источник истины.""" + from core import models + import web.api as api_module + assert api_module.VALID_STATUSES == set(models.VALID_TASK_STATUSES) + + +def test_cli_valid_statuses_match_models(): + """CLI использует models.VALID_TASK_STATUSES как единственный источник истины.""" + from core import models + from cli.main import 
task_update + status_param = next(p for p in task_update.params if p.name == "status") + cli_choices = set(status_param.type.choices) + assert cli_choices == set(models.VALID_TASK_STATUSES) + + +def test_cli_and_api_statuses_are_identical(): + """Список статусов в CLI и API идентичен.""" + from core import models + import web.api as api_module + from cli.main import task_update + status_param = next(p for p in task_update.params if p.name == "status") + cli_choices = set(status_param.type.choices) + assert cli_choices == api_module.VALID_STATUSES + assert "decomposed" in cli_choices + assert "decomposed" in api_module.VALID_STATUSES + + def test_patch_task_invalid_status(client): """Недопустимый статус → 400.""" r = client.patch("/api/tasks/P1-001", json={"status": "flying"}) @@ -274,3 +334,258 @@ def test_patch_task_not_found(client): """Несуществующая задача → 404.""" r = client.patch("/api/tasks/NOPE-999", json={"status": "done"}) assert r.status_code == 404 + + +def test_patch_task_empty_body_returns_400(client): + """PATCH с пустым телом (нет status и нет execution_mode) → 400.""" + r = client.patch("/api/tasks/P1-001", json={}) + assert r.status_code == 400 + + +# --------------------------------------------------------------------------- +# KIN-022 — blocked_reason: регрессионные тесты +# --------------------------------------------------------------------------- + +def test_blocked_reason_saved_and_returned(client): + """При переходе в blocked с blocked_reason поле сохраняется и отдаётся в GET.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="blocked", + blocked_reason="Step 1/2 (debugger) failed") + conn.close() + + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + data = r.json() + assert data["status"] == "blocked" + assert data["blocked_reason"] == "Step 1/2 (debugger) failed" + + +def test_blocked_reason_present_in_full(client): + 
"""blocked_reason также присутствует в /full эндпоинте.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="blocked", + blocked_reason="tester agent crashed") + conn.close() + + r = client.get("/api/tasks/P1-001/full") + assert r.status_code == 200 + data = r.json() + assert data["status"] == "blocked" + assert data["blocked_reason"] == "tester agent crashed" + + +def test_blocked_reason_none_by_default(client): + """Новая задача не имеет blocked_reason.""" + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + data = r.json() + assert data["blocked_reason"] is None + + +def test_blocked_without_reason_allowed(client): + """Переход в blocked без причины допустим (reason=None).""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="blocked") + conn.close() + + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + data = r.json() + assert data["status"] == "blocked" + assert data["blocked_reason"] is None + + +def test_blocked_reason_cleared_on_retry(client): + """При повторном запуске (статус pending) blocked_reason сбрасывается.""" + from core.db import init_db + from core import models + conn = init_db(api_module.DB_PATH) + models.update_task(conn, "P1-001", status="blocked", + blocked_reason="failed once") + models.update_task(conn, "P1-001", status="pending", blocked_reason=None) + conn.close() + + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + data = r.json() + assert data["status"] == "pending" + assert data["blocked_reason"] is None + + +# --------------------------------------------------------------------------- +# KIN-029 — DELETE /api/projects/{project_id}/decisions/{decision_id} +# --------------------------------------------------------------------------- + +def test_delete_decision_ok(client): + """Создаём decision через POST, 
удаляем DELETE → 200 с телом {"deleted": id}.""" + r = client.post("/api/decisions", json={ + "project_id": "p1", + "type": "decision", + "title": "Use SQLite", + "description": "Chosen for simplicity", + }) + assert r.status_code == 200 + decision_id = r.json()["id"] + + r = client.delete(f"/api/projects/p1/decisions/{decision_id}") + assert r.status_code == 200 + assert r.json() == {"deleted": decision_id} + + r = client.get("/api/decisions?project=p1") + assert r.status_code == 200 + ids = [d["id"] for d in r.json()] + assert decision_id not in ids + + +def test_delete_decision_not_found(client): + """DELETE несуществующего decision → 404.""" + r = client.delete("/api/projects/p1/decisions/99999") + assert r.status_code == 404 + + +def test_delete_decision_wrong_project(client): + """DELETE decision с чужим project_id → 404 (не раскрываем существование).""" + r = client.post("/api/decisions", json={ + "project_id": "p1", + "type": "decision", + "title": "Cross-project check", + "description": "Should not be deletable from p2", + }) + assert r.status_code == 200 + decision_id = r.json()["id"] + + r = client.delete(f"/api/projects/p2/decisions/{decision_id}") + assert r.status_code == 404 + + # Decision должен остаться нетронутым + r = client.get("/api/decisions?project=p1") + ids = [d["id"] for d in r.json()] + assert decision_id in ids + + +# --------------------------------------------------------------------------- +# KIN-035 — регрессионный тест: смена статуса на cancelled +# --------------------------------------------------------------------------- + +def test_patch_task_status_cancelled(client): + """Регрессионный тест KIN-035: PATCH /api/tasks/{id} с status='cancelled' → 200.""" + r = client.patch("/api/tasks/P1-001", json={"status": "cancelled"}) + assert r.status_code == 200 + assert r.json()["status"] == "cancelled" + + +def test_patch_task_status_cancelled_persisted(client): + """После установки 'cancelled' повторный GET возвращает этот статус.""" + 
client.patch("/api/tasks/P1-001", json={"status": "cancelled"}) + r = client.get("/api/tasks/P1-001") + assert r.status_code == 200 + assert r.json()["status"] == "cancelled" + + +def test_cancelled_in_valid_statuses(): + """'cancelled' присутствует в VALID_TASK_STATUSES модели и в VALID_STATUSES API.""" + from core import models + import web.api as api_module + assert "cancelled" in models.VALID_TASK_STATUSES + assert "cancelled" in api_module.VALID_STATUSES + + +# --------------------------------------------------------------------------- +# KIN-036 — регрессионный тест: --allow-write всегда в команде через web API +# --------------------------------------------------------------------------- + +def test_run_always_includes_allow_write_when_body_false(client): + """Регрессионный тест KIN-036: --allow-write присутствует в команде, + даже если allow_write=False в теле запроса. + + Баг: условие `if body and body.allow_write` не добавляло флаг при + allow_write=False, что приводило к блокировке агента на 300 с.""" + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 12345 + mock_popen.return_value = mock_proc + + r = client.post("/api/tasks/P1-001/run", json={"allow_write": False}) + assert r.status_code == 202 + + cmd = mock_popen.call_args[0][0] + assert "--allow-write" in cmd, ( + "--allow-write обязан присутствовать всегда: без него агент зависает " + "при попытке записи, потому что stdin=DEVNULL и нет интерактивного подтверждения" + ) + + +def test_run_always_includes_allow_write_without_body(client): + """Регрессионный тест KIN-036: --allow-write присутствует даже без тела запроса.""" + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 12345 + mock_popen.return_value = mock_proc + + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + cmd = 
mock_popen.call_args[0][0] + assert "--allow-write" in cmd + + +def test_run_sets_kin_noninteractive_env(client): + """Регрессионный тест KIN-036: KIN_NONINTERACTIVE=1 всегда устанавливается + при запуске через web API, что вместе с --allow-write предотвращает зависание.""" + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 99 + mock_popen.return_value = mock_proc + + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + call_kwargs = mock_popen.call_args[1] + env = call_kwargs.get("env", {}) + assert env.get("KIN_NONINTERACTIVE") == "1" + + +def test_run_sets_stdin_devnull(client): + """Регрессионный тест KIN-036: stdin=DEVNULL всегда устанавливается, + что является причиной, по которой --allow-write обязателен.""" + import subprocess as _subprocess + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 42 + mock_popen.return_value = mock_proc + + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + call_kwargs = mock_popen.call_args[1] + assert call_kwargs.get("stdin") == _subprocess.DEVNULL + + +# --------------------------------------------------------------------------- +# KIN-040 — регрессионные тесты: удаление TaskRun / allow_write из схемы +# --------------------------------------------------------------------------- + +def test_run_kin_040_no_taskrun_class(): + """Регрессионный тест KIN-040: класс TaskRun удалён из web/api.py. + allow_write больше не является частью схемы эндпоинта /run.""" + import web.api as api_module + assert not hasattr(api_module, "TaskRun"), ( + "Класс TaskRun должен быть удалён из web/api.py (KIN-040)" + ) + + +def test_run_kin_040_allow_write_true_ignored(client): + """Регрессионный тест KIN-040: allow_write=True в теле игнорируется (не 422). 
+ Эндпоинт не имеет body-параметра, поэтому FastAPI не валидирует тело.""" + r = client.post("/api/tasks/P1-001/run", json={"allow_write": True}) + assert r.status_code == 202 diff --git a/tests/test_auto_mode.py b/tests/test_auto_mode.py new file mode 100644 index 0000000..e71c1e7 --- /dev/null +++ b/tests/test_auto_mode.py @@ -0,0 +1,478 @@ +""" +Tests for KIN-012 auto mode features: + +- TestAutoApprove: pipeline auto-approves (status → done) без ручного review +- TestAutoRerunOnPermissionDenied: runner делает retry при permission error, + останавливается после одного retry (лимит = 1) +- TestAutoFollowup: generate_followups вызывается сразу, без ожидания +- Регрессия: review-режим работает как раньше +""" + +import json +import pytest +from unittest.mock import patch, MagicMock, call + +from core.db import init_db +from core import models +from agents.runner import run_pipeline, _is_permission_error + + +# --------------------------------------------------------------------------- +# Fixtures & helpers +# --------------------------------------------------------------------------- + +@pytest.fixture +def conn(): + c = init_db(":memory:") + models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek", + tech_stack=["vue3"]) + models.create_task(c, "VDOL-001", "vdol", "Fix bug", + brief={"route_type": "debug"}) + yield c + c.close() + + +def _mock_success(output="done"): + """Мок успешного subprocess.run (claude).""" + mock = MagicMock() + mock.stdout = json.dumps({"result": output}) + mock.stderr = "" + mock.returncode = 0 + return mock + + +def _mock_permission_denied(): + """Мок subprocess.run, возвращающего permission denied.""" + mock = MagicMock() + mock.stdout = json.dumps({"result": "permission denied on write to config.js"}) + mock.stderr = "Error: permission denied" + mock.returncode = 1 + return mock + + +def _mock_failure(error="Agent failed"): + """Мок subprocess.run, возвращающего общую ошибку.""" + mock = MagicMock() + mock.stdout = "" + 
mock.stderr = error + mock.returncode = 1 + return mock + + +def _get_hook_events(mock_hooks): + """Извлечь список event из всех вызовов mock_hooks.""" + return [c[1].get("event") for c in mock_hooks.call_args_list] + + +# --------------------------------------------------------------------------- +# test_auto_approve +# --------------------------------------------------------------------------- + +class TestAutoApprove: + """Pipeline auto-approve: в auto-режиме задача переходит в done без ручного review.""" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_sets_status_done(self, mock_run, mock_hooks, mock_followup, conn): + """Auto-режим: статус задачи становится 'done', а не 'review'.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done", "Auto-mode должен auto-approve: status=done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_fires_task_auto_approved_hook(self, mock_run, mock_hooks, mock_followup, conn): + """В auto-режиме срабатывает хук task_auto_approved.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find bug"}] + run_pipeline(conn, "VDOL-001", steps) + + events = _get_hook_events(mock_hooks) + assert "task_auto_approved" in events, "Хук task_auto_approved должен сработать" + + 
@patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_sets_status_review(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия: review-режим НЕ auto-approve — статус остаётся 'review'.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект остаётся в default "review" mode + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", "Review-mode НЕ должен auto-approve" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_does_not_fire_auto_approved_hook(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия: в review-режиме хук task_auto_approved НЕ срабатывает.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + steps = [{"role": "debugger", "brief": "find bug"}] + run_pipeline(conn, "VDOL-001", steps) + + events = _get_hook_events(mock_hooks) + assert "task_auto_approved" not in events + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_task_level_auto_overrides_project_review(self, mock_run, mock_hooks, mock_followup, conn): + """Если у задачи execution_mode=auto, pipeline auto-approve, даже если проект в review.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект в review, но задача — auto + models.update_task(conn, "VDOL-001", execution_mode="auto") + + steps = [{"role": "debugger", "brief": "find"}] + result = 
run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done", "Task-level auto должен override project review" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_pipeline_result_includes_mode(self, mock_run, mock_hooks, mock_followup, conn): + """Pipeline result должен содержать поле mode.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result.get("mode") == "auto" + + +# --------------------------------------------------------------------------- +# test_auto_rerun_on_permission_denied +# --------------------------------------------------------------------------- + +class TestAutoRerunOnPermissionDenied: + """Runner повторяет шаг при permission issues, останавливается по лимиту (1 retry).""" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_retries_on_permission_error(self, mock_run, mock_hooks, mock_followup, conn): + """Auto-режим: при permission denied runner делает 1 retry с allow_write=True.""" + mock_run.side_effect = [ + _mock_permission_denied(), # 1-й вызов: permission error + _mock_success("fixed"), # 2-й вызов (retry): успех + ] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "fix file"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + assert mock_run.call_count == 2, "Должен быть ровно 1 retry" + + 
@patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_retry_uses_dangerously_skip_permissions(self, mock_run, mock_hooks, mock_followup, conn): + """Retry при permission error использует --dangerously-skip-permissions.""" + mock_run.side_effect = [ + _mock_permission_denied(), + _mock_success("fixed"), + ] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "fix"}] + run_pipeline(conn, "VDOL-001", steps) + + # Второй вызов (retry) должен содержать --dangerously-skip-permissions + second_cmd = mock_run.call_args_list[1][0][0] + assert "--dangerously-skip-permissions" in second_cmd + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_retry_fires_permission_retry_hook(self, mock_run, mock_hooks, mock_followup, conn): + """При авто-retry срабатывает хук task_permission_retry.""" + mock_run.side_effect = [ + _mock_permission_denied(), + _mock_success(), + ] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "fix"}] + run_pipeline(conn, "VDOL-001", steps) + + events = _get_hook_events(mock_hooks) + assert "task_permission_retry" in events, "Хук task_permission_retry должен сработать" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_retry_failure_blocks_task(self, mock_run, mock_hooks, mock_followup, conn): + """Если retry тоже провалился → задача blocked (лимит в 1 retry исчерпан).""" + mock_run.side_effect = [ + _mock_permission_denied(), # 1-й: permission error + _mock_failure("still denied"), # retry: снова ошибка + ] + 
mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "fix"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert mock_run.call_count == 2, "Стоп после лимита: ровно 1 retry" + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "blocked" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_subsequent_steps_use_allow_write_after_retry(self, mock_run, mock_hooks, mock_followup, conn): + """После успешного retry все следующие шаги тоже используют allow_write.""" + mock_run.side_effect = [ + _mock_permission_denied(), # Шаг 1: permission error + _mock_success("fixed"), # Шаг 1 retry: успех + _mock_success("tested"), # Шаг 2: должен получить allow_write + ] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [ + {"role": "debugger", "brief": "fix"}, + {"role": "tester", "brief": "test"}, + ] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + assert mock_run.call_count == 3 + + # Третий вызов (шаг 2) должен содержать --dangerously-skip-permissions + third_cmd = mock_run.call_args_list[2][0][0] + assert "--dangerously-skip-permissions" in third_cmd + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_normal_failure_does_not_trigger_retry(self, mock_run, mock_hooks, mock_followup, conn): + """Обычная ошибка (не permission) НЕ вызывает авто-retry даже в auto-режиме.""" + mock_run.return_value = _mock_failure("compilation error: undefined variable") + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + 
models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "fix"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert mock_run.call_count == 1, "Retry не нужен для обычных ошибок" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_does_not_retry_on_permission_error(self, mock_run, mock_hooks, mock_followup, conn): + """В review-режиме при permission denied runner НЕ делает retry.""" + mock_run.return_value = _mock_permission_denied() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект в default review mode + steps = [{"role": "debugger", "brief": "fix file"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert mock_run.call_count == 1, "В review-режиме retry НЕ должен происходить" + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "blocked" + + +# --------------------------------------------------------------------------- +# test_auto_followup +# --------------------------------------------------------------------------- + +class TestAutoFollowup: + """Followup запускается без ожидания сразу после pipeline в auto-режиме.""" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_followup_triggered_immediately(self, mock_run, mock_hooks, mock_followup, conn): + """В auto-режиме generate_followups вызывается сразу после pipeline.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + 
mock_followup.assert_called_once_with(conn, "VDOL-001") + + @patch("core.followup.auto_resolve_pending_actions") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_followup_resolves_pending_actions( + self, mock_run, mock_hooks, mock_followup, mock_resolve, conn + ): + """Pending actions из followup авто-резолвятся без ожидания.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + pending = [{"type": "permission_fix", "description": "Fix nginx.conf", + "original_item": {}, "options": ["rerun"]}] + mock_followup.return_value = {"created": [], "pending_actions": pending} + mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + mock_resolve.assert_called_once_with(conn, "VDOL-001", pending) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_no_auto_followup(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия: в review-режиме generate_followups НЕ вызывается.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект в default review mode + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_followup.assert_not_called() + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_followup_not_triggered_for_followup_tasks( + self, mock_run, mock_hooks, mock_followup, conn + ): + """Для followup-задач generate_followups НЕ вызывается (защита от 
рекурсии).""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"}) + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_followup.assert_not_called() + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_followup_exception_does_not_block_pipeline( + self, mock_run, mock_hooks, mock_followup, conn + ): + """Ошибка в followup не должна блокировать pipeline (success=True).""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.side_effect = Exception("followup PM crashed") + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True # Pipeline succeeded, followup failure absorbed + + @patch("core.followup.auto_resolve_pending_actions") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_no_pending_actions_skips_auto_resolve( + self, mock_run, mock_hooks, mock_followup, mock_resolve, conn + ): + """Если pending_actions пустой, auto_resolve_pending_actions НЕ вызывается.""" + mock_run.return_value = _mock_success() + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + mock_resolve.return_value = [] + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + mock_resolve.assert_not_called() + + +# --------------------------------------------------------------------------- +# 
_is_permission_error unit tests +# --------------------------------------------------------------------------- + +class TestIsPermissionError: + """Unit-тесты для функции _is_permission_error.""" + + def test_detects_permission_denied_in_raw_output(self): + result = {"raw_output": "Error: permission denied writing to nginx.conf", + "returncode": 1} + assert _is_permission_error(result) is True + + def test_detects_read_only_in_output(self): + result = {"raw_output": "File is read-only, cannot write", + "returncode": 1} + assert _is_permission_error(result) is True + + def test_detects_manual_apply_in_output(self): + result = {"raw_output": "Apply manually to /etc/nginx/nginx.conf", + "returncode": 1} + assert _is_permission_error(result) is True + + def test_normal_failure_not_permission_error(self): + result = {"raw_output": "Compilation error: undefined variable x", + "returncode": 1} + assert _is_permission_error(result) is False + + def test_empty_output_not_permission_error(self): + result = {"raw_output": "", "returncode": 1} + assert _is_permission_error(result) is False + + def test_success_with_permission_word_not_flagged(self): + """Если returncode=0 и текст содержит 'permission', это не ошибка.""" + # Функция проверяет только текст, не returncode + # Но с success output вряд ли содержит "permission denied" + result = {"raw_output": "All permissions granted, build successful", + "returncode": 0} + assert _is_permission_error(result) is False diff --git a/tests/test_cli.py b/tests/test_cli.py index f056f6d..a273cd1 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -333,7 +333,8 @@ def test_hook_setup_registers_rebuild_frontend(runner, tmp_path): r = invoke(runner, ["hook", "list", "--project", "p1"]) assert r.exit_code == 0 assert "rebuild-frontend" in r.output - assert "web/frontend/*" in r.output + # KIN-050: trigger_module_path должен быть NULL — хук срабатывает безусловно + assert "web/frontend/*" not in r.output def 
test_hook_setup_idempotent(runner, tmp_path): @@ -352,3 +353,123 @@ def test_hook_setup_project_not_found(runner): r = invoke(runner, ["hook", "setup", "--project", "nope"]) assert r.exit_code == 1 assert "not found" in r.output + + +# =========================================================================== +# KIN-018 — project set-mode / task update --mode / show with mode labels +# =========================================================================== + +def test_project_set_mode_auto(runner): + """project set-mode auto — обновляет режим, выводит подтверждение.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["project", "set-mode", "--project", "p1", "auto"]) + assert r.exit_code == 0 + assert "auto" in r.output + + +def test_project_set_mode_review(runner): + """project set-mode review — обновляет режим обратно в review.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["project", "set-mode", "--project", "p1", "auto"]) + r = invoke(runner, ["project", "set-mode", "--project", "p1", "review"]) + assert r.exit_code == 0 + assert "review" in r.output + + +def test_project_set_mode_persisted(runner): + """После project set-mode режим сохраняется в БД и виден в project show.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["project", "set-mode", "--project", "p1", "auto"]) + + r = invoke(runner, ["project", "show", "p1"]) + assert r.exit_code == 0 + assert "auto" in r.output + + +def test_project_set_mode_not_found(runner): + """project set-mode для несуществующего проекта → exit code 1.""" + r = invoke(runner, ["project", "set-mode", "--project", "nope", "auto"]) + assert r.exit_code == 1 + assert "not found" in r.output + + +def test_project_set_mode_invalid(runner): + """project set-mode с недопустимым значением → ошибка click.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["project", "set-mode", "--project", "p1", "turbo"]) + assert 
r.exit_code != 0 + + +def test_project_show_displays_mode(runner): + """project show отображает строку Mode: ...""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + r = invoke(runner, ["project", "show", "p1"]) + assert r.exit_code == 0 + assert "Mode:" in r.output + + +def test_task_update_mode_auto(runner): + """task update --mode auto задаёт execution_mode на задачу.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + r = invoke(runner, ["task", "update", "P1-001", "--mode", "auto"]) + assert r.exit_code == 0 + assert "auto" in r.output + + +def test_task_update_mode_review(runner): + """task update --mode review задаёт execution_mode=review на задачу.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + r = invoke(runner, ["task", "update", "P1-001", "--mode", "review"]) + assert r.exit_code == 0 + assert "review" in r.output + + +def test_task_update_mode_persisted(runner): + """После task update --mode режим сохраняется и виден в task show как (overridden).""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + invoke(runner, ["task", "update", "P1-001", "--mode", "auto"]) + + r = invoke(runner, ["task", "show", "P1-001"]) + assert r.exit_code == 0 + assert "overridden" in r.output + + +def test_task_update_mode_invalid(runner): + """task update --mode с недопустимым значением → ошибка click.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + r = invoke(runner, ["task", "update", "P1-001", "--mode", "turbo"]) + assert r.exit_code != 0 + + +def test_task_show_mode_inherited(runner): + """task show без явного execution_mode показывает (inherited).""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + r = invoke(runner, ["task", "show", "P1-001"]) + assert 
r.exit_code == 0 + assert "inherited" in r.output + + +def test_task_show_mode_overridden(runner): + """task show с task-level execution_mode показывает (overridden).""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + invoke(runner, ["task", "update", "P1-001", "--mode", "review"]) + r = invoke(runner, ["task", "show", "P1-001"]) + assert r.exit_code == 0 + assert "overridden" in r.output + + +def test_task_show_mode_label_reflects_project_mode(runner): + """Если у проекта auto, у задачи нет mode — task show показывает 'auto (inherited)'.""" + invoke(runner, ["project", "add", "p1", "P1", "/p1"]) + invoke(runner, ["project", "set-mode", "--project", "p1", "auto"]) + invoke(runner, ["task", "add", "p1", "Fix bug"]) + r = invoke(runner, ["task", "show", "P1-001"]) + assert r.exit_code == 0 + assert "auto" in r.output + assert "inherited" in r.output diff --git a/tests/test_followup.py b/tests/test_followup.py index 9bf13c7..ec10d33 100644 --- a/tests/test_followup.py +++ b/tests/test_followup.py @@ -7,7 +7,7 @@ from unittest.mock import patch, MagicMock from core.db import init_db from core import models from core.followup import ( - generate_followups, resolve_pending_action, + generate_followups, resolve_pending_action, auto_resolve_pending_actions, _collect_pipeline_output, _next_task_id, _is_permission_blocked, ) @@ -222,3 +222,48 @@ class TestResolvePendingAction: def test_nonexistent_task(self, conn): action = {"type": "permission_fix", "original_item": {}} assert resolve_pending_action(conn, "NOPE", action, "skip") is None + + +class TestAutoResolvePendingActions: + @patch("agents.runner._run_claude") + def test_rerun_success_resolves_as_rerun(self, mock_claude, conn): + """Успешный rerun должен резолвиться как 'rerun'.""" + mock_claude.return_value = { + "output": json.dumps({"result": "fixed"}), + "returncode": 0, + } + action = { + "type": "permission_fix", + "description": "Fix X", + 
"original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"}, + "options": ["rerun", "manual_task", "skip"], + } + results = auto_resolve_pending_actions(conn, "VDOL-001", [action]) + + assert len(results) == 1 + assert results[0]["resolved"] == "rerun" + + @patch("agents.runner._run_claude") + def test_rerun_failure_escalates_to_manual_task(self, mock_claude, conn): + """Провал rerun должен создавать manual_task для эскалации.""" + mock_claude.return_value = {"output": "", "returncode": 1} + action = { + "type": "permission_fix", + "description": "Fix X", + "original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"}, + "options": ["rerun", "manual_task", "skip"], + } + results = auto_resolve_pending_actions(conn, "VDOL-001", [action]) + + assert len(results) == 1 + assert results[0]["resolved"] == "manual_task" + # Manual task должна быть создана в DB + tasks = models.list_tasks(conn, project_id="vdol") + assert len(tasks) == 2 # VDOL-001 + новая manual task + + @patch("agents.runner._run_claude") + def test_empty_pending_actions(self, mock_claude, conn): + """Пустой список — пустой результат.""" + results = auto_resolve_pending_actions(conn, "VDOL-001", []) + assert results == [] + mock_claude.assert_not_called() diff --git a/tests/test_hooks.py b/tests/test_hooks.py index 2778ee0..4a9d554 100644 --- a/tests/test_hooks.py +++ b/tests/test_hooks.py @@ -8,7 +8,7 @@ from core.db import init_db from core import models from core.hooks import ( create_hook, get_hooks, update_hook, delete_hook, - run_hooks, get_hook_logs, HookResult, + run_hooks, get_hook_logs, HookResult, _substitute_vars, ) @@ -273,3 +273,298 @@ class TestGetHookLogs: event="pipeline_completed", task_modules=modules) logs = get_hook_logs(conn, project_id="vdol", limit=3) assert len(logs) == 3 + + +# --------------------------------------------------------------------------- +# Variable substitution in hook commands +# 
--------------------------------------------------------------------------- + +class TestSubstituteVars: + def test_substitutes_task_id_and_title(self, conn): + result = _substitute_vars( + 'git commit -m "kin: {task_id} {title}"', + "VDOL-001", + conn, + ) + assert result == 'git commit -m "kin: VDOL-001 Fix bug"' + + def test_no_substitution_when_task_id_is_none(self, conn): + cmd = 'git commit -m "kin: {task_id} {title}"' + result = _substitute_vars(cmd, None, conn) + assert result == cmd + + def test_sanitizes_double_quotes_in_title(self, conn): + conn.execute('UPDATE tasks SET title = ? WHERE id = ?', + ('Fix "bug" here', "VDOL-001")) + conn.commit() + result = _substitute_vars( + 'git commit -m "kin: {task_id} {title}"', + "VDOL-001", + conn, + ) + assert '"' not in result.split('"kin:')[1].split('"')[0] + assert "Fix 'bug' here" in result + + def test_sanitizes_newlines_in_title(self, conn): + conn.execute('UPDATE tasks SET title = ? WHERE id = ?', + ("Fix\nbug\r\nhere", "VDOL-001")) + conn.commit() + result = _substitute_vars("{title}", "VDOL-001", conn) + assert "\n" not in result + assert "\r" not in result + + def test_unknown_task_id_uses_empty_title(self, conn): + result = _substitute_vars("{task_id} {title}", "NONEXISTENT", conn) + assert result == "NONEXISTENT " + + def test_no_placeholders_returns_command_unchanged(self, conn): + cmd = "npm run build" + result = _substitute_vars(cmd, "VDOL-001", conn) + assert result == cmd + + @patch("core.hooks.subprocess.run") + def test_autocommit_hook_command_substituted(self, mock_run, conn): + """auto-commit hook должен получать реальные task_id и title в команде.""" + mock_run.return_value = MagicMock(returncode=0, stdout="ok", stderr="") + create_hook(conn, "vdol", "auto-commit", "task_done", + 'git add -A && git commit -m "kin: {task_id} {title}"', + working_dir="/tmp") + run_hooks(conn, "vdol", "VDOL-001", event="task_done", task_modules=[]) + call_kwargs = mock_run.call_args[1] + # shell=True: command is 
the first positional arg + command = mock_run.call_args[0][0] + assert "VDOL-001" in command + assert "Fix bug" in command + + +# --------------------------------------------------------------------------- +# KIN-050: rebuild-frontend hook — unconditional firing after pipeline +# --------------------------------------------------------------------------- + +class TestRebuildFrontendHookSetup: + """Regression tests for KIN-050. + + Баг: rebuild-frontend не срабатывал, если pipeline не трогал web/frontend/*. + Фикс: убран trigger_module_path из hook_setup — хук должен срабатывать всегда. + """ + + def test_rebuild_frontend_created_without_trigger_module_path(self, conn): + """rebuild-frontend hook должен быть создан без trigger_module_path (KIN-050). + + Воспроизводит логику hook_setup: создаём хук без фильтра и убеждаемся, + что он сохраняется в БД с trigger_module_path=NULL. + """ + hook = create_hook( + conn, "vdol", + name="rebuild-frontend", + event="pipeline_completed", + command="scripts/rebuild-frontend.sh", + trigger_module_path=None, # фикс KIN-050: без фильтра + working_dir="/tmp", + timeout_seconds=300, + ) + + assert hook["trigger_module_path"] is None, ( + "trigger_module_path должен быть NULL — хук не должен фильтровать по модулям" + ) + + # Перечитываем из БД — убеждаемся, что NULL сохранился + hooks = get_hooks(conn, "vdol", enabled_only=False) + rebuild = next((h for h in hooks if h["name"] == "rebuild-frontend"), None) + assert rebuild is not None + assert rebuild["trigger_module_path"] is None + + @patch("core.hooks.subprocess.run") + def test_rebuild_frontend_fires_when_only_backend_modules_changed(self, mock_run, conn): + """Хук без trigger_module_path должен срабатывать при изменении backend-файлов. + + Регрессия KIN-050: раньше хук молчал, если не было web/frontend/* файлов. 
+ """ + mock_run.return_value = MagicMock(returncode=0, stdout="built!", stderr="") + create_hook( + conn, "vdol", "rebuild-frontend", "pipeline_completed", + "npm run build", + trigger_module_path=None, # фикс: нет фильтра + working_dir="/tmp", + ) + + backend_modules = [ + {"path": "core/models.py", "name": "models"}, + {"path": "web/api.py", "name": "api"}, + ] + results = run_hooks(conn, "vdol", "VDOL-001", + event="pipeline_completed", task_modules=backend_modules) + + assert len(results) == 1, "Хук должен сработать несмотря на отсутствие frontend-файлов" + assert results[0].name == "rebuild-frontend" + assert results[0].success is True + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_rebuild_frontend_fires_exactly_once_per_pipeline(self, mock_run, conn): + """Хук rebuild-frontend должен срабатывать ровно один раз за pipeline_completed.""" + mock_run.return_value = MagicMock(returncode=0, stdout="ok", stderr="") + create_hook( + conn, "vdol", "rebuild-frontend", "pipeline_completed", + "npm run build", + trigger_module_path=None, + working_dir="/tmp", + ) + + any_modules = [ + {"path": "core/hooks.py", "name": "hooks"}, + {"path": "web/frontend/App.vue", "name": "App"}, + {"path": "web/api.py", "name": "api"}, + ] + results = run_hooks(conn, "vdol", "VDOL-001", + event="pipeline_completed", task_modules=any_modules) + + assert len(results) == 1, "Хук должен выполниться ровно один раз" + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_rebuild_frontend_fires_with_empty_module_list(self, mock_run, conn): + """Хук без trigger_module_path должен срабатывать даже с пустым списком модулей.""" + mock_run.return_value = MagicMock(returncode=0, stdout="ok", stderr="") + create_hook( + conn, "vdol", "rebuild-frontend", "pipeline_completed", + "npm run build", + trigger_module_path=None, + working_dir="/tmp", + ) + + results = run_hooks(conn, "vdol", "VDOL-001", + event="pipeline_completed", 
task_modules=[]) + + assert len(results) == 1 + assert results[0].name == "rebuild-frontend" + mock_run.assert_called_once() + + @patch("core.hooks.subprocess.run") + def test_rebuild_frontend_with_module_path_skips_non_frontend(self, mock_run, conn): + """Контрольный тест: хук С trigger_module_path НЕ срабатывает на backend-файлы. + + Подтверждает, что фикс (удаление trigger_module_path) был необходим. + """ + mock_run.return_value = MagicMock(returncode=0, stdout="ok", stderr="") + create_hook( + conn, "vdol", "rebuild-frontend-filtered", "pipeline_completed", + "npm run build", + trigger_module_path="web/frontend/*", # старое (сломанное) поведение + working_dir="/tmp", + ) + + backend_modules = [{"path": "core/models.py", "name": "models"}] + results = run_hooks(conn, "vdol", "VDOL-001", + event="pipeline_completed", task_modules=backend_modules) + + assert len(results) == 0, ( + "Хук с trigger_module_path НЕ должен срабатывать на backend-файлы — " + "именно это было первопричиной бага KIN-050" + ) + + +# --------------------------------------------------------------------------- +# KIN-052: rebuild-frontend hook — команда cd+&& и персистентность в БД +# --------------------------------------------------------------------------- + +class TestKIN052RebuildFrontendCommand: + """Регрессионные тесты для KIN-052. + + Хук rebuild-frontend использует команду вида: + cd /path/to/frontend && npm run build + — то есть цепочку shell-команд без working_dir. + Тесты проверяют, что такая форма работает корректно и хук переживает + пересоздание соединения с БД (симуляция рестарта). + """ + + @patch("core.hooks.subprocess.run") + def test_cd_chained_command_passes_as_string_to_shell(self, mock_run, conn): + """Команда с && должна передаваться в subprocess как строка (не список) с shell=True. + + Если передать список ['cd', '/path', '&&', 'npm', 'run', 'build'] с shell=True, + shell проигнорирует аргументы после первого. Строковая форма обязательна. 
+ """ + mock_run.return_value = MagicMock(returncode=0, stdout="built!", stderr="") + cmd = "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" + create_hook(conn, "vdol", "rebuild-frontend", "pipeline_completed", cmd, + trigger_module_path=None, working_dir=None) + + run_hooks(conn, "vdol", "VDOL-001", event="pipeline_completed", task_modules=[]) + + call_args = mock_run.call_args + passed_cmd = call_args[0][0] + assert isinstance(passed_cmd, str), ( + "Команда с && должна передаваться как строка, иначе shell не раскроет &&" + ) + assert "&&" in passed_cmd + assert call_args[1].get("shell") is True + + @patch("core.hooks.subprocess.run") + def test_cd_command_without_working_dir_uses_cwd_none(self, mock_run, conn): + """Хук с cd-командой и working_dir=None должен вызывать subprocess с cwd=None. + + Директория смены задаётся через cd в самой команде, а не через cwd. + """ + mock_run.return_value = MagicMock(returncode=0, stdout="ok", stderr="") + cmd = "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" + create_hook(conn, "vdol", "rebuild-frontend", "pipeline_completed", cmd, + trigger_module_path=None, working_dir=None) + + run_hooks(conn, "vdol", "VDOL-001", event="pipeline_completed", task_modules=[]) + + cwd = mock_run.call_args[1].get("cwd") + assert cwd is None, ( + f"cwd должен быть None когда working_dir не задан, получили: {cwd!r}" + ) + + @patch("core.hooks.subprocess.run") + def test_cd_command_exits_zero_returns_success(self, mock_run, conn): + """Хук с cd+npm run build при returncode=0 должен вернуть success=True.""" + mock_run.return_value = MagicMock(returncode=0, stdout="✓ build complete", stderr="") + cmd = "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" + create_hook(conn, "vdol", "rebuild-frontend", "pipeline_completed", cmd, + trigger_module_path=None) + + results = run_hooks(conn, "vdol", "VDOL-001", + event="pipeline_completed", task_modules=[]) + + assert len(results) == 1 + assert 
results[0].success is True + assert results[0].name == "rebuild-frontend" + + @patch("core.hooks.subprocess.run") + def test_hook_persists_after_db_reconnect(self, mock_run): + """Хук должен сохраняться в файловой БД и быть доступен после пересоздания соединения. + + Симулирует рестарт: создаём хук, закрываем соединение, открываем новое — хук на месте. + """ + import tempfile + import os + from core.db import init_db + + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + # Первое соединение — создаём проект и хук + conn1 = init_db(db_path) + from core import models as _models + _models.create_project(conn1, "kin", "Kin", "/projects/kin", tech_stack=["vue3"]) + cmd = "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" + hook = create_hook(conn1, "kin", "rebuild-frontend", "pipeline_completed", cmd, + trigger_module_path=None) + hook_id = hook["id"] + conn1.close() + + # Второе соединение — «рестарт», хук должен быть на месте + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=True) + conn2.close() + + assert len(hooks) == 1, "После пересоздания соединения хук должен оставаться в БД" + assert hooks[0]["id"] == hook_id + assert hooks[0]["name"] == "rebuild-frontend" + assert hooks[0]["command"] == cmd + assert hooks[0]["trigger_module_path"] is None + finally: + os.unlink(db_path) diff --git a/tests/test_runner.py b/tests/test_runner.py index e05da75..bd7ac9b 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -289,6 +289,149 @@ class TestRunPipeline: assert result["success"] is True +# --------------------------------------------------------------------------- +# Auto mode +# --------------------------------------------------------------------------- + +class TestAutoMode: + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_generates_followups(self, mock_run, 
mock_hooks, mock_followup, conn): + """Auto mode должен вызывать generate_followups после task_auto_approved.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_followup.assert_called_once_with(conn, "VDOL-001") + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_skips_followups(self, mock_run, mock_hooks, mock_followup, conn): + """Review mode НЕ должен вызывать generate_followups автоматически.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект остаётся в default "review" mode + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_followup.assert_not_called() + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_skips_followups_for_followup_tasks(self, mock_run, mock_hooks, mock_followup, conn): + """Auto mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии).""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + models.update_task(conn, "VDOL-001", brief={"source": 
"followup:VDOL-000"}) + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_followup.assert_not_called() + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_fires_task_done_event(self, mock_run, mock_hooks, mock_followup, conn): + """Auto mode должен вызывать run_hooks с event='task_done' после task_auto_approved.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + events_fired = [call[1].get("event") or call[0][3] + for call in mock_hooks.call_args_list] + assert "task_done" in events_fired + + @patch("core.followup.auto_resolve_pending_actions") + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_mode_resolves_pending_actions(self, mock_run, mock_hooks, mock_followup, mock_resolve, conn): + """Auto mode должен авто-резолвить pending_actions из followup generation.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + + pending = [{"type": "permission_fix", "description": "Fix X", + "original_item": {}, "options": ["rerun"]}] + mock_followup.return_value = {"created": [], "pending_actions": pending} + mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_resolve.assert_called_once_with(conn, "VDOL-001", pending) + + +# 
--------------------------------------------------------------------------- +# Retry on permission error +# --------------------------------------------------------------------------- + +class TestRetryOnPermissionError: + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, conn): + """Auto mode: retry при permission error должен срабатывать.""" + permission_fail = _mock_claude_failure("permission denied: cannot write file") + retry_success = _mock_claude_success({"result": "fixed"}) + + mock_run.side_effect = [permission_fail, retry_success] + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto") + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + assert mock_run.call_count == 2 + # Second call must include --dangerously-skip-permissions + second_cmd = mock_run.call_args_list[1][0][0] + assert "--dangerously-skip-permissions" in second_cmd + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_does_not_retry_on_permission_error(self, mock_run, mock_hooks, conn): + """Review mode: retry при permission error НЕ должен срабатывать.""" + permission_fail = _mock_claude_failure("permission denied: cannot write file") + + mock_run.return_value = permission_fail + mock_hooks.return_value = [] + + # Проект остаётся в default "review" mode + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert mock_run.call_count == 1 + + # --------------------------------------------------------------------------- # JSON parsing # --------------------------------------------------------------------------- @@ 
-336,20 +479,22 @@ class TestNonInteractive: call_kwargs = mock_run.call_args[1] assert call_kwargs.get("timeout") == 300 + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""}) @patch("agents.runner.subprocess.run") def test_interactive_uses_600s_timeout(self, mock_run, conn): mock_run.return_value = _mock_claude_success({"result": "ok"}) run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) call_kwargs = mock_run.call_args[1] - assert call_kwargs.get("timeout") == 300 + assert call_kwargs.get("timeout") == 600 + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""}) @patch("agents.runner.subprocess.run") def test_interactive_no_stdin_override(self, mock_run, conn): """In interactive mode, stdin should not be set to DEVNULL.""" mock_run.return_value = _mock_claude_success({"result": "ok"}) run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) call_kwargs = mock_run.call_args[1] - assert call_kwargs.get("stdin") == subprocess.DEVNULL + assert call_kwargs.get("stdin") is None @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1"}) @patch("agents.runner.subprocess.run") @@ -501,3 +646,108 @@ class TestRunAudit: cmd = mock_run.call_args[0][0] assert "--dangerously-skip-permissions" in cmd + + +# --------------------------------------------------------------------------- +# KIN-019: Silent FAILED diagnostics (regression tests) +# --------------------------------------------------------------------------- + +class TestSilentFailedDiagnostics: + """Regression: агент падает без вывода — runner должен сохранять диагностику в БД.""" + + @patch("agents.runner.subprocess.run") + def test_agent_empty_stdout_saves_stderr_as_error_message_in_db(self, mock_run, conn): + """Когда stdout пустой и returncode != 0, stderr должен сохраняться как error_message в agent_logs.""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "API rate limit exceeded (429)" + mock.returncode = 1 + mock_run.return_value = mock + + run_agent(conn, "debugger", 
"VDOL-001", "vdol") + + log = conn.execute( + "SELECT error_message FROM agent_logs WHERE task_id='VDOL-001'" + ).fetchone() + assert log is not None + assert log["error_message"] is not None + assert "rate limit" in log["error_message"] + + @patch("agents.runner.subprocess.run") + def test_agent_empty_stdout_returns_error_key_with_stderr(self, mock_run, conn): + """run_agent должен вернуть ключ 'error' с содержимым stderr при пустом stdout и ненулевом returncode.""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "Permission denied: cannot write to /etc/hosts" + mock.returncode = 1 + mock_run.return_value = mock + + result = run_agent(conn, "debugger", "VDOL-001", "vdol") + + assert result["success"] is False + assert "error" in result + assert result["error"] is not None + assert "Permission denied" in result["error"] + + @patch("agents.runner.subprocess.run") + def test_pipeline_error_message_includes_agent_stderr(self, mock_run, conn): + """Сообщение об ошибке pipeline должно включать stderr агента, а не только generic 'step failed'.""" + mock = MagicMock() + mock.stdout = "" + mock.stderr = "Internal server error: unexpected EOF" + mock.returncode = 1 + mock_run.return_value = mock + + steps = [{"role": "tester", "brief": "run tests"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + assert "Internal server error" in result["error"] or "unexpected EOF" in result["error"] + + @patch("agents.runner.build_context") + def test_pipeline_exception_in_run_agent_marks_task_blocked(self, mock_ctx, conn): + """Исключение внутри run_agent (например, из build_context) должно ставить задачу в blocked.""" + mock_ctx.side_effect = RuntimeError("DB connection lost") + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "blocked" + + @patch("agents.runner.build_context") + 
def test_pipeline_exception_logs_to_agent_logs(self, mock_ctx, conn): + """Исключение в run_agent должно быть залогировано в agent_logs с success=False.""" + mock_ctx.side_effect = ValueError("bad context data") + + steps = [{"role": "tester", "brief": "test"}] + run_pipeline(conn, "VDOL-001", steps) + + logs = conn.execute( + "SELECT * FROM agent_logs WHERE task_id='VDOL-001' AND success=0" + ).fetchall() + assert len(logs) >= 1 + + @patch("agents.runner.build_context") + def test_pipeline_exception_marks_pipeline_failed_in_db(self, mock_ctx, conn): + """При исключении запись pipeline должна существовать в БД и иметь статус failed.""" + mock_ctx.side_effect = RuntimeError("network timeout") + + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone() + assert pipe is not None + assert pipe["status"] == "failed" + + @patch("agents.runner.subprocess.run") + def test_agent_success_has_no_error_key_populated(self, mock_run, conn): + """При успешном запуске агента ключ 'error' в результате должен быть None (нет ложных срабатываний).""" + mock_run.return_value = _mock_claude_success({"result": "all good"}) + + result = run_agent(conn, "debugger", "VDOL-001", "vdol") + + assert result["success"] is True + assert result.get("error") is None diff --git a/tests/test_tech_researcher.py b/tests/test_tech_researcher.py new file mode 100644 index 0000000..7dd5f8d --- /dev/null +++ b/tests/test_tech_researcher.py @@ -0,0 +1,195 @@ +"""Tests for KIN-037: tech_researcher specialist — YAML validation and prompt structure.""" + +from pathlib import Path + +import yaml +import pytest + +SPECIALISTS_YAML = Path(__file__).parent.parent / "agents" / "specialists.yaml" +PROMPTS_DIR = Path(__file__).parent.parent / "agents" / "prompts" +TECH_RESEARCHER_PROMPT = PROMPTS_DIR / "tech_researcher.md" + +REQUIRED_SPECIALIST_FIELDS = {"name", "model", "tools", "description", 
"permissions"} +REQUIRED_OUTPUT_SCHEMA_FIELDS = { + "status", "api_overview", "endpoints", "rate_limits", "auth_method", + "data_schemas", "limitations", "gotchas", "codebase_diff", "notes", +} + + +@pytest.fixture(scope="module") +def spec(): + """Load and parse specialists.yaml once for all tests.""" + return yaml.safe_load(SPECIALISTS_YAML.read_text()) + + +@pytest.fixture(scope="module") +def tech_researcher(spec): + return spec["specialists"]["tech_researcher"] + + +@pytest.fixture(scope="module") +def prompt_text(): + return TECH_RESEARCHER_PROMPT.read_text() + + +# --------------------------------------------------------------------------- +# YAML validity +# --------------------------------------------------------------------------- + +class TestSpecialistsYaml: + def test_yaml_parses_without_error(self): + content = SPECIALISTS_YAML.read_text() + parsed = yaml.safe_load(content) + assert parsed is not None + + def test_yaml_has_specialists_key(self, spec): + assert "specialists" in spec + + def test_yaml_has_routes_key(self, spec): + assert "routes" in spec + + +# --------------------------------------------------------------------------- +# tech_researcher entry structure +# --------------------------------------------------------------------------- + +class TestTechResearcherEntry: + def test_tech_researcher_exists_in_specialists(self, spec): + assert "tech_researcher" in spec["specialists"] + + def test_tech_researcher_has_required_fields(self, tech_researcher): + missing = REQUIRED_SPECIALIST_FIELDS - set(tech_researcher.keys()) + assert not missing, f"Missing fields: {missing}" + + def test_tech_researcher_name_is_string(self, tech_researcher): + assert isinstance(tech_researcher["name"], str) + assert tech_researcher["name"].strip() + + def test_tech_researcher_model_is_sonnet(self, tech_researcher): + assert tech_researcher["model"] == "sonnet" + + def test_tech_researcher_tools_is_list(self, tech_researcher): + assert 
isinstance(tech_researcher["tools"], list) + assert len(tech_researcher["tools"]) > 0 + + def test_tech_researcher_tools_include_webfetch(self, tech_researcher): + assert "WebFetch" in tech_researcher["tools"] + + def test_tech_researcher_tools_include_read_grep_glob(self, tech_researcher): + for tool in ("Read", "Grep", "Glob"): + assert tool in tech_researcher["tools"], f"Missing tool: {tool}" + + def test_tech_researcher_permissions_is_read_only(self, tech_researcher): + assert tech_researcher["permissions"] == "read_only" + + def test_tech_researcher_description_is_non_empty_string(self, tech_researcher): + assert isinstance(tech_researcher["description"], str) + assert len(tech_researcher["description"]) > 10 + + def test_tech_researcher_has_output_schema(self, tech_researcher): + assert "output_schema" in tech_researcher + + def test_tech_researcher_output_schema_has_required_fields(self, tech_researcher): + schema = tech_researcher["output_schema"] + missing = REQUIRED_OUTPUT_SCHEMA_FIELDS - set(schema.keys()) + assert not missing, f"Missing output_schema fields: {missing}" + + def test_tech_researcher_context_rules_decisions_is_list(self, tech_researcher): + decisions = tech_researcher.get("context_rules", {}).get("decisions") + assert isinstance(decisions, list) + + def test_tech_researcher_context_rules_includes_gotcha(self, tech_researcher): + decisions = tech_researcher.get("context_rules", {}).get("decisions", []) + assert "gotcha" in decisions + + +# --------------------------------------------------------------------------- +# api_research route +# --------------------------------------------------------------------------- + +class TestApiResearchRoute: + def test_api_research_route_exists(self, spec): + assert "api_research" in spec["routes"] + + def test_api_research_route_has_steps(self, spec): + route = spec["routes"]["api_research"] + assert "steps" in route + assert isinstance(route["steps"], list) + assert len(route["steps"]) >= 1 + + def 
test_api_research_route_starts_with_tech_researcher(self, spec): + steps = spec["routes"]["api_research"]["steps"] + assert steps[0] == "tech_researcher" + + def test_api_research_route_includes_architect(self, spec): + steps = spec["routes"]["api_research"]["steps"] + assert "architect" in steps + + def test_api_research_route_has_description(self, spec): + route = spec["routes"]["api_research"] + assert "description" in route + assert isinstance(route["description"], str) + + +# --------------------------------------------------------------------------- +# Prompt file existence +# --------------------------------------------------------------------------- + +class TestTechResearcherPromptFile: + def test_prompt_file_exists(self): + assert TECH_RESEARCHER_PROMPT.exists(), ( + f"Prompt file not found: {TECH_RESEARCHER_PROMPT}" + ) + + def test_prompt_file_is_not_empty(self, prompt_text): + assert len(prompt_text.strip()) > 100 + + +# --------------------------------------------------------------------------- +# Prompt content — structured review instructions +# --------------------------------------------------------------------------- + +class TestTechResearcherPromptContent: + def test_prompt_contains_json_output_instruction(self, prompt_text): + assert "JSON" in prompt_text or "json" in prompt_text + + def test_prompt_defines_status_field(self, prompt_text): + assert '"status"' in prompt_text + + def test_prompt_defines_done_partial_blocked_statuses(self, prompt_text): + assert "done" in prompt_text + assert "partial" in prompt_text + assert "blocked" in prompt_text + + def test_prompt_defines_api_overview_field(self, prompt_text): + assert "api_overview" in prompt_text + + def test_prompt_defines_endpoints_field(self, prompt_text): + assert "endpoints" in prompt_text + + def test_prompt_defines_rate_limits_field(self, prompt_text): + assert "rate_limits" in prompt_text + + def test_prompt_defines_codebase_diff_field(self, prompt_text): + assert "codebase_diff" 
in prompt_text + + def test_prompt_defines_gotchas_field(self, prompt_text): + assert "gotchas" in prompt_text + + def test_prompt_contains_webfetch_instruction(self, prompt_text): + assert "WebFetch" in prompt_text + + def test_prompt_mentions_no_secrets_logging(self, prompt_text): + """Prompt must instruct agent not to log secret values.""" + lower = prompt_text.lower() + assert "secret" in lower or "credential" in lower or "token" in lower + + def test_prompt_specifies_readonly_bash(self, prompt_text): + """Bash must be restricted to read-only operations per rules.""" + assert "read-only" in prompt_text or "read only" in prompt_text or "GET" in prompt_text + + def test_prompt_defines_partial_reason_for_partial_status(self, prompt_text): + assert "partial_reason" in prompt_text + + def test_prompt_defines_blocked_reason_for_blocked_status(self, prompt_text): + assert "blocked_reason" in prompt_text diff --git a/web/api.py b/web/api.py index 52ebbe2..367063c 100644 --- a/web/api.py +++ b/web/api.py @@ -76,6 +76,25 @@ class ProjectCreate(BaseModel): priority: int = 5 +class ProjectPatch(BaseModel): + execution_mode: str + + +@app.patch("/api/projects/{project_id}") +def patch_project(project_id: str, body: ProjectPatch): + if body.execution_mode not in VALID_EXECUTION_MODES: + raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. 
Must be one of: {', '.join(VALID_EXECUTION_MODES)}") + conn = get_conn() + p = models.get_project(conn, project_id) + if not p: + conn.close() + raise HTTPException(404, f"Project '{project_id}' not found") + models.update_project(conn, project_id, execution_mode=body.execution_mode) + p = models.get_project(conn, project_id) + conn.close() + return p + + @app.post("/api/projects") def create_project(body: ProjectCreate): conn = get_conn() @@ -138,22 +157,33 @@ def create_task(body: TaskCreate): class TaskPatch(BaseModel): - status: str + status: str | None = None + execution_mode: str | None = None -VALID_STATUSES = {"pending", "in_progress", "review", "done", "blocked", "cancelled"} +VALID_STATUSES = set(models.VALID_TASK_STATUSES) +VALID_EXECUTION_MODES = {"auto", "review"} @app.patch("/api/tasks/{task_id}") def patch_task(task_id: str, body: TaskPatch): - if body.status not in VALID_STATUSES: + if body.status is not None and body.status not in VALID_STATUSES: raise HTTPException(400, f"Invalid status '{body.status}'. Must be one of: {', '.join(VALID_STATUSES)}") + if body.execution_mode is not None and body.execution_mode not in VALID_EXECUTION_MODES: + raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. Must be one of: {', '.join(VALID_EXECUTION_MODES)}") + if body.status is None and body.execution_mode is None: + raise HTTPException(400, "Nothing to update. 
Provide status or execution_mode.") conn = get_conn() t = models.get_task(conn, task_id) if not t: conn.close() raise HTTPException(404, f"Task '{task_id}' not found") - models.update_task(conn, task_id, status=body.status) + fields = {} + if body.status is not None: + fields["status"] = body.status + if body.execution_mode is not None: + fields["execution_mode"] = body.execution_mode + models.update_task(conn, task_id, **fields) t = models.get_task(conn, task_id) conn.close() return t @@ -218,6 +248,13 @@ def approve_task(task_id: str, body: TaskApprove | None = None): conn.close() raise HTTPException(404, f"Task '{task_id}' not found") models.update_task(conn, task_id, status="done") + try: + from core.hooks import run_hooks as _run_hooks + task_modules = models.get_modules(conn, t["project_id"]) + _run_hooks(conn, t["project_id"], task_id, + event="task_done", task_modules=task_modules) + except Exception: + pass decision = None if body and body.decision_title: decision = models.add_decision( @@ -298,12 +335,8 @@ def is_task_running(task_id: str): return {"running": False} -class TaskRun(BaseModel): - allow_write: bool = False - - @app.post("/api/tasks/{task_id}/run") -def run_task(task_id: str, body: TaskRun | None = None): +def run_task(task_id: str): """Launch pipeline for a task in background. 
Returns 202.""" conn = get_conn() t = models.get_task(conn, task_id) @@ -317,8 +350,7 @@ def run_task(task_id: str, body: TaskRun | None = None): kin_root = Path(__file__).parent.parent cmd = [sys.executable, "-m", "cli.main", "--db", str(DB_PATH), "run", task_id] - if body and body.allow_write: - cmd.append("--allow-write") + cmd.append("--allow-write") # always required: subprocess runs non-interactively (stdin=DEVNULL) import os env = os.environ.copy() @@ -383,6 +415,18 @@ def create_decision(body: DecisionCreate): return d +@app.delete("/api/projects/{project_id}/decisions/{decision_id}") +def delete_decision(project_id: str, decision_id: int): + conn = get_conn() + decision = models.get_decision(conn, decision_id) + if not decision or decision["project_id"] != project_id: + conn.close() + raise HTTPException(404, f"Decision #{decision_id} not found") + models.delete_decision(conn, decision_id) + conn.close() + return {"deleted": decision_id} + + # --------------------------------------------------------------------------- # Cost # --------------------------------------------------------------------------- diff --git a/web/frontend/package-lock.json b/web/frontend/package-lock.json index 06f1b0a..875eeaa 100644 --- a/web/frontend/package-lock.json +++ b/web/frontend/package-lock.json @@ -14,12 +14,15 @@ "devDependencies": { "@types/node": "^24.12.0", "@vitejs/plugin-vue": "^6.0.5", + "@vue/test-utils": "^2.4.6", "@vue/tsconfig": "^0.9.0", "autoprefixer": "^10.4.27", + "jsdom": "^29.0.0", "postcss": "^8.5.8", "tailwindcss": "^3.4.19", "typescript": "~5.9.3", "vite": "^8.0.0", + "vitest": "^4.1.0", "vue-tsc": "^3.2.5" } }, @@ -36,6 +39,47 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/@asamuzakjp/css-color": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-5.0.1.tgz", + "integrity": "sha512-2SZFvqMyvboVV1d15lMf7XiI3m7SDqXUuKaTymJYLN6dSGadqp+fVojqJlVoMlbZnlTmu3S0TLwLTJpvBMO1Aw==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^3.1.1", + "@csstools/css-color-parser": "^4.0.2", + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0", + "lru-cache": "^11.2.6" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/@asamuzakjp/dom-selector": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/@asamuzakjp/dom-selector/-/dom-selector-7.0.3.tgz", + "integrity": "sha512-Q6mU0Z6bfj6YvnX2k9n0JxiIwrCFN59x/nWmYQnAqP000ruX/yV+5bp/GRcF5T8ncvfwJQ7fgfP74DlpKExILA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/nwsapi": "^2.3.9", + "bidi-js": "^1.0.3", + "css-tree": "^3.2.1", + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.7" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/@asamuzakjp/nwsapi": { + "version": "2.3.9", + "resolved": "https://registry.npmjs.org/@asamuzakjp/nwsapi/-/nwsapi-2.3.9.tgz", + "integrity": "sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==", + "dev": true, + "license": "MIT" + }, "node_modules/@babel/helper-string-parser": { "version": "7.27.1", "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", @@ -82,6 +126,159 @@ "node": ">=6.9.0" } }, + "node_modules/@bramus/specificity": { + "version": "2.4.2", + "resolved": "https://registry.npmjs.org/@bramus/specificity/-/specificity-2.4.2.tgz", + "integrity": "sha512-ctxtJ/eA+t+6q2++vj5j7FYX3nRu311q1wfYH3xjlLOsczhlhxAg2FWNUXhpGvAw3BWo1xBcvOV6/YLc2r5FJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "css-tree": "^3.0.0" + }, + "bin": { + "specificity": "bin/cli.js" + } + }, + "node_modules/@csstools/color-helpers": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-6.0.2.tgz", + "integrity": 
"sha512-LMGQLS9EuADloEFkcTBR3BwV/CGHV7zyDxVRtVDTwdI2Ca4it0CCVTT9wCkxSgokjE5Ho41hEPgb8OEUwoXr6Q==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=20.19.0" + } + }, + "node_modules/@csstools/css-calc": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-3.1.1.tgz", + "integrity": "sha512-HJ26Z/vmsZQqs/o3a6bgKslXGFAungXGbinULZO3eMsOyNJHeBBZfup5FiZInOghgoM4Hwnmw+OgbJCNg1wwUQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-4.0.2.tgz", + "integrity": "sha512-0GEfbBLmTFf0dJlpsNU7zwxRIH0/BGEMuXLTCvFYxuL1tNhqzTbtnFICyJLTNK4a+RechKP75e7w42ClXSnJQw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^6.0.2", + "@csstools/css-calc": "^3.1.1" + }, + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^4.0.0", + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-4.0.0.tgz", + "integrity": 
"sha512-+B87qS7fIG3L5h3qwJ/IFbjoVoOe/bpOdh9hAjXbvx0o8ImEmUsGXN0inFOnk2ChCFgqkkGFQ+TpM5rbhkKe4w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^4.0.0" + } + }, + "node_modules/@csstools/css-syntax-patches-for-csstree": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@csstools/css-syntax-patches-for-csstree/-/css-syntax-patches-for-csstree-1.1.1.tgz", + "integrity": "sha512-BvqN0AMWNAnLk9G8jnUT77D+mUbY/H2b3uDTvg2isJkHaOufUE2R3AOwxWo7VBQKT1lOdwdvorddo2B/lk64+w==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "peerDependencies": { + "css-tree": "^3.2.1" + }, + "peerDependenciesMeta": { + "css-tree": { + "optional": true + } + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-4.0.0.tgz", + "integrity": "sha512-QxULHAm7cNu72w97JUNCBFODFaXpbDg+dP8b/oWFAZ2MTRppA3U00Y2L1HqaS4J6yBqxwa/Y3nMBaxVKbB/NsA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=20.19.0" + } + }, "node_modules/@emnapi/core": { "version": "1.9.0", "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.0.tgz", @@ -116,6 +313,42 @@ "tslib": "^2.4.0" } }, + "node_modules/@exodus/bytes": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/@exodus/bytes/-/bytes-1.15.0.tgz", + "integrity": 
"sha512-UY0nlA+feH81UGSHv92sLEPLCeZFjXOuHhrIo0HQydScuQc8s0A7kL/UdgwgDq8g8ilksmuoF35YVTNphV2aBQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + }, + "peerDependencies": { + "@noble/hashes": "^1.8.0 || ^2.0.0" + }, + "peerDependenciesMeta": { + "@noble/hashes": { + "optional": true + } + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, "node_modules/@jridgewell/gen-mapping": { "version": "0.3.13", "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", @@ -209,6 +442,13 @@ "node": ">= 8" } }, + "node_modules/@one-ini/wasm": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", + "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", + "dev": true, + "license": "MIT" + }, "node_modules/@oxc-project/runtime": { "version": "0.115.0", "resolved": "https://registry.npmjs.org/@oxc-project/runtime/-/runtime-0.115.0.tgz", @@ -229,6 +469,17 @@ "url": "https://github.com/sponsors/Boshen" } }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, 
"node_modules/@rolldown/binding-android-arm64": { "version": "1.0.0-rc.9", "resolved": "https://registry.npmjs.org/@rolldown/binding-android-arm64/-/binding-android-arm64-1.0.0-rc.9.tgz", @@ -491,6 +742,13 @@ "dev": true, "license": "MIT" }, + "node_modules/@standard-schema/spec": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@standard-schema/spec/-/spec-1.1.0.tgz", + "integrity": "sha512-l2aFy5jALhniG5HgqrD6jXLi/rUWrKvqN/qJx6yoJsgKhblVd+iqqU4RCXavm/jPityDo5TCvKMnpjKnOriy0w==", + "dev": true, + "license": "MIT" + }, "node_modules/@tybys/wasm-util": { "version": "0.10.1", "resolved": "https://registry.npmjs.org/@tybys/wasm-util/-/wasm-util-0.10.1.tgz", @@ -502,6 +760,31 @@ "tslib": "^2.4.0" } }, + "node_modules/@types/chai": { + "version": "5.2.3", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-5.2.3.tgz", + "integrity": "sha512-Mw558oeA9fFbv65/y4mHtXDs9bPnFMZAL/jxdPFUpOHHIXX91mcgEHbS5Lahr+pwZFR8A7GQleRWeI6cGFC2UA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/deep-eql": "*", + "assertion-error": "^2.0.1" + } + }, + "node_modules/@types/deep-eql": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/deep-eql/-/deep-eql-4.0.2.tgz", + "integrity": "sha512-c9h9dVVMigMPc4bwTvC5dxqtqJZwQPePsWjPlpSOnojbor6pGqdk541lfA7AqFQr5pB1BRdq0juY9db81BwyFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/node": { "version": "24.12.0", "resolved": "https://registry.npmjs.org/@types/node/-/node-24.12.0.tgz", @@ -529,6 +812,129 @@ "vue": "^3.2.25" } }, + "node_modules/@vitest/expect": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-4.1.0.tgz", + "integrity": 
"sha512-EIxG7k4wlWweuCLG9Y5InKFwpMEOyrMb6ZJ1ihYu02LVj/bzUwn2VMU+13PinsjRW75XnITeFrQBMH5+dLvCDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@standard-schema/spec": "^1.1.0", + "@types/chai": "^5.2.2", + "@vitest/spy": "4.1.0", + "@vitest/utils": "4.1.0", + "chai": "^6.2.2", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-4.1.0.tgz", + "integrity": "sha512-evxREh+Hork43+Y4IOhTo+h5lGmVRyjqI739Rz4RlUPqwrkFFDF6EMvOOYjTx4E8Tl6gyCLRL8Mu7Ry12a13Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "4.1.0", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.21" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0-0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-4.1.0.tgz", + "integrity": "sha512-3RZLZlh88Ib0J7NQTRATfc/3ZPOnSUn2uDBUoGNn5T36+bALixmzphN26OUD3LRXWkJu4H0s5vvUeqBiw+kS0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-4.1.0.tgz", + "integrity": 
"sha512-Duvx2OzQ7d6OjchL+trw+aSrb9idh7pnNfxrklo14p3zmNL4qPCDeIJAK+eBKYjkIwG96Bc6vYuxhqDXQOWpoQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "4.1.0", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-4.1.0.tgz", + "integrity": "sha512-0Vy9euT1kgsnj1CHttwi9i9o+4rRLEaPRSOJ5gyv579GJkNpgJK+B4HSv/rAWixx2wdAFci1X4CEPjiu2bXIMg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.0", + "@vitest/utils": "4.1.0", + "magic-string": "^0.30.21", + "pathe": "^2.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-4.1.0.tgz", + "integrity": "sha512-pz77k+PgNpyMDv2FV6qmk5ZVau6c3R8HC8v342T2xlFxQKTrSeYw9waIJG8KgV9fFwAtTu4ceRzMivPTH6wSxw==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-4.1.0.tgz", + "integrity": "sha512-XfPXT6a8TZY3dcGY8EdwsBulFCIw+BeeX0RZn2x/BtiY/75YGh8FeWGG8QISN/WhaqSrE2OrlDgtF8q5uhOTmw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "4.1.0", + "convert-source-map": "^2.0.0", + "tinyrainbow": "^3.0.3" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, "node_modules/@volar/language-core": { "version": "2.4.28", "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.28.tgz", @@ -680,6 +1086,17 @@ "integrity": "sha512-YXgQ7JjaO18NeK2K9VTbDHaFy62WrObMa6XERNfNOkAhD1F1oDSf3ZJ7K6GqabZ0BvSDHajp8qfS5Sa2I9n8uQ==", "license": "MIT" }, + "node_modules/@vue/test-utils": { + "version": "2.4.6", + "resolved": 
"https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.4.6.tgz", + "integrity": "sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-beautify": "^1.14.9", + "vue-component-type-helpers": "^2.0.0" + } + }, "node_modules/@vue/tsconfig": { "version": "0.9.0", "resolved": "https://registry.npmjs.org/@vue/tsconfig/-/tsconfig-0.9.0.tgz", @@ -699,6 +1116,16 @@ } } }, + "node_modules/abbrev": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", + "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, "node_modules/alien-signals": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-3.1.2.tgz", @@ -706,6 +1133,32 @@ "dev": true, "license": "MIT" }, + "node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, "node_modules/any-promise": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", @@ -747,6 +1200,16 @@ "dev": true, "license": "MIT" }, + 
"node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, "node_modules/autoprefixer": { "version": "10.4.27", "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.27.tgz", @@ -784,6 +1247,13 @@ "postcss": "^8.1.0" } }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, "node_modules/baseline-browser-mapping": { "version": "2.10.8", "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.8.tgz", @@ -797,6 +1267,16 @@ "node": ">=6.0.0" } }, + "node_modules/bidi-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/bidi-js/-/bidi-js-1.0.3.tgz", + "integrity": "sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "require-from-string": "^2.0.2" + } + }, "node_modules/binary-extensions": { "version": "2.3.0", "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", @@ -810,6 +1290,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/brace-expansion": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, "node_modules/braces": { "version": "3.0.3", "resolved": 
"https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", @@ -888,6 +1378,16 @@ ], "license": "CC-BY-4.0" }, + "node_modules/chai": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/chai/-/chai-6.2.2.tgz", + "integrity": "sha512-NUPRluOfOiTKBKvWPtSD4PhFvWCqOi0BGStNWs57X9js7XGTprSmFoz5F0tWhR4WPjNeR9jXqdC7/UpSJTnlRg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/chokidar": { "version": "3.6.0", "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", @@ -926,6 +1426,26 @@ "node": ">= 6" } }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, "node_modules/commander": { "version": "4.1.1", "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", @@ -936,6 +1456,53 @@ "node": ">= 6" } }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": 
"sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/css-tree": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-3.2.1.tgz", + "integrity": "sha512-X7sjQzceUhu1u7Y/ylrRZFU2FS6LRiFVp6rKLPg23y3x3c3DOKAwuXGDp+PAGjh6CSnCjYeAul8pcT8bAl+lSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "mdn-data": "2.27.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12.20.0 || ^14.13.0 || >=15.0.0" + } + }, "node_modules/cssesc": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", @@ -955,6 +1522,27 @@ "integrity": "sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", "license": "MIT" }, + "node_modules/data-urls": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-7.0.0.tgz", + "integrity": "sha512-23XHcCF+coGYevirZceTVD7NdJOqVn+49IHyxgszm+JIiHLoB2TkmPtsYkNWT1pvRSGkc35L6NHs0yHkN2SumA==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^5.0.0", + "whatwg-url": "^16.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, "node_modules/detect-libc": { 
"version": "2.1.2", "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz", @@ -979,6 +1567,42 @@ "dev": true, "license": "MIT" }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/editorconfig": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-1.0.7.tgz", + "integrity": "sha512-e0GOtq/aTQhVdNyDU9e02+wz9oDDM+SIOQxWME2QRjzRX5yyLAuHDE+0aE8vHb9XRC8XD37eO2u57+F09JqFhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@one-ini/wasm": "0.1.1", + "commander": "^10.0.0", + "minimatch": "^9.0.1", + "semver": "^7.5.3" + }, + "bin": { + "editorconfig": "bin/editorconfig" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/editorconfig/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, "node_modules/electron-to-chromium": { "version": "1.5.313", "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.313.tgz", @@ -986,6 +1610,13 @@ "dev": true, "license": "ISC" }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, "node_modules/entities": { "version": "7.0.1", "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.1.tgz", @@ -998,6 +1629,13 @@ "url": "https://github.com/fb55/entities?sponsor=1" } }, 
+ "node_modules/es-module-lexer": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-2.0.0.tgz", + "integrity": "sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==", + "dev": true, + "license": "MIT" + }, "node_modules/escalade": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", @@ -1014,6 +1652,16 @@ "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", "license": "MIT" }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/fast-glob": { "version": "3.3.3", "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", @@ -1085,6 +1733,23 @@ "node": ">=8" } }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/fraction.js": { "version": "5.3.4", "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", @@ -1124,6 +1789,28 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "deprecated": 
"Old versions of glob are not supported, and contain widely publicized security vulnerabilities, which have been fixed in the current version. Please update. Support for old versions may be purchased (at exorbitant rates) by contacting i@izs.me", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/glob-parent": { "version": "6.0.2", "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", @@ -1150,6 +1837,26 @@ "node": ">= 0.4" } }, + "node_modules/html-encoding-sniffer": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-6.0.0.tgz", + "integrity": "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@exodus/bytes": "^1.6.0" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, "node_modules/is-binary-path": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", @@ -1189,6 +1896,16 @@ "node": ">=0.10.0" } }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + 
} + }, "node_modules/is-glob": { "version": "4.0.3", "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", @@ -1212,6 +1929,36 @@ "node": ">=0.12.0" } }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, "node_modules/jiti": { "version": "1.21.7", "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", @@ -1222,6 +1969,79 @@ "jiti": "bin/jiti.js" } }, + "node_modules/js-beautify": { + "version": "1.15.4", + "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.4.tgz", + "integrity": "sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==", + "dev": true, + "license": "MIT", + "dependencies": { + "config-chain": "^1.1.13", + "editorconfig": "^1.0.4", + "glob": "^10.4.2", + "js-cookie": "^3.0.5", + "nopt": "^7.2.1" + }, + "bin": { + "css-beautify": "js/bin/css-beautify.js", + "html-beautify": "js/bin/html-beautify.js", + "js-beautify": "js/bin/js-beautify.js" + 
}, + "engines": { + "node": ">=14" + } + }, + "node_modules/js-cookie": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", + "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/jsdom": { + "version": "29.0.0", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-29.0.0.tgz", + "integrity": "sha512-9FshNB6OepopZ08unmmGpsF7/qCjxGPbo3NbgfJAnPeHXnsODE9WWffXZtRFRFe0ntzaAOcSKNJFz8wiyvF1jQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^5.0.1", + "@asamuzakjp/dom-selector": "^7.0.2", + "@bramus/specificity": "^2.4.2", + "@csstools/css-syntax-patches-for-csstree": "^1.1.1", + "@exodus/bytes": "^1.15.0", + "css-tree": "^3.2.1", + "data-urls": "^7.0.0", + "decimal.js": "^10.6.0", + "html-encoding-sniffer": "^6.0.0", + "is-potential-custom-element-name": "^1.0.1", + "lru-cache": "^11.2.7", + "parse5": "^8.0.0", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^6.0.1", + "undici": "^7.24.3", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^8.0.1", + "whatwg-mimetype": "^5.0.0", + "whatwg-url": "^16.0.1", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": "^20.19.0 || ^22.13.0 || >=24.0.0" + }, + "peerDependencies": { + "canvas": "^3.0.0" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, "node_modules/lightningcss": { "version": "1.32.0", "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.32.0.tgz", @@ -1503,6 +2323,16 @@ "dev": true, "license": "MIT" }, + "node_modules/lru-cache": { + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { 
+ "node": "20 || >=22" + } + }, "node_modules/magic-string": { "version": "0.30.21", "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", @@ -1512,6 +2342,13 @@ "@jridgewell/sourcemap-codec": "^1.5.5" } }, + "node_modules/mdn-data": { + "version": "2.27.1", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.27.1.tgz", + "integrity": "sha512-9Yubnt3e8A0OKwxYSXyhLymGW4sCufcLG6VdiDdUGVkPhpqLxlvP5vl1983gQjJl3tqbrM731mjaZaP68AgosQ==", + "dev": true, + "license": "CC0-1.0" + }, "node_modules/merge2": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", @@ -1549,6 +2386,32 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/minimatch": { + "version": "9.0.9", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz", + "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.2" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", + "dev": true, + "license": "BlueOak-1.0.0", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, "node_modules/muggle-string": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", @@ -1593,6 +2456,22 @@ "dev": true, "license": "MIT" }, + "node_modules/nopt": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^2.0.0" + 
}, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, "node_modules/normalize-path": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", @@ -1623,6 +2502,50 @@ "node": ">= 6" } }, + "node_modules/obug": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/obug/-/obug-2.1.1.tgz", + "integrity": "sha512-uTqF9MuPraAQ+IsnPf366RG4cP9RtUi7MLO1N3KEc+wb0a6yKpeL0lmk2IB1jY5KHPAlTc6T/JRdC/YqxHNwkQ==", + "dev": true, + "funding": [ + "https://github.com/sponsors/sxzz", + "https://opencollective.com/debug" + ], + "license": "MIT" + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/parse5": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-8.0.0.tgz", + "integrity": "sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": "sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, "node_modules/path-browserify": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", @@ -1630,6 +2553,16 @@ "dev": true, "license": "MIT" }, + 
"node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/path-parse": { "version": "1.0.7", "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", @@ -1637,6 +2570,37 @@ "dev": true, "license": "MIT" }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/pathe": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-2.0.3.tgz", + "integrity": "sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==", + "dev": true, + "license": "MIT" + }, "node_modules/picocolors": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", @@ -1838,6 +2802,23 @@ "dev": true, "license": "MIT" }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": 
"sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", + "dev": true, + "license": "ISC" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -1895,6 +2876,16 @@ "url": "https://github.com/sponsors/jonschlinkert" } }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/resolve": { "version": "1.22.11", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", @@ -1992,6 +2983,75 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": "sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/semver": { + "version": "7.7.4", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", + "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": 
"https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, "node_modules/source-map-js": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", @@ -2001,6 +3061,124 @@ "node": ">=0.10.0" } }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-4.0.0.tgz", + "integrity": 
"sha512-zUMPtQ/HBY3/50VbpkupYHbRroTRZJPRLvreamgErJVys0ceuzMkD44J/QjqhHjOzK42GQ3QZIeFG1OYfOtKqQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": 
"sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.2.2" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, "node_modules/sucrase": { "version": "3.35.1", "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", @@ -2037,6 +3215,13 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, "node_modules/tailwindcss": { "version": "3.4.19", "resolved": 
"https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", @@ -2098,6 +3283,23 @@ "node": ">=0.8" } }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-1.0.4.tgz", + "integrity": "sha512-u9r3uZC0bdpGOXtlxUIdwf9pkmvhqJdrVCH9fapQtgy/OeTTMZ1nqH7agtvEfmGui6e1XxjcdrlxvxJvc3sMqw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, "node_modules/tinyglobby": { "version": "0.2.15", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", @@ -2115,6 +3317,36 @@ "url": "https://github.com/sponsors/SuperchupuDev" } }, + "node_modules/tinyrainbow": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-3.1.0.tgz", + "integrity": "sha512-Bf+ILmBgretUrdJxzXM0SgXLZ3XfiaUuOj/IKQHuTXip+05Xn+uyEYdVg0kYDipTBcLrCVyUzAPz7QmArb0mmw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tldts": { + "version": "7.0.25", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.25.tgz", + "integrity": "sha512-keinCnPbwXEUG3ilrWQZU+CqcTTzHq9m2HhoUP2l7Xmi8l1LuijAXLpAJ5zRW+ifKTNscs4NdCkfkDCBYm352w==", + "dev": true, + "license": "MIT", + "dependencies": { + "tldts-core": "^7.0.25" + }, + "bin": { + "tldts": "bin/cli.js" + } + }, + "node_modules/tldts-core": { + "version": "7.0.25", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.25.tgz", + "integrity": "sha512-ZjCZK0rppSBu7rjHYDYsEaMOIbbT+nWF57hKkv4IUmZWBNrBWBOjIElc0mKRgLM8bm7x/BBlof6t2gi/Oq/Asw==", + "dev": true, + "license": "MIT" + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", @@ -2128,6 +3360,32 @@ "node": ">=8.0" } }, + "node_modules/tough-cookie": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-6.0.1.tgz", + "integrity": "sha512-LktZQb3IeoUWB9lqR5EWTHgW/VTITCXg4D21M+lvybRVdylLrRMnqaIONLVb5mav8vM19m44HIcGq4qASeu2Qw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "tldts": "^7.0.5" + }, + "engines": { + "node": ">=16" + } + }, + "node_modules/tr46": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-6.0.0.tgz", + "integrity": "sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=20" + } + }, "node_modules/ts-interface-checker": { "version": "0.1.13", "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", @@ -2157,6 +3415,16 @@ "node": ">=14.17" } }, + "node_modules/undici": { + "version": "7.24.3", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.3.tgz", + "integrity": "sha512-eJdUmK/Wrx2d+mnWWmwwLRyA7OQCkLap60sk3dOK4ViZR7DKwwptwuIvFBg2HaiP9ESaEdhtpSymQPvytpmkCA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20.18.1" + } + }, "node_modules/undici-types": { "version": "7.16.0", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-7.16.0.tgz", @@ -2281,6 +3549,88 @@ } } }, + "node_modules/vitest": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-4.1.0.tgz", + "integrity": "sha512-YbDrMF9jM2Lqc++2530UourxZHmkKLxrs4+mYhEwqWS97WJ7wOYEkcr+QfRgJ3PW9wz3odRijLZjHEaRLTNbqw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "4.1.0", + "@vitest/mocker": "4.1.0", + "@vitest/pretty-format": "4.1.0", + "@vitest/runner": "4.1.0", + "@vitest/snapshot": "4.1.0", + "@vitest/spy": "4.1.0", + 
"@vitest/utils": "4.1.0", + "es-module-lexer": "^2.0.0", + "expect-type": "^1.3.0", + "magic-string": "^0.30.21", + "obug": "^2.1.1", + "pathe": "^2.0.3", + "picomatch": "^4.0.3", + "std-env": "^4.0.0-rc.1", + "tinybench": "^2.9.0", + "tinyexec": "^1.0.2", + "tinyglobby": "^0.2.15", + "tinyrainbow": "^3.0.3", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0-0", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^20.0.0 || ^22.0.0 || >=24.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@opentelemetry/api": "^1.9.0", + "@types/node": "^20.0.0 || ^22.0.0 || >=24.0.0", + "@vitest/browser-playwright": "4.1.0", + "@vitest/browser-preview": "4.1.0", + "@vitest/browser-webdriverio": "4.1.0", + "@vitest/ui": "4.1.0", + "happy-dom": "*", + "jsdom": "*", + "vite": "^6.0.0 || ^7.0.0 || ^8.0.0-0" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@opentelemetry/api": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser-playwright": { + "optional": true + }, + "@vitest/browser-preview": { + "optional": true + }, + "@vitest/browser-webdriverio": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + }, + "vite": { + "optional": false + } + } + }, "node_modules/vscode-uri": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", @@ -2309,6 +3659,13 @@ } } }, + "node_modules/vue-component-type-helpers": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/vue-component-type-helpers/-/vue-component-type-helpers-2.2.12.tgz", + "integrity": "sha512-YbGqHZ5/eW4SnkPNR44mKVc6ZKQoRs/Rux1sxC6rdwXb4qpbOSYfDr9DsTHolOTGmIKgM9j141mZbBeg05R1pw==", + "dev": true, + "license": "MIT" + }, "node_modules/vue-router": { "version": "4.6.4", "resolved": 
"https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", @@ -2340,6 +3697,202 @@ "peerDependencies": { "typescript": ">=5.0.0" } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-8.0.1.tgz", + "integrity": "sha512-BMhLD/Sw+GbJC21C/UgyaZX41nPt8bUTg+jWyDeg7e7YN4xOM05YPSIXceACnXVtqyEw/LMClUQMtMZ+PGGpqQ==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=20" + } + }, + "node_modules/whatwg-mimetype": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-5.0.0.tgz", + "integrity": "sha512-sXcNcHOC51uPGF0P/D4NVtrkjSU2fNsm9iog4ZvZJsL3rjoDAzXZhkm2MWt1y+PUdggKAYVoMAIYcs78wJ51Cw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=20" + } + }, + "node_modules/whatwg-url": { + "version": "16.0.1", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-16.0.1.tgz", + "integrity": "sha512-1to4zXBxmXHV3IiSSEInrreIlu02vUOvrhxJJH5vcxYTBDAx51cqZiKdyTxlecdKNSjj8EcxGBxNf6Vg+945gw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@exodus/bytes": "^1.11.0", + "tr46": "^6.0.0", + "webidl-conversions": "^8.0.1" + }, + "engines": { + "node": "^20.19.0 || ^22.12.0 || >=24.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, 
+ "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/ansi-styles": { + 
"version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/xmlchars": { + "version": 
"2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" } } } diff --git a/web/frontend/package.json b/web/frontend/package.json index 203c214..b32ed6b 100644 --- a/web/frontend/package.json +++ b/web/frontend/package.json @@ -6,7 +6,9 @@ "scripts": { "dev": "vite", "build": "vue-tsc -b && vite build", - "preview": "vite preview" + "preview": "vite preview", + "test": "vitest run", + "test:watch": "vitest" }, "dependencies": { "vue": "^3.5.30", @@ -15,12 +17,15 @@ "devDependencies": { "@types/node": "^24.12.0", "@vitejs/plugin-vue": "^6.0.5", + "@vue/test-utils": "^2.4.6", "@vue/tsconfig": "^0.9.0", "autoprefixer": "^10.4.27", + "jsdom": "^29.0.0", "postcss": "^8.5.8", "tailwindcss": "^3.4.19", "typescript": "~5.9.3", "vite": "^8.0.0", + "vitest": "^4.1.0", "vue-tsc": "^3.2.5" } -} +} \ No newline at end of file diff --git a/web/frontend/src/__tests__/filter-persistence.test.ts b/web/frontend/src/__tests__/filter-persistence.test.ts new file mode 100644 index 0000000..1c0249f --- /dev/null +++ b/web/frontend/src/__tests__/filter-persistence.test.ts @@ -0,0 +1,511 @@ +/** + * KIN-011/KIN-014: Тесты фильтра статусов при навигации + * + * Проверяет: + * 1. Клик по кнопке статуса обновляет URL (?status=...) + * 2. Прямая ссылка с query param активирует нужную кнопку + * 3. Фильтр показывает только задачи с нужным статусом + * 4. Сброс фильтра (✕) удаляет param из URL + * 5. Без фильтра отображаются все задачи + * 6. goBack() вызывает router.back() при наличии истории + * 7. goBack() делает push на /project/:id без истории + * 8. 
После router.back() URL проекта восстанавливается с фильтром + */ + +import { describe, it, expect, vi, beforeEach } from 'vitest' +import { mount, flushPromises } from '@vue/test-utils' +import { createRouter, createMemoryHistory } from 'vue-router' +import ProjectView from '../views/ProjectView.vue' +import TaskDetail from '../views/TaskDetail.vue' + +// Мок api — factory без ссылок на внешние переменные (vi.mock хоистится) +vi.mock('../api', () => ({ + api: { + project: vi.fn(), + taskFull: vi.fn(), + runTask: vi.fn(), + auditProject: vi.fn(), + createTask: vi.fn(), + patchTask: vi.fn(), + }, +})) + +// Импортируем мок после объявления vi.mock +import { api } from '../api' + +const Stub = { template: '
' } + +const MOCK_PROJECT = { + id: 'KIN', + name: 'Kin', + path: '/projects/kin', + status: 'active', + priority: 5, + tech_stack: ['python', 'vue'], + created_at: '2024-01-01', + total_tasks: 3, + done_tasks: 1, + active_tasks: 1, + blocked_tasks: 0, + review_tasks: 0, + tasks: [ + { + id: 'KIN-001', project_id: 'KIN', title: 'Task 1', status: 'pending', + priority: 5, assigned_role: null, parent_task_id: null, + brief: null, spec: null, created_at: '2024-01-01', updated_at: '2024-01-01', + }, + { + id: 'KIN-002', project_id: 'KIN', title: 'Task 2', status: 'in_progress', + priority: 3, assigned_role: null, parent_task_id: null, + brief: null, spec: null, created_at: '2024-01-01', updated_at: '2024-01-01', + }, + { + id: 'KIN-003', project_id: 'KIN', title: 'Task 3', status: 'done', + priority: 1, assigned_role: null, parent_task_id: null, + brief: null, spec: null, created_at: '2024-01-01', updated_at: '2024-01-01', + }, + ], + decisions: [], + modules: [], +} + +const MOCK_TASK_FULL = { + id: 'KIN-002', + project_id: 'KIN', + title: 'Task 2', + status: 'in_progress', + priority: 3, + assigned_role: null, + parent_task_id: null, + brief: null, + spec: null, + created_at: '2024-01-01', + updated_at: '2024-01-01', + pipeline_steps: [], + related_decisions: [], +} + +function makeRouter() { + return createRouter({ + history: createMemoryHistory(), + routes: [ + { path: '/', component: Stub }, + { path: '/project/:id', component: ProjectView, props: true }, + { path: '/task/:id', component: TaskDetail, props: true }, + ], + }) +} + +// localStorage mock для jsdom-окружения +const localStorageMock = (() => { + let store: Record