From ae21e48b654caa345fc857e0dfa521d299c2320e Mon Sep 17 00:00:00 2001
From: Gros Frumos
Date: Mon, 16 Mar 2026 06:59:46 +0200
Subject: [PATCH 01/57] kin: KIN-048 Post-pipeline hook: autocommit after
 successful task completion. git add -A && git commit -m 'kin: TASK_ID
 TITLE'. Fires automatically, like rebuild-frontend.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 agents/prompts/learner.md |  41 ++
 agents/prompts/pm.md      |  11 +
 agents/runner.py          | 252 +++++++++++-
 cli/main.py               |  12 +
 core/context_builder.py   |   1 +
 core/db.py                |  49 +++
 core/models.py            |  35 ++
 tests/test_api.py         |  37 ++
 tests/test_auto_mode.py   |  77 ++--
 tests/test_hooks.py       | 112 ++++-
 tests/test_models.py      |  92 +++++
 tests/test_runner.py      | 838 +++++++++++++++++++++++++++++++++++++-
 web/api.py                |  62 ++-
 13 files changed, 1554 insertions(+), 65 deletions(-)
 create mode 100644 agents/prompts/learner.md

diff --git a/agents/prompts/learner.md b/agents/prompts/learner.md
new file mode 100644
index 0000000..81fa1d3
--- /dev/null
+++ b/agents/prompts/learner.md
@@ -0,0 +1,41 @@
+You are a learning extractor for the Kin multi-agent orchestrator.
+
+Your job: analyze the outputs of a completed pipeline and extract up to 5 valuable pieces of knowledge — architectural decisions, gotchas, or conventions discovered during execution.
+
+## Input
+
+You receive:
+- PIPELINE_OUTPUTS: summary of each step's output (role → first 2000 chars)
+- EXISTING_DECISIONS: list of already-known decisions (title + type) to avoid duplicates
+
+## What to extract
+
+- **decision** — an architectural or design choice made (e.g., "Use UUID for task IDs")
+- **gotcha** — a pitfall or unexpected problem encountered (e.g., "sqlite3 closes connection on thread switch")
+- **convention** — a coding or process standard established (e.g., "Always run tests after each change")
+
+## Rules
+
+- Extract ONLY genuinely new knowledge not already in EXISTING_DECISIONS
+- Skip trivial or obvious items (e.g., "write clean code")
+- Skip task-specific results that won't generalize (e.g., "fixed bug in useSearch.ts line 42")
+- Each decision must be actionable and reusable across future tasks
+- Extract at most 5 decisions total; a few high-quality decisions beat many low-quality ones
+- If nothing valuable is found, return an empty list
+
+## Output format
+
+Return ONLY valid JSON (no markdown, no explanation):
+
+```json
+{
+  "decisions": [
+    {
+      "type": "decision",
+      "title": "Short memorable title",
+      "description": "Clear explanation of what was decided and why",
+      "tags": ["optional", "tags"]
+    }
+  ]
+}
+```
diff --git a/agents/prompts/pm.md b/agents/prompts/pm.md
index 9120f82..910cbdd 100644
--- a/agents/prompts/pm.md
+++ b/agents/prompts/pm.md
@@ -30,6 +30,16 @@ You receive:
 - Don't assign specialists who aren't needed.
 - If a task is blocked or unclear, say so — don't guess.
 
+## Completion mode selection
+
+Set `completion_mode` based on the following rules (in priority order):
+
+1. If `project.execution_mode` is set — use it as the default.
+2. Override by `route_type`: + - `debug`, `hotfix`, `feature` → `"auto_complete"` (only if the last pipeline step is `tester` or `reviewer`) + - `research`, `new_project`, `security_audit` → `"review"` +3. Fallback: `"review"` + ## Output format Return ONLY valid JSON (no markdown, no explanation): @@ -37,6 +47,7 @@ Return ONLY valid JSON (no markdown, no explanation): ```json { "analysis": "Brief analysis of what needs to be done", + "completion_mode": "auto_complete", "pipeline": [ { "role": "debugger", diff --git a/agents/runner.py b/agents/runner.py index 33dffbe..04a1615 100644 --- a/agents/runner.py +++ b/agents/runner.py @@ -4,7 +4,9 @@ Each agent = separate process with isolated context. """ import json +import logging import os +import shutil import sqlite3 import subprocess import time @@ -13,6 +15,50 @@ from typing import Any import re +_logger = logging.getLogger("kin.runner") + + +# Extra PATH entries to inject when searching for claude CLI. +# launchctl daemons start with a stripped PATH that may omit these. +_EXTRA_PATH_DIRS = [ + "/opt/homebrew/bin", + "/opt/homebrew/sbin", + "/usr/local/bin", + "/usr/local/sbin", +] + + +def _build_claude_env() -> dict: + """Return an env dict with an extended PATH that includes common CLI tool locations. + + Merges _EXTRA_PATH_DIRS with the current process PATH, deduplicating entries. + Also resolves ~/.nvm/versions/node/*/bin globs that launchctl may not expand. + """ + env = os.environ.copy() + existing = env.get("PATH", "").split(":") + + extra = list(_EXTRA_PATH_DIRS) + + # Expand nvm node bin dirs dynamically + nvm_root = Path.home() / ".nvm" / "versions" / "node" + if nvm_root.is_dir(): + for node_ver in sorted(nvm_root.iterdir(), reverse=True): + bin_dir = node_ver / "bin" + if bin_dir.is_dir(): + extra.append(str(bin_dir)) + + seen = set(existing) + new_dirs = [d for d in extra if d and d not in seen] + env["PATH"] = ":".join(new_dirs + existing) + return env + + +def _resolve_claude_cmd() -> str: + """Return the full path to the claude CLI, or 'claude' as fallback.""" + extended_env = _build_claude_env() + found = shutil.which("claude", path=extended_env["PATH"]) + return found or "claude" + from core import models from core.context_builder import build_context, format_prompt from core.hooks import run_hooks @@ -116,10 +162,12 @@ def _run_claude( working_dir: str | None = None, allow_write: bool = False, noninteractive: bool = False, + timeout: int | None = None, ) -> dict: """Execute claude CLI as subprocess. 
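+    Env and timeout behavior added in this patch: the child process
+    inherits the extended PATH from _build_claude_env(), so
+    launchctl-started daemons can still locate the claude binary; the
+    timeout is the explicit ``timeout`` arg if given, else the
+    KIN_AGENT_TIMEOUT env var, else 600 seconds.
+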
Returns dict with output, returncode, etc.""" + claude_cmd = _resolve_claude_cmd() cmd = [ - "claude", + claude_cmd, "-p", prompt, "--output-format", "json", "--model", model, @@ -128,7 +176,9 @@ def _run_claude( cmd.append("--dangerously-skip-permissions") is_noninteractive = noninteractive or os.environ.get("KIN_NONINTERACTIVE") == "1" - timeout = 300 if is_noninteractive else 600 + if timeout is None: + timeout = int(os.environ.get("KIN_AGENT_TIMEOUT") or 600) + env = _build_claude_env() try: proc = subprocess.run( @@ -137,6 +187,7 @@ def _run_claude( text=True, timeout=timeout, cwd=working_dir, + env=env, stdin=subprocess.DEVNULL if is_noninteractive else None, ) except FileNotFoundError: @@ -377,6 +428,179 @@ def _is_permission_error(result: dict) -> bool: return any(re.search(p, text) for p in PERMISSION_PATTERNS) +# --------------------------------------------------------------------------- +# Autocommit: git add -A && git commit after successful pipeline +# --------------------------------------------------------------------------- + +def _run_autocommit( + conn: sqlite3.Connection, + task_id: str, + project_id: str, +) -> None: + """Auto-commit changes after successful pipeline completion. + + Runs: git add -A && git commit -m 'kin: {task_id} {title}'. + Silently skips if nothing to commit (exit code 1) or project path not found. + Never raises — autocommit errors must never block the pipeline. + Uses stderr=subprocess.DEVNULL per decision #30. + """ + task = models.get_task(conn, task_id) + project = models.get_project(conn, project_id) + if not task or not project: + return + + if not project.get("autocommit_enabled"): + return + + project_path = Path(project["path"]).expanduser() + if not project_path.is_dir(): + return + + working_dir = str(project_path) + env = _build_claude_env() + git_cmd = shutil.which("git", path=env["PATH"]) or "git" + + title = (task.get("title") or "").replace('"', "'").replace("\n", " ").replace("\r", "") + commit_msg = f"kin: {task_id} {title}" + + try: + subprocess.run( + [git_cmd, "add", "-A"], + cwd=working_dir, + env=env, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + result = subprocess.run( + [git_cmd, "commit", "-m", commit_msg], + cwd=working_dir, + env=env, + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL, + ) + if result.returncode == 0: + _logger.info("Autocommit: %s", commit_msg) + else: + _logger.debug("Autocommit: nothing to commit for %s", task_id) + except Exception as exc: + _logger.warning("Autocommit failed for %s: %s", task_id, exc) + + +# --------------------------------------------------------------------------- +# Auto-learning: extract decisions from pipeline results +# --------------------------------------------------------------------------- + +VALID_DECISION_TYPES = {"decision", "gotcha", "convention"} + +def _run_learning_extraction( + conn: sqlite3.Connection, + task_id: str, + project_id: str, + step_results: list[dict], +) -> dict: + """Extract and save decisions from completed pipeline results. + + Calls the learner agent with step outputs + existing decisions, + parses the JSON response, and saves new decisions via add_decision_if_new. + Returns a summary dict with added/skipped counts. 
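+
+    Expected learner response shape (illustrative):
+
+        {"decisions": [{"type": "gotcha", "title": "...",
+                        "description": "...", "tags": ["..."]}]}
+
+    At most 5 items are saved; an invalid ``type`` falls back to
+    "decision", items missing a title or description are skipped, and
+    duplicates (same project, type, normalized title) are rejected by
+    add_decision_if_new.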
+ """ + learner_prompt_path = PROMPTS_DIR / "learner.md" + if not learner_prompt_path.exists(): + return {"added": 0, "skipped": 0, "error": "learner.md not found"} + + template = learner_prompt_path.read_text() + + # Summarize step outputs (first 2000 chars each) + step_summaries = {} + for r in step_results: + role = r.get("role", "unknown") + output = r.get("raw_output") or r.get("output") or "" + if isinstance(output, (dict, list)): + output = json.dumps(output, ensure_ascii=False) + step_summaries[role] = output[:2000] + + # Fetch existing decisions for dedup hint + existing = models.get_decisions(conn, project_id) + existing_hints = [ + {"title": d["title"], "type": d["type"]} + for d in existing + ] + + prompt_parts = [ + template, + "", + "## PIPELINE_OUTPUTS", + json.dumps(step_summaries, ensure_ascii=False, indent=2), + "", + "## EXISTING_DECISIONS", + json.dumps(existing_hints, ensure_ascii=False, indent=2), + ] + prompt = "\n".join(prompt_parts) + + learner_timeout = int(os.environ.get("KIN_LEARNER_TIMEOUT") or 120) + start = time.monotonic() + result = _run_claude(prompt, model="sonnet", noninteractive=True, timeout=learner_timeout) + duration = int(time.monotonic() - start) + + raw_output = result.get("output", "") + if not isinstance(raw_output, str): + raw_output = json.dumps(raw_output, ensure_ascii=False) + success = result["returncode"] == 0 + + # Log to agent_logs + models.log_agent_run( + conn, + project_id=project_id, + task_id=task_id, + agent_role="learner", + action="learn", + input_summary=f"project={project_id}, task={task_id}, steps={len(step_results)}", + output_summary=raw_output or None, + tokens_used=result.get("tokens_used"), + model="sonnet", + cost_usd=result.get("cost_usd"), + success=success, + error_message=result.get("error") if not success else None, + duration_seconds=duration, + ) + + parsed = _try_parse_json(raw_output) + if not isinstance(parsed, dict): + return {"added": 0, "skipped": 0, "error": "non-JSON learner output"} + + decisions = parsed.get("decisions", []) + if not isinstance(decisions, list): + return {"added": 0, "skipped": 0, "error": "invalid decisions format"} + + added = 0 + skipped = 0 + for item in decisions[:5]: + if not isinstance(item, dict): + continue + d_type = item.get("type", "decision") + if d_type not in VALID_DECISION_TYPES: + d_type = "decision" + d_title = (item.get("title") or "").strip() + d_desc = (item.get("description") or "").strip() + if not d_title or not d_desc: + continue + saved = models.add_decision_if_new( + conn, + project_id=project_id, + type=d_type, + title=d_title, + description=d_desc, + tags=item.get("tags") or [], + task_id=task_id, + ) + if saved: + added += 1 + else: + skipped += 1 + + return {"added": added, "skipped": skipped} + + # --------------------------------------------------------------------------- # Pipeline executor # --------------------------------------------------------------------------- @@ -485,7 +709,7 @@ def run_pipeline( if not result["success"]: # Auto mode: retry once with allow_write on permission error - if mode == "auto" and not allow_write and _is_permission_error(result): + if mode == "auto_complete" and not allow_write and _is_permission_error(result): task_modules = models.get_modules(conn, project_id) try: run_hooks(conn, project_id, task_id, @@ -555,8 +779,11 @@ def run_pipeline( task_modules = models.get_modules(conn, project_id) - if mode == "auto": - # Auto mode: skip review, approve immediately + last_role = steps[-1].get("role", "") if steps else "" + 
auto_eligible = last_role in {"tester", "reviewer"} + + if mode == "auto_complete" and auto_eligible: + # Auto-complete mode: last step is tester/reviewer — skip review, approve immediately models.update_task(conn, task_id, status="done") try: run_hooks(conn, project_id, task_id, @@ -586,7 +813,7 @@ def run_pipeline( pass else: # Review mode: wait for manual approval - models.update_task(conn, task_id, status="review") + models.update_task(conn, task_id, status="review", execution_mode="review") # Run post-pipeline hooks (failures don't affect pipeline status) try: @@ -595,6 +822,19 @@ def run_pipeline( except Exception: pass # Hook errors must never block pipeline completion + # Auto-learning: extract decisions from pipeline results + if results: + try: + _run_learning_extraction(conn, task_id, project_id, results) + except Exception: + pass # Learning errors must never block pipeline completion + + # Auto-commit changes after successful pipeline + try: + _run_autocommit(conn, task_id, project_id) + except Exception: + pass # Autocommit errors must never block pipeline completion + return { "success": True, "steps_completed": len(steps), diff --git a/cli/main.py b/cli/main.py index bc4ba61..b801cf0 100644 --- a/cli/main.py +++ b/cli/main.py @@ -586,6 +586,18 @@ def run_task(ctx, task_id, dry_run, allow_write): pipeline_steps = output["pipeline"] analysis = output.get("analysis", "") + # Save completion_mode from PM output to task (only if not already set by user) + task_current = models.get_task(conn, task_id) + if not task_current.get("execution_mode"): + pm_completion_mode = models.validate_completion_mode( + output.get("completion_mode", "review") + ) + models.update_task(conn, task_id, execution_mode=pm_completion_mode) + import logging + logging.getLogger("kin").info( + "PM set completion_mode=%s for task %s", pm_completion_mode, task_id + ) + click.echo(f"\nAnalysis: {analysis}") click.echo(f"Pipeline ({len(pipeline_steps)} steps):") for i, step in enumerate(pipeline_steps, 1): diff --git a/core/context_builder.py b/core/context_builder.py index fad1313..54802ce 100644 --- a/core/context_builder.py +++ b/core/context_builder.py @@ -110,6 +110,7 @@ def _slim_project(project: dict) -> dict: "path": project["path"], "tech_stack": project.get("tech_stack"), "language": project.get("language", "ru"), + "execution_mode": project.get("execution_mode"), } diff --git a/core/db.py b/core/db.py index b91d29c..d42f1fc 100644 --- a/core/db.py +++ b/core/db.py @@ -216,12 +216,61 @@ def _migrate(conn: sqlite3.Connection): conn.execute("ALTER TABLE tasks ADD COLUMN blocked_reason TEXT") conn.commit() + if "autocommit_enabled" not in proj_cols: + conn.execute("ALTER TABLE projects ADD COLUMN autocommit_enabled INTEGER DEFAULT 0") + conn.commit() + + # Rename legacy 'auto' → 'auto_complete' (KIN-063) + conn.execute( + "UPDATE projects SET execution_mode = 'auto_complete' WHERE execution_mode = 'auto'" + ) + conn.execute( + "UPDATE tasks SET execution_mode = 'auto_complete' WHERE execution_mode = 'auto'" + ) + conn.commit() + + +def _seed_default_hooks(conn: sqlite3.Connection): + """Seed default hooks for the kin project (idempotent). 
+ + Creates rebuild-frontend hook only when: + - project 'kin' exists in the projects table + - the hook doesn't already exist (no duplicate) + """ + kin_exists = conn.execute( + "SELECT 1 FROM projects WHERE id = 'kin'" + ).fetchone() + if not kin_exists: + return + + exists = conn.execute( + "SELECT 1 FROM hooks" + " WHERE project_id = 'kin'" + " AND name = 'rebuild-frontend'" + " AND event = 'pipeline_completed'" + ).fetchone() + if not exists: + conn.execute( + """INSERT INTO hooks (project_id, name, event, command, enabled) + VALUES ('kin', 'rebuild-frontend', 'pipeline_completed', + 'cd /Users/grosfrumos/projects/kin/web/frontend && npm run build', + 1)""" + ) + conn.commit() + + # Enable autocommit for kin project (opt-in, idempotent) + conn.execute( + "UPDATE projects SET autocommit_enabled=1 WHERE id='kin' AND autocommit_enabled=0" + ) + conn.commit() + def init_db(db_path: Path = DB_PATH) -> sqlite3.Connection: conn = get_connection(db_path) conn.executescript(SCHEMA) conn.commit() _migrate(conn) + _seed_default_hooks(conn) return conn diff --git a/core/models.py b/core/models.py index 0a4825b..7e4901a 100644 --- a/core/models.py +++ b/core/models.py @@ -14,6 +14,15 @@ VALID_TASK_STATUSES = [ "blocked", "decomposed", "cancelled", ] +VALID_COMPLETION_MODES = {"auto_complete", "review"} + + +def validate_completion_mode(value: str) -> str: + """Validate completion mode from LLM output. Falls back to 'review' if invalid.""" + if value in VALID_COMPLETION_MODES: + return value + return "review" + def _row_to_dict(row: sqlite3.Row | None) -> dict | None: """Convert sqlite3.Row to dict with JSON fields decoded.""" @@ -220,6 +229,32 @@ def add_decision( return _row_to_dict(row) +def add_decision_if_new( + conn: sqlite3.Connection, + project_id: str, + type: str, + title: str, + description: str, + category: str | None = None, + tags: list | None = None, + task_id: str | None = None, +) -> dict | None: + """Add a decision only if no existing one matches (project_id, type, normalized title). + + Returns the new decision dict, or None if skipped as duplicate. + """ + existing = conn.execute( + """SELECT id FROM decisions + WHERE project_id = ? AND type = ? 
+ AND lower(trim(title)) = lower(trim(?))""", + (project_id, type, title), + ).fetchone() + if existing: + return None + return add_decision(conn, project_id, type, title, description, + category=category, tags=tags, task_id=task_id) + + def get_decisions( conn: sqlite3.Connection, project_id: str, diff --git a/tests/test_api.py b/tests/test_api.py index 3109486..53ce417 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -342,6 +342,19 @@ def test_patch_task_empty_body_returns_400(client): assert r.status_code == 400 +def test_patch_task_execution_mode_auto_complete_accepted(client): + """KIN-063: execution_mode='auto_complete' принимается (200).""" + r = client.patch("/api/tasks/P1-001", json={"execution_mode": "auto_complete"}) + assert r.status_code == 200 + assert r.json()["execution_mode"] == "auto_complete" + + +def test_patch_task_execution_mode_auto_rejected(client): + """KIN-063: старое значение 'auto' должно отклоняться (400) — Decision #29.""" + r = client.patch("/api/tasks/P1-001", json={"execution_mode": "auto"}) + assert r.status_code == 400 + + # --------------------------------------------------------------------------- # KIN-022 — blocked_reason: регрессионные тесты # --------------------------------------------------------------------------- @@ -589,3 +602,27 @@ def test_run_kin_040_allow_write_true_ignored(client): Эндпоинт не имеет body-параметра, поэтому FastAPI не валидирует тело.""" r = client.post("/api/tasks/P1-001/run", json={"allow_write": True}) assert r.status_code == 202 + + +# --------------------------------------------------------------------------- +# KIN-058 — регрессионный тест: stderr=DEVNULL у Popen в web API +# --------------------------------------------------------------------------- + +def test_run_sets_stderr_devnull(client): + """Регрессионный тест KIN-058: stderr=DEVNULL всегда устанавливается в Popen, + чтобы stderr дочернего процесса не загрязнял логи uvicorn.""" + import subprocess as _subprocess + from unittest.mock import patch, MagicMock + with patch("web.api.subprocess.Popen") as mock_popen: + mock_proc = MagicMock() + mock_proc.pid = 77 + mock_popen.return_value = mock_proc + + r = client.post("/api/tasks/P1-001/run") + assert r.status_code == 202 + + call_kwargs = mock_popen.call_args[1] + assert call_kwargs.get("stderr") == _subprocess.DEVNULL, ( + "Регрессия KIN-058: stderr у Popen должен быть DEVNULL, " + "иначе вывод агента попадает в логи uvicorn" + ) diff --git a/tests/test_auto_mode.py b/tests/test_auto_mode.py index e71c1e7..eb73463 100644 --- a/tests/test_auto_mode.py +++ b/tests/test_auto_mode.py @@ -1,7 +1,8 @@ """ -Tests for KIN-012 auto mode features: +Tests for KIN-012/KIN-063 auto mode features: - TestAutoApprove: pipeline auto-approves (status → done) без ручного review + (KIN-063: auto_complete только если последний шаг — tester или reviewer) - TestAutoRerunOnPermissionDenied: runner делает retry при permission error, останавливается после одного retry (лимит = 1) - TestAutoFollowup: generate_followups вызывается сразу, без ожидания @@ -75,30 +76,30 @@ class TestAutoApprove: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_sets_status_done(self, mock_run, mock_hooks, mock_followup, conn): - """Auto-режим: статус задачи становится 'done', а не 'review'.""" + """Auto-complete режим: статус становится 'done', если последний шаг — tester.""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": 
[]} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find bug"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find bug"}, {"role": "tester", "brief": "verify fix"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True task = models.get_task(conn, "VDOL-001") - assert task["status"] == "done", "Auto-mode должен auto-approve: status=done" + assert task["status"] == "done", "Auto-complete должен auto-approve: status=done" @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_fires_task_auto_approved_hook(self, mock_run, mock_hooks, mock_followup, conn): - """В auto-режиме срабатывает хук task_auto_approved.""" + """В auto_complete-режиме срабатывает хук task_auto_approved (если последний шаг — tester).""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find bug"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find bug"}, {"role": "tester", "brief": "verify"}] run_pipeline(conn, "VDOL-001", steps) events = _get_hook_events(mock_hooks) @@ -140,20 +141,20 @@ class TestAutoApprove: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_task_level_auto_overrides_project_review(self, mock_run, mock_hooks, mock_followup, conn): - """Если у задачи execution_mode=auto, pipeline auto-approve, даже если проект в review.""" + """Если у задачи execution_mode=auto_complete, pipeline auto-approve, даже если проект в review.""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - # Проект в review, но задача — auto - models.update_task(conn, "VDOL-001", execution_mode="auto") + # Проект в review, но задача — auto_complete + models.update_task(conn, "VDOL-001", execution_mode="auto_complete") - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "reviewer", "brief": "approve"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True task = models.get_task(conn, "VDOL-001") - assert task["status"] == "done", "Task-level auto должен override project review" + assert task["status"] == "done", "Task-level auto_complete должен override project review" @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @@ -164,11 +165,11 @@ class TestAutoApprove: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) - assert result.get("mode") == "auto" + assert result.get("mode") == "auto_complete" # --------------------------------------------------------------------------- @@ -178,10 +179,12 @@ class TestAutoApprove: class TestAutoRerunOnPermissionDenied: """Runner повторяет шаг при permission issues, останавливается по лимиту (1 retry).""" + @patch("agents.runner._run_autocommit") + 
@patch("agents.runner._run_learning_extraction") @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_auto_mode_retries_on_permission_error(self, mock_run, mock_hooks, mock_followup, conn): + def test_auto_mode_retries_on_permission_error(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn): """Auto-режим: при permission denied runner делает 1 retry с allow_write=True.""" mock_run.side_effect = [ _mock_permission_denied(), # 1-й вызов: permission error @@ -189,8 +192,9 @@ class TestAutoRerunOnPermissionDenied: ] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix file"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -209,7 +213,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] run_pipeline(conn, "VDOL-001", steps) @@ -229,7 +233,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] run_pipeline(conn, "VDOL-001", steps) @@ -248,7 +252,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -257,10 +261,12 @@ class TestAutoRerunOnPermissionDenied: task = models.get_task(conn, "VDOL-001") assert task["status"] == "blocked" + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_subsequent_steps_use_allow_write_after_retry(self, mock_run, mock_hooks, mock_followup, conn): + def test_subsequent_steps_use_allow_write_after_retry(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn): """После успешного retry все следующие шаги тоже используют allow_write.""" mock_run.side_effect = [ _mock_permission_denied(), # Шаг 1: permission error @@ -269,8 +275,9 @@ class TestAutoRerunOnPermissionDenied: ] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [ {"role": "debugger", "brief": "fix"}, {"role": "tester", "brief": "test"}, @@ -293,7 +300,7 @@ class TestAutoRerunOnPermissionDenied: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", 
execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "fix"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -330,13 +337,13 @@ class TestAutoFollowup: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_followup_triggered_immediately(self, mock_run, mock_hooks, mock_followup, conn): - """В auto-режиме generate_followups вызывается сразу после pipeline.""" + """В auto_complete-режиме generate_followups вызывается сразу после pipeline (последний шаг — tester).""" mock_run.return_value = _mock_success() mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -357,8 +364,8 @@ class TestAutoFollowup: mock_followup.return_value = {"created": [], "pending_actions": pending} mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] run_pipeline(conn, "VDOL-001", steps) mock_resolve.assert_called_once_with(conn, "VDOL-001", pending) @@ -392,10 +399,10 @@ class TestAutoFollowup: mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"}) - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -412,8 +419,8 @@ class TestAutoFollowup: mock_hooks.return_value = [] mock_followup.side_effect = Exception("followup PM crashed") - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True # Pipeline succeeded, followup failure absorbed @@ -431,8 +438,8 @@ class TestAutoFollowup: mock_followup.return_value = {"created": [], "pending_actions": []} mock_resolve.return_value = [] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] run_pipeline(conn, "VDOL-001", steps) mock_resolve.assert_not_called() diff --git a/tests/test_hooks.py b/tests/test_hooks.py index 4a9d554..6ce38f7 100644 --- a/tests/test_hooks.py +++ b/tests/test_hooks.py @@ -1,6 +1,8 @@ """Tests for core/hooks.py — post-pipeline hook execution.""" +import os import subprocess +import tempfile import pytest from unittest.mock import patch, MagicMock @@ -539,10 +541,6 @@ class TestKIN052RebuildFrontendCommand: Симулирует рестарт: создаём хук, 
закрываем соединение, открываем новое — хук на месте. """ - import tempfile - import os - from core.db import init_db - with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: db_path = f.name try: @@ -568,3 +566,109 @@ class TestKIN052RebuildFrontendCommand: assert hooks[0]["trigger_module_path"] is None finally: os.unlink(db_path) + + +# --------------------------------------------------------------------------- +# KIN-053: _seed_default_hooks — автоматический хук при инициализации БД +# --------------------------------------------------------------------------- + +class TestKIN053SeedDefaultHooks: + """Тесты для _seed_default_hooks (KIN-053). + + При init_db автоматически создаётся rebuild-frontend хук для проекта 'kin', + если этот проект уже существует в БД. Функция идемпотентна. + """ + + def test_seed_skipped_when_no_kin_project(self): + """_seed_default_hooks не создаёт хук, если проекта 'kin' нет.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn = init_db(db_path) + hooks = get_hooks(conn, "kin", enabled_only=False) + conn.close() + assert hooks == [] + finally: + os.unlink(db_path) + + def test_seed_creates_hook_when_kin_project_exists(self): + """_seed_default_hooks создаёт rebuild-frontend хук при наличии проекта 'kin'. + + Порядок: init_db → create_project('kin') → повторный init_db → хук есть. + """ + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", "/projects/kin") + conn1.close() + + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=True) + conn2.close() + + assert len(hooks) == 1 + assert hooks[0]["name"] == "rebuild-frontend" + assert "npm run build" in hooks[0]["command"] + assert "web/frontend" in hooks[0]["command"] + finally: + os.unlink(db_path) + + def test_seed_hook_has_correct_command(self): + """Команда хука — точная строка с cd && npm run build.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", "/projects/kin") + conn1.close() + + conn2 = init_db(db_path) + hooks = get_hooks(conn2, "kin", event="pipeline_completed", enabled_only=False) + conn2.close() + + assert hooks[0]["command"] == ( + "cd /Users/grosfrumos/projects/kin/web/frontend && npm run build" + ) + assert hooks[0]["trigger_module_path"] is None + finally: + os.unlink(db_path) + + def test_seed_idempotent_no_duplicate(self): + """Повторные вызовы init_db не дублируют хук.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn = init_db(db_path) + models.create_project(conn, "kin", "Kin", "/projects/kin") + conn.close() + + for _ in range(3): + c = init_db(db_path) + c.close() + + conn_final = init_db(db_path) + hooks = get_hooks(conn_final, "kin", event="pipeline_completed", enabled_only=False) + conn_final.close() + + assert len(hooks) == 1, f"Ожидается 1 хук, получено {len(hooks)}" + finally: + os.unlink(db_path) + + def test_seed_hook_does_not_affect_other_projects(self): + """Seed не создаёт хуки для других проектов.""" + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + conn1 = init_db(db_path) + models.create_project(conn1, "kin", "Kin", "/projects/kin") + models.create_project(conn1, "other", "Other", "/projects/other") + conn1.close() + + conn2 = 
init_db(db_path) + other_hooks = get_hooks(conn2, "other", enabled_only=False) + conn2.close() + + assert other_hooks == [] + finally: + os.unlink(db_path) diff --git a/tests/test_models.py b/tests/test_models.py index 9982e39..33ba1c2 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -53,6 +53,55 @@ def test_update_project_tech_stack_json(conn): assert updated["tech_stack"] == ["python", "fastapi"] +# -- validate_completion_mode (KIN-063) -- + +def test_validate_completion_mode_valid_auto_complete(): + """validate_completion_mode принимает 'auto_complete'.""" + assert models.validate_completion_mode("auto_complete") == "auto_complete" + + +def test_validate_completion_mode_valid_review(): + """validate_completion_mode принимает 'review'.""" + assert models.validate_completion_mode("review") == "review" + + +def test_validate_completion_mode_invalid_fallback(): + """validate_completion_mode возвращает 'review' для невалидных значений (фоллбэк).""" + assert models.validate_completion_mode("auto") == "review" + assert models.validate_completion_mode("") == "review" + assert models.validate_completion_mode("unknown") == "review" + + +# -- get_effective_mode (KIN-063) -- + +def test_get_effective_mode_task_overrides_project(conn): + """Task execution_mode имеет приоритет над project execution_mode.""" + models.create_project(conn, "p1", "P1", "/p1", execution_mode="review") + models.create_task(conn, "P1-001", "p1", "Task", execution_mode="auto_complete") + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "auto_complete" + + +def test_get_effective_mode_falls_back_to_project(conn): + """Если задача без execution_mode — применяется project execution_mode.""" + models.create_project(conn, "p1", "P1", "/p1", execution_mode="auto_complete") + models.create_task(conn, "P1-001", "p1", "Task") # execution_mode=None + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "auto_complete" + + +def test_get_effective_mode_project_review_overrides_default(conn): + """Project execution_mode='review' + task без override → возвращает 'review'. + + Сценарий: PM хотел auto_complete, но проект настроен на review человеком. + get_effective_mode должен вернуть project-level 'review'. 
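+
+    Assumed resolution logic, as a sketch (the actual get_effective_mode
+    implementation lives in core/models.py and is not part of this hunk):
+
+        mode = task.get("execution_mode") or project.get("execution_mode")
+        return validate_completion_mode(mode or "review")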
+ """ + models.create_project(conn, "p1", "P1", "/p1", execution_mode="review") + models.create_task(conn, "P1-001", "p1", "Task") # нет task-level override + mode = models.get_effective_mode(conn, "p1", "P1-001") + assert mode == "review" + + # -- Tasks -- def test_create_and_get_task(conn): @@ -238,3 +287,46 @@ def test_cost_summary(conn): def test_cost_summary_empty(conn): models.create_project(conn, "p1", "P1", "/p1") assert models.get_cost_summary(conn, days=7) == [] + + +# -- add_decision_if_new -- + +def test_add_decision_if_new_adds_new_decision(conn): + models.create_project(conn, "p1", "P1", "/p1") + d = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "description") + assert d is not None + assert d["title"] == "Use WAL mode" + assert d["type"] == "gotcha" + + +def test_add_decision_if_new_skips_exact_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "gotcha", "Use WAL mode", "desc1") + result = models.add_decision_if_new(conn, "p1", "gotcha", "Use WAL mode", "desc2") + assert result is None + # Existing decision not duplicated + assert len(models.get_decisions(conn, "p1")) == 1 + + +def test_add_decision_if_new_skips_case_insensitive_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "decision", "Use UUID for task IDs", "desc") + result = models.add_decision_if_new(conn, "p1", "decision", "use uuid for task ids", "other desc") + assert result is None + assert len(models.get_decisions(conn, "p1")) == 1 + + +def test_add_decision_if_new_allows_same_title_different_type(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "gotcha", "SQLite WAL", "desc") + result = models.add_decision_if_new(conn, "p1", "convention", "SQLite WAL", "other desc") + assert result is not None + assert len(models.get_decisions(conn, "p1")) == 2 + + +def test_add_decision_if_new_skips_whitespace_duplicate(conn): + models.create_project(conn, "p1", "P1", "/p1") + models.add_decision(conn, "p1", "convention", "Run tests after each change", "desc") + result = models.add_decision_if_new(conn, "p1", "convention", " Run tests after each change ", "desc2") + assert result is None + assert len(models.get_decisions(conn, "p1")) == 1 diff --git a/tests/test_runner.py b/tests/test_runner.py index bd7ac9b..1e10e06 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -6,7 +6,10 @@ import pytest from unittest.mock import patch, MagicMock from core.db import init_db from core import models -from agents.runner import run_agent, run_pipeline, run_audit, _try_parse_json +from agents.runner import ( + run_agent, run_pipeline, run_audit, _try_parse_json, _run_learning_extraction, + _build_claude_env, _resolve_claude_cmd, _EXTRA_PATH_DIRS, _run_autocommit, +) @pytest.fixture @@ -155,8 +158,9 @@ class TestRunAgent: # --------------------------------------------------------------------------- class TestRunPipeline: + @patch("agents.runner._run_autocommit") # gotcha #41: мокируем в тестах не о autocommit @patch("agents.runner.subprocess.run") - def test_successful_pipeline(self, mock_run, conn): + def test_successful_pipeline(self, mock_run, mock_autocommit, conn): mock_run.return_value = _mock_claude_success({"result": "done"}) steps = [ @@ -298,13 +302,13 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_generates_followups(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode должен 
вызывать generate_followups после task_auto_approved.""" + """Auto_complete mode должен вызывать generate_followups (последний шаг — tester).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -334,15 +338,15 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_skips_followups_for_followup_tasks(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии).""" + """Auto_complete mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"}) - steps = [{"role": "debugger", "brief": "find"}] + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -352,13 +356,13 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_fires_task_done_event(self, mock_run, mock_hooks, mock_followup, conn): - """Auto mode должен вызывать run_hooks с event='task_done' после task_auto_approved.""" + """Auto_complete mode должен вызывать run_hooks с event='task_done' (последний шаг — tester).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert result["success"] is True @@ -371,7 +375,7 @@ class TestAutoMode: @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") def test_auto_mode_resolves_pending_actions(self, mock_run, mock_hooks, mock_followup, mock_resolve, conn): - """Auto mode должен авто-резолвить pending_actions из followup generation.""" + """Auto_complete mode должен авто-резолвить pending_actions (последний шаг — tester).""" mock_run.return_value = _mock_claude_success({"result": "done"}) mock_hooks.return_value = [] @@ -380,8 +384,8 @@ class TestAutoMode: mock_followup.return_value = {"created": [], "pending_actions": pending} mock_resolve.return_value = [{"resolved": "rerun", "result": {}}] - models.update_project(conn, "vdol", execution_mode="auto") - steps = [{"role": "debugger", "brief": "find"}] + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}] result = run_pipeline(conn, "VDOL-001", steps) assert 
result["success"] is True @@ -393,10 +397,12 @@ class TestAutoMode: # --------------------------------------------------------------------------- class TestRetryOnPermissionError: + @patch("agents.runner._run_autocommit") + @patch("agents.runner._run_learning_extraction") @patch("core.followup.generate_followups") @patch("agents.runner.run_hooks") @patch("agents.runner.subprocess.run") - def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, conn): + def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn): """Auto mode: retry при permission error должен срабатывать.""" permission_fail = _mock_claude_failure("permission denied: cannot write file") retry_success = _mock_claude_success({"result": "fixed"}) @@ -404,8 +410,9 @@ class TestRetryOnPermissionError: mock_run.side_effect = [permission_fail, retry_success] mock_hooks.return_value = [] mock_followup.return_value = {"created": [], "pending_actions": []} + mock_learn.return_value = {"added": 0, "skipped": 0} - models.update_project(conn, "vdol", execution_mode="auto") + models.update_project(conn, "vdol", execution_mode="auto_complete") steps = [{"role": "debugger", "brief": "find"}] result = run_pipeline(conn, "VDOL-001", steps) @@ -472,12 +479,13 @@ class TestNonInteractive: call_kwargs = mock_run.call_args[1] assert call_kwargs.get("stdin") == subprocess.DEVNULL + @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": ""}, clear=False) @patch("agents.runner.subprocess.run") - def test_noninteractive_uses_300s_timeout(self, mock_run, conn): + def test_noninteractive_uses_600s_timeout(self, mock_run, conn): mock_run.return_value = _mock_claude_success({"result": "ok"}) run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True) call_kwargs = mock_run.call_args[1] - assert call_kwargs.get("timeout") == 300 + assert call_kwargs.get("timeout") == 600 @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""}) @patch("agents.runner.subprocess.run") @@ -504,7 +512,16 @@ class TestNonInteractive: run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) call_kwargs = mock_run.call_args[1] assert call_kwargs.get("stdin") == subprocess.DEVNULL - assert call_kwargs.get("timeout") == 300 + assert call_kwargs.get("timeout") == 600 + + @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": "900"}) + @patch("agents.runner.subprocess.run") + def test_custom_timeout_via_env_var(self, mock_run, conn): + """KIN_AGENT_TIMEOUT overrides the default 600s timeout.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 900 @patch("agents.runner.subprocess.run") def test_allow_write_adds_skip_permissions(self, mock_run, conn): @@ -751,3 +768,786 @@ class TestSilentFailedDiagnostics: assert result["success"] is True assert result.get("error") is None + + +# --------------------------------------------------------------------------- +# Auto-learning: _run_learning_extraction +# --------------------------------------------------------------------------- + +class TestRunLearningExtraction: + @patch("agents.runner.subprocess.run") + def test_extracts_and_saves_decisions(self, mock_run, conn): + """Успешный сценарий: learner возвращает JSON с decisions, они сохраняются в БД.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "SQLite WAL mode needed", "description": "Without WAL concurrent reads 
fail", "tags": ["sqlite", "db"]}, + {"type": "convention", "title": "Always run tests after change", "description": "Prevents regressions", "tags": ["testing"]}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [ + {"role": "debugger", "raw_output": "Found issue with sqlite concurrent access"}, + ] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 2 + assert result["skipped"] == 0 + + decisions = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall() + assert len(decisions) == 2 + titles = {d["title"] for d in decisions} + assert "SQLite WAL mode needed" in titles + assert "Always run tests after change" in titles + + @patch("agents.runner.subprocess.run") + def test_skips_duplicate_decisions(self, mock_run, conn): + """Дедупликация: если decision с таким title+type уже есть, пропускается.""" + from core import models as m + m.add_decision(conn, "vdol", "gotcha", "SQLite WAL mode needed", "existing desc") + + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "SQLite WAL mode needed", "description": "duplicate", "tags": []}, + {"type": "convention", "title": "New convention here", "description": "new desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "tester", "raw_output": "test output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 1 + assert result["skipped"] == 1 + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 2 + + @patch("agents.runner.subprocess.run") + def test_limits_to_5_decisions(self, mock_run, conn): + """Learner не должен сохранять более 5 decisions даже если агент вернул больше.""" + decisions_list = [ + {"type": "decision", "title": f"Decision {i}", "description": f"desc {i}", "tags": []} + for i in range(8) + ] + learner_output = json.dumps({"decisions": decisions_list}) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "architect", "raw_output": "long output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 5 + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 5 + + @patch("agents.runner.subprocess.run") + def test_non_json_output_returns_error(self, mock_run, conn): + """Если learner вернул не-JSON, функция возвращает error, не бросает исключение.""" + mock_run.return_value = _mock_claude_success({"result": "plain text, not json"}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") + def test_decisions_linked_to_task(self, mock_run, conn): + """Сохранённые decisions должны быть привязаны к task_id.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Important gotcha", "description": "desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + d = conn.execute("SELECT * FROM decisions WHERE 
project_id='vdol'").fetchone() + assert d["task_id"] == "VDOL-001" + + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner.subprocess.run") + def test_pipeline_triggers_learning_after_completion(self, mock_run, mock_learn, conn): + """run_pipeline должен вызывать _run_learning_extraction после успешного завершения.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_learn.return_value = {"added": 1, "skipped": 0} + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + mock_learn.assert_called_once() + call_args = mock_learn.call_args[0] + assert call_args[1] == "VDOL-001" # task_id + assert call_args[2] == "vdol" # project_id + + @patch("agents.runner._run_learning_extraction") + @patch("agents.runner.subprocess.run") + def test_learning_error_does_not_break_pipeline(self, mock_run, mock_learn, conn): + """Если _run_learning_extraction бросает исключение, pipeline не падает.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_learn.side_effect = Exception("learning failed") + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + + def test_pipeline_dry_run_skips_learning(self, conn): + """Dry run не должен вызывать _run_learning_extraction.""" + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps, dry_run=True) + + assert result["dry_run"] is True + # No decisions saved (dry run — no DB activity) + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") + def test_empty_learner_output_returns_no_decisions(self, mock_run, conn): + """Пустой stdout от learner (subprocess вернул "") — не бросает исключение, возвращает error.""" + # Используем пустую строку как stdout (не dict), чтобы raw_output оказался пустым + mock_run.return_value = _mock_claude_success("") + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0 + + @patch("agents.runner.subprocess.run") + def test_empty_decisions_list_returns_zero_counts(self, mock_run, conn): + """Learner возвращает {"decisions": []} — added=0, skipped=0, без ошибки.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert result["skipped"] == 0 + assert "error" not in result + + @patch("agents.runner.subprocess.run") + def test_decision_missing_title_is_skipped(self, mock_run, conn): + """Decision без title молча пропускается, не вызывает исключение.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "description": "no title here", "tags": []}, + {"type": "convention", "title": "Valid decision", "description": "desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 1 + assert 
len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 1 + + @patch("agents.runner.subprocess.run") + def test_decisions_field_not_list_returns_error(self, mock_run, conn): + """Если поле decisions не является списком — возвращается error dict.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": "not a list"})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + assert result["added"] == 0 + assert "error" in result + + @patch("agents.runner.subprocess.run") + def test_logs_agent_run_to_db(self, mock_run, conn): + """KIN-060: _run_learning_extraction должна писать запись в agent_logs.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Log test", "description": "desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + logs = conn.execute( + "SELECT * FROM agent_logs WHERE agent_role='learner' AND project_id='vdol'" + ).fetchall() + assert len(logs) == 1 + log = logs[0] + assert log["task_id"] == "VDOL-001" + assert log["action"] == "learn" + assert log["model"] == "sonnet" + + @patch("agents.runner.subprocess.run") + def test_learner_cost_included_in_cost_summary(self, mock_run, conn): + """KIN-060: get_cost_summary() включает затраты learner-агента.""" + learner_output = json.dumps({"decisions": []}) + mock_run.return_value = _mock_claude_success({ + "result": learner_output, + "cost_usd": 0.042, + "usage": {"total_tokens": 3000}, + }) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + costs = models.get_cost_summary(conn, days=1) + assert len(costs) == 1 + assert costs[0]["project_id"] == "vdol" + assert costs[0]["total_cost_usd"] == pytest.approx(0.042) + assert costs[0]["total_tokens"] == 3000 + + # ----------------------------------------------------------------------- + # KIN-061: Regression — валидация поля type в decision + # ----------------------------------------------------------------------- + + @patch("agents.runner.subprocess.run") + def test_valid_type_gotcha_is_saved_as_is(self, mock_run, conn): + """KIN-061: валидный тип 'gotcha' сохраняется без изменений.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "gotcha", "title": "Use WAL mode", "description": "Concurrent reads need WAL", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "gotcha" + + @patch("agents.runner.subprocess.run") + def test_invalid_type_falls_back_to_decision(self, mock_run, conn): + """KIN-061: невалидный тип 'unknown_type' заменяется на 'decision'.""" + learner_output = json.dumps({ + "decisions": [ + {"type": "unknown_type", "title": "Some title", "description": "Some desc", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = conn.execute("SELECT type FROM 
decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "decision" + + @patch("agents.runner.subprocess.run") + def test_missing_type_falls_back_to_decision(self, mock_run, conn): + """KIN-061: отсутствующий ключ 'type' в decision заменяется на 'decision'.""" + learner_output = json.dumps({ + "decisions": [ + {"title": "No type key here", "description": "desc without type", "tags": []}, + ] + }) + mock_run.return_value = _mock_claude_success({"result": learner_output}) + + result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}]) + + assert result["added"] == 1 + d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone() + assert d["type"] == "decision" + + + # ----------------------------------------------------------------------- + # KIN-062: KIN_LEARNER_TIMEOUT — отдельный таймаут для learner-агента + # ----------------------------------------------------------------------- + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": ""}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_uses_120s_default_timeout(self, mock_run, conn): + """KIN-062: по умолчанию learner использует таймаут 120s (KIN_LEARNER_TIMEOUT не задан).""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 120 + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "300"}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_uses_custom_timeout_from_env(self, mock_run, conn): + """KIN-062: KIN_LEARNER_TIMEOUT переопределяет дефолтный таймаут learner-агента.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 300 + + @patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "60", "KIN_AGENT_TIMEOUT": "900"}, clear=False) + @patch("agents.runner.subprocess.run") + def test_learner_timeout_independent_of_agent_timeout(self, mock_run, conn): + """KIN-062: KIN_LEARNER_TIMEOUT не зависит от KIN_AGENT_TIMEOUT.""" + mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})}) + + step_results = [{"role": "debugger", "raw_output": "output"}] + _run_learning_extraction(conn, "VDOL-001", "vdol", step_results) + + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 60 + + +# --------------------------------------------------------------------------- +# KIN-056: Regression — web path timeout parity with CLI +# --------------------------------------------------------------------------- + +class TestRegressionKIN056: + """Регрессионные тесты KIN-056: агенты таймаутили через 300s из web, но не из CLI. + + Причина: noninteractive режим использовал timeout=300s. + Web API всегда устанавливает KIN_NONINTERACTIVE=1, поэтому таймаут был 300s. + Фикс: единый timeout=600s независимо от noninteractive (переопределяется KIN_AGENT_TIMEOUT). + + Каждый тест ПАДАЛ бы со старым кодом (timeout=300 для noninteractive) + и ПРОХОДИТ после фикса. 
+ """ + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_env_does_not_use_300s(self, mock_run, conn): + """Web путь устанавливает KIN_NONINTERACTIVE=1. До фикса это давало timeout=300s.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") != 300, ( + "Регрессия KIN-056: timeout не должен быть 300s в noninteractive режиме" + ) + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_timeout_is_600(self, mock_run, conn): + """Web путь: KIN_NONINTERACTIVE=1 → timeout = 600s (не 300s).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 600 + + @patch("agents.runner.subprocess.run") + def test_web_and_cli_paths_use_same_timeout(self, mock_run, conn): + """Таймаут через web-путь (KIN_NONINTERACTIVE=1) == таймаут CLI (noninteractive=True).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + + # Web path: env var KIN_NONINTERACTIVE=1, noninteractive param not set + with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}): + run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False) + web_timeout = mock_run.call_args[1].get("timeout") + + mock_run.reset_mock() + + # CLI path: noninteractive=True, no env var + with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "", "KIN_AGENT_TIMEOUT": ""}): + run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True) + cli_timeout = mock_run.call_args[1].get("timeout") + + assert web_timeout == cli_timeout, ( + f"Таймаут web ({web_timeout}s) != CLI ({cli_timeout}s) — регрессия KIN-056" + ) + + @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": "900"}) + @patch("agents.runner.subprocess.run") + def test_web_noninteractive_respects_kin_agent_timeout_override(self, mock_run, conn): + """Web путь: KIN_AGENT_TIMEOUT переопределяет дефолтный таймаут даже при KIN_NONINTERACTIVE=1.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert call_kwargs.get("timeout") == 900 + + +# --------------------------------------------------------------------------- +# KIN-057: claude CLI в PATH при запуске через launchctl +# --------------------------------------------------------------------------- + +class TestClaudePath: + """Регрессионные тесты KIN-057: launchctl-демоны могут не видеть claude в PATH.""" + + def test_build_claude_env_contains_extra_paths(self): + """_build_claude_env должен добавить /opt/homebrew/bin и /usr/local/bin в PATH.""" + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + for extra_dir in _EXTRA_PATH_DIRS: + assert extra_dir in path_dirs, ( + f"Регрессия KIN-057: {extra_dir} не найден в PATH, сгенерированном _build_claude_env" + ) + + def test_build_claude_env_no_duplicate_paths(self): + """_build_claude_env не должен дублировать уже существующие пути.""" + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + seen = set() + for d in path_dirs: + assert d not in seen, f"Дублирующийся PATH entry: {d}" + seen.add(d) + + def 
test_build_claude_env_preserves_existing_path(self): + """_build_claude_env должен сохранять уже существующие пути.""" + with patch.dict("os.environ", {"PATH": "/custom/bin:/usr/bin:/bin"}): + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + assert "/custom/bin" in path_dirs + assert "/usr/bin" in path_dirs + + def test_resolve_claude_cmd_returns_string(self): + """_resolve_claude_cmd должен всегда возвращать строку.""" + cmd = _resolve_claude_cmd() + assert isinstance(cmd, str) + assert len(cmd) > 0 + + def test_resolve_claude_cmd_fallback_when_not_found(self): + """_resolve_claude_cmd должен вернуть 'claude' если CLI не найден в PATH.""" + with patch("agents.runner.shutil.which", return_value=None): + cmd = _resolve_claude_cmd() + assert cmd == "claude" + + def test_resolve_claude_cmd_returns_full_path_when_found(self): + """_resolve_claude_cmd должен вернуть полный путь если claude найден.""" + with patch("agents.runner.shutil.which", return_value="/opt/homebrew/bin/claude"): + cmd = _resolve_claude_cmd() + assert cmd == "/opt/homebrew/bin/claude" + + @patch("agents.runner.subprocess.run") + def test_run_claude_passes_env_to_subprocess(self, mock_run, conn): + """_run_claude должен передавать env= в subprocess.run (а не наследовать голый PATH).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + assert "env" in call_kwargs, ( + "Регрессия KIN-057: subprocess.run должен получать явный env с расширенным PATH" + ) + assert call_kwargs["env"] is not None + + @patch("agents.runner.subprocess.run") + def test_run_claude_env_has_homebrew_in_path(self, mock_run, conn): + """env переданный в subprocess.run должен содержать /opt/homebrew/bin в PATH.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + run_agent(conn, "debugger", "VDOL-001", "vdol") + call_kwargs = mock_run.call_args[1] + env = call_kwargs.get("env", {}) + assert "/opt/homebrew/bin" in env.get("PATH", ""), ( + "Регрессия KIN-057: /opt/homebrew/bin не найден в env['PATH'] subprocess.run" + ) + + @patch("agents.runner.subprocess.run") + def test_file_not_found_returns_127(self, mock_run, conn): + """Если claude не найден (FileNotFoundError), должен вернуться returncode 127.""" + mock_run.side_effect = FileNotFoundError("claude not found") + result = run_agent(conn, "debugger", "VDOL-001", "vdol") + assert result["success"] is False + assert "not found" in (result.get("error") or "").lower() + + @patch.dict("os.environ", {"PATH": ""}) + def test_launchctl_empty_path_build_env_adds_extra_dirs(self): + """Регрессия KIN-057: когда launchctl запускает с пустым PATH, + _build_claude_env должен добавить _EXTRA_PATH_DIRS чтобы claude был доступен. + Без фикса: os.environ["PATH"]="" → shutil.which("claude") → None → FileNotFoundError. + После фикса: _build_claude_env строит PATH с /opt/homebrew/bin и др. + """ + env = _build_claude_env() + path_dirs = env["PATH"].split(":") + # Явная проверка каждой критичной директории + for extra_dir in _EXTRA_PATH_DIRS: + assert extra_dir in path_dirs, ( + f"KIN-057: при пустом os PATH директория {extra_dir} должна быть добавлена" + ) + + @patch.dict("os.environ", {"PATH": ""}) + def test_launchctl_empty_path_shutil_which_fails_without_fix(self): + """Воспроизводит сломанное поведение: при PATH='' shutil.which возвращает None. + Это точно то, что происходило до фикса — launchctl не видел claude. 
+ Тест документирует, ПОЧЕМУ нужен _build_claude_env вместо прямого os.environ. + """ + import shutil + # Без фикса: поиск с пустым PATH не найдёт claude + result_without_fix = shutil.which("claude", path="") + assert result_without_fix is None, ( + "Если этот assert упал — shutil.which нашёл claude в пустом PATH, " + "что невозможно. Ожидаем None — именно поэтому нужен _build_claude_env." + ) + # С фиксом: _resolve_claude_cmd строит расширенный PATH и находит claude + # (или возвращает fallback "claude", но не бросает FileNotFoundError) + cmd = _resolve_claude_cmd() + assert isinstance(cmd, str) and len(cmd) > 0, ( + "KIN-057: _resolve_claude_cmd должен возвращать строку даже при пустом os PATH" + ) + + +# --------------------------------------------------------------------------- +# KIN-063: TestCompletionMode — auto_complete + last-step role check +# --------------------------------------------------------------------------- + +class TestCompletionMode: + """auto_complete mode срабатывает только если последний шаг — tester или reviewer.""" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_with_tester_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn): + """auto_complete + последний шаг tester → status=done (Decision #29).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_with_reviewer_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn): + """auto_complete + последний шаг reviewer → status=done.""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "reviewer", "brief": "review"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done" + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_without_tester_last_sets_review(self, mock_run, mock_hooks, mock_followup, conn): + """auto_complete + последний шаг НЕ tester/reviewer → status=review (Decision #29).""" + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + steps = [{"role": "developer", "brief": "fix"}, {"role": "debugger", "brief": "debug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "Регрессия KIN-063: auto_complete без tester/reviewer последним НЕ должен авто-завершать" + ) + + 
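The TestCompletionMode cases above pin the gating rule down from the outside. A minimal sketch of the check they exercise (the helper and constant names here are assumptions; agents/runner.py may spell them differently):

```python
# Sketch only: AUTO_COMPLETE_ROLES and _should_auto_complete are
# illustrative names, not necessarily the real runner internals.
AUTO_COMPLETE_ROLES = {"tester", "reviewer"}


def _should_auto_complete(effective_mode: str, steps: list[dict]) -> bool:
    """Auto-complete only when the mode is exactly 'auto_complete' AND the
    last pipeline step is a verifying role; anything else, including the
    unmigrated legacy value 'auto', falls through to the review branch."""
    if effective_mode != "auto_complete":
        return False
    return bool(steps) and steps[-1].get("role") in AUTO_COMPLETE_ROLES
```

The strict string comparison is what makes the legacy-'auto' case below pass for free: 'auto' fails the equality check and the task lands in 'review'.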
@patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_legacy_auto_mode_value_not_recognized(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия: старое значение 'auto' больше не является валидным режимом. + + После KIN-063 'auto' → 'auto_complete'. Если в DB осталось 'auto' (без миграции), + runner НЕ должен авто-завершать — это 'review'-ветка (безопасный fallback). + (Decision #29) + """ + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Прямой SQL-апдейт, обходя validate_completion_mode, чтобы симулировать + # старую запись в БД без миграции + conn.execute("UPDATE projects SET execution_mode='auto' WHERE id='vdol'") + conn.commit() + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "Регрессия: 'auto' (старый формат) не должен срабатывать как auto_complete" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_with_tester_last_keeps_task_in_review(self, mock_run, mock_hooks, mock_followup, conn): + """review mode + последний шаг tester → task.status == 'review', НЕ done (ждёт ручного approve).""" + mock_run.return_value = _mock_claude_success({"result": "all tests pass"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект и задача остаются в дефолтном 'review' mode + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + assert task["status"] != "done", ( + "KIN-063: review mode не должен авто-завершать задачу даже если tester последний" + ) + + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_project_review_overrides_no_task_completion_mode(self, mock_run, mock_hooks, mock_followup, conn): + """Project execution_mode='review' + задача без override → pipeline завершается в 'review'. + + Сценарий: PM выбрал auto_complete, но проект настроен на 'review' (ручной override человека). + Задача не имеет task-level execution_mode, поэтому get_effective_mode возвращает project-level 'review'. 
+ """ + mock_run.return_value = _mock_claude_success({"result": "ok"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + # Проект явно в 'review', задача без execution_mode + models.update_project(conn, "vdol", execution_mode="review") + # task VDOL-001 создана без execution_mode (None) — fixture + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + assert result["mode"] == "review" + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review", ( + "KIN-063: project-level 'review' должен применяться когда задача не имеет override" + ) + + +# --------------------------------------------------------------------------- +# KIN-048: _run_autocommit — флаг, git path, env= +# --------------------------------------------------------------------------- + +class TestAutocommit: + """KIN-048: _run_autocommit — autocommit_enabled флаг, shutil.which, env= regression.""" + + def test_disabled_project_skips_subprocess(self, conn): + """autocommit_enabled=0 (дефолт) → subprocess не вызывается.""" + with patch("agents.runner.subprocess.run") as mock_run: + _run_autocommit(conn, "VDOL-001", "vdol") + mock_run.assert_not_called() + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_enabled_calls_git_add_and_commit(self, mock_which, mock_run, conn, tmp_path): + """autocommit_enabled=1 → вызываются git add -A и git commit с task_id и title.""" + mock_which.return_value = "/usr/bin/git" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + assert mock_run.call_count == 2 + add_cmd = mock_run.call_args_list[0][0][0] + assert add_cmd == ["/usr/bin/git", "add", "-A"] + commit_cmd = mock_run.call_args_list[1][0][0] + assert commit_cmd[0] == "/usr/bin/git" + assert commit_cmd[1] == "commit" + assert "VDOL-001" in commit_cmd[-1] + assert "Fix bug" in commit_cmd[-1] + + @patch("agents.runner.subprocess.run") + def test_nothing_to_commit_no_exception(self, mock_run, conn, tmp_path): + """returncode=1 (nothing to commit) → исключение не бросается.""" + mock_run.return_value = MagicMock(returncode=1) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") # must not raise + + @patch("agents.runner.subprocess.run") + def test_passes_env_to_subprocess(self, mock_run, conn, tmp_path): + """Regression #33: env= должен передаваться в subprocess.run.""" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + for call in mock_run.call_args_list: + kwargs = call[1] + assert "env" in kwargs, "Regression #33: subprocess.run должен получать env=" + assert "/opt/homebrew/bin" in kwargs["env"].get("PATH", "") + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_resolves_git_via_shutil_which(self, mock_which, mock_run, conn, tmp_path): + """Regression #32: git резолвится через shutil.which, а не hardcoded 'git'.""" + mock_which.return_value = "/opt/homebrew/bin/git" + mock_run.return_value = MagicMock(returncode=0) + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + _run_autocommit(conn, "VDOL-001", "vdol") + + 
git_which_calls = [c for c in mock_which.call_args_list if c[0][0] == "git"] + assert len(git_which_calls) > 0, "Regression #32: shutil.which должен вызываться для git" + first_cmd = mock_run.call_args_list[0][0][0] + assert first_cmd[0] == "/opt/homebrew/bin/git" + + @patch("agents.runner.subprocess.run") + @patch("agents.runner.shutil.which") + def test_git_not_found_no_crash_logs_warning(self, mock_which, mock_run, conn, tmp_path): + """shutil.which(git) → None → fallback 'git' → FileNotFoundError → no crash, WARNING logged.""" + mock_which.return_value = None # git не найден в PATH + mock_run.side_effect = FileNotFoundError("git: command not found") + models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path)) + + with patch("agents.runner._logger") as mock_logger: + _run_autocommit(conn, "VDOL-001", "vdol") # не должен бросать исключение + + mock_logger.warning.assert_called_once() + + @patch("agents.runner._run_autocommit") + @patch("agents.runner.subprocess.run") + def test_autocommit_not_called_on_failed_pipeline(self, mock_run, mock_autocommit, conn): + """Pipeline failure → _run_autocommit must NOT be called (gotcha #41).""" + mock_run.return_value = _mock_claude_failure("compilation error") + + steps = [{"role": "debugger", "brief": "find"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is False + mock_autocommit.assert_not_called() + + +# --------------------------------------------------------------------------- +# KIN-055: execution_mode='review' при переводе задачи в статус review +# --------------------------------------------------------------------------- + +class TestReviewModeExecutionMode: + """Регрессия KIN-055: execution_mode должен быть 'review', а не NULL после pipeline в review mode.""" + + def test_task_execution_mode_is_null_before_pipeline(self, conn): + """Граничный случай: execution_mode IS NULL до запуска pipeline (задача только создана).""" + task = models.get_task(conn, "VDOL-001") + assert task["execution_mode"] is None, ( + "Задача должна иметь NULL execution_mode до выполнения pipeline" + ) + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_sets_execution_mode_review(self, mock_run, mock_hooks, conn): + """После pipeline в review mode task.execution_mode должно быть 'review', а не NULL.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + + steps = [{"role": "debugger", "brief": "find bug"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "review" + # Регрессионная проверка KIN-055: execution_mode не должен быть NULL + assert task["execution_mode"] is not None, ( + "Регрессия KIN-055: execution_mode не должен быть NULL после перевода задачи в статус review" + ) + assert task["execution_mode"] == "review" + + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_review_mode_execution_mode_persisted_in_db(self, mock_run, mock_hooks, conn): + """execution_mode='review' должно сохраняться в SQLite напрямую, минуя ORM-слой.""" + mock_run.return_value = _mock_claude_success({"result": "done"}) + mock_hooks.return_value = [] + + steps = [{"role": "debugger", "brief": "find"}] + run_pipeline(conn, "VDOL-001", steps) + + row = conn.execute( + "SELECT execution_mode FROM tasks WHERE id='VDOL-001'" + ).fetchone() + assert row is not None + assert row["execution_mode"] == 
"review", ( + "Регрессия KIN-055: execution_mode должен быть 'review' в SQLite после pipeline" + ) diff --git a/web/api.py b/web/api.py index 367063c..3fe79c7 100644 --- a/web/api.py +++ b/web/api.py @@ -3,6 +3,8 @@ Kin Web API — FastAPI backend reading ~/.kin/kin.db via core.models. Run: uvicorn web.api:app --reload --port 8420 """ +import logging +import shutil import subprocess import sys from pathlib import Path @@ -18,6 +20,7 @@ from pydantic import BaseModel from core.db import init_db from core import models +from core.models import VALID_COMPLETION_MODES from agents.bootstrap import ( detect_tech_stack, detect_modules, extract_decisions_from_claude_md, find_vault_root, scan_obsidian, save_to_db, @@ -25,6 +28,62 @@ from agents.bootstrap import ( DB_PATH = Path.home() / ".kin" / "kin.db" +_logger = logging.getLogger("kin") + +# --------------------------------------------------------------------------- +# Startup: verify claude CLI is available in PATH +# --------------------------------------------------------------------------- + +def _check_claude_available() -> None: + """Warn at startup if claude CLI cannot be found in PATH. + + launchctl daemons run with a stripped environment and may not see + /opt/homebrew/bin where claude is typically installed. + See Decision #28. + """ + from agents.runner import _build_claude_env # avoid circular import at module level + env = _build_claude_env() + claude_path = shutil.which("claude", path=env["PATH"]) + if claude_path: + _logger.info("claude CLI found: %s", claude_path) + else: + _logger.warning( + "WARNING: claude CLI not found in PATH (%s). " + "Agent pipelines will fail with returncode 127. " + "Fix: add /opt/homebrew/bin to EnvironmentVariables.PATH in " + "~/Library/LaunchAgents/com.kin.api.plist and reload with: " + "launchctl unload ~/Library/LaunchAgents/com.kin.api.plist && " + "launchctl load ~/Library/LaunchAgents/com.kin.api.plist", + env.get("PATH", ""), + ) + + +def _check_git_available() -> None: + """Warn at startup if git cannot be found in PATH. + + launchctl daemons run with a stripped environment and may not see + git in the standard directories. See Decision #28. + """ + from agents.runner import _build_claude_env # avoid circular import at module level + env = _build_claude_env() + git_path = shutil.which("git", path=env["PATH"]) + if git_path: + _logger.info("git found: %s", git_path) + else: + _logger.warning( + "WARNING: git not found in PATH (%s). " + "Autocommit will fail silently. 
" + "Fix: add git directory to EnvironmentVariables.PATH in " + "~/Library/LaunchAgents/com.kin.api.plist and reload with: " + "launchctl unload ~/Library/LaunchAgents/com.kin.api.plist && " + "launchctl load ~/Library/LaunchAgents/com.kin.api.plist", + env.get("PATH", ""), + ) + + +_check_claude_available() +_check_git_available() + app = FastAPI(title="Kin API", version="0.1.0") app.add_middleware( @@ -162,7 +221,7 @@ class TaskPatch(BaseModel): VALID_STATUSES = set(models.VALID_TASK_STATUSES) -VALID_EXECUTION_MODES = {"auto", "review"} +VALID_EXECUTION_MODES = VALID_COMPLETION_MODES @app.patch("/api/tasks/{task_id}") @@ -361,6 +420,7 @@ def run_task(task_id: str): cmd, cwd=str(kin_root), stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL, env=env, ) From 756f9e65abd8700feefee671455657ba7b36bd7f Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 07:06:34 +0200 Subject: [PATCH 02/57] =?UTF-8?q?kin:=20KIN-054=20=D0=98=D1=81=D0=BF=D1=80?= =?UTF-8?q?=D0=B0=D0=B2=D0=B8=D1=82=D1=8C=20race=20condition=20=D0=B2=20lo?= =?UTF-8?q?adMode()=20=D0=BF=D1=80=D0=B8=20=D0=B8=D0=BD=D0=B8=D1=86=D0=B8?= =?UTF-8?q?=D0=B0=D0=BB=D0=B8=D0=B7=D0=B0=D1=86=D0=B8=D0=B8=20ProjectView?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- agents/runner.py | 10 +++++++--- tests/test_runner.py | 10 ++++++++-- web/api.py | 14 +++++++++++--- web/frontend/src/api.ts | 3 ++- web/frontend/src/views/ProjectView.vue | 20 +++++++++++++++++++- 5 files changed, 47 insertions(+), 10 deletions(-) diff --git a/agents/runner.py b/agents/runner.py index 04a1615..cf8b02d 100644 --- a/agents/runner.py +++ b/agents/runner.py @@ -47,9 +47,13 @@ def _build_claude_env() -> dict: if bin_dir.is_dir(): extra.append(str(bin_dir)) - seen = set(existing) - new_dirs = [d for d in extra if d and d not in seen] - env["PATH"] = ":".join(new_dirs + existing) + seen: set[str] = set() + deduped: list[str] = [] + for d in extra + existing: + if d and d not in seen: + seen.add(d) + deduped.append(d) + env["PATH"] = ":".join(deduped) return env diff --git a/tests/test_runner.py b/tests/test_runner.py index 1e10e06..720a870 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -1173,8 +1173,14 @@ class TestClaudePath: ) def test_build_claude_env_no_duplicate_paths(self): - """_build_claude_env не должен дублировать уже существующие пути.""" - env = _build_claude_env() + """_build_claude_env не должен дублировать уже существующие пути. + + Мокируем PATH на фиксированное значение, чтобы тест не зависел от + реального окружения (решение #48). + """ + fixed_path = "/usr/bin:/bin" + with patch.dict("os.environ", {"PATH": fixed_path}, clear=False): + env = _build_claude_env() path_dirs = env["PATH"].split(":") seen = set() for d in path_dirs: diff --git a/web/api.py b/web/api.py index 3fe79c7..4ac7a5c 100644 --- a/web/api.py +++ b/web/api.py @@ -136,19 +136,27 @@ class ProjectCreate(BaseModel): class ProjectPatch(BaseModel): - execution_mode: str + execution_mode: str | None = None + autocommit_enabled: bool | None = None @app.patch("/api/projects/{project_id}") def patch_project(project_id: str, body: ProjectPatch): - if body.execution_mode not in VALID_EXECUTION_MODES: + if body.execution_mode is None and body.autocommit_enabled is None: + raise HTTPException(400, "Nothing to update. 
Provide execution_mode or autocommit_enabled.") + if body.execution_mode is not None and body.execution_mode not in VALID_EXECUTION_MODES: raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. Must be one of: {', '.join(VALID_EXECUTION_MODES)}") conn = get_conn() p = models.get_project(conn, project_id) if not p: conn.close() raise HTTPException(404, f"Project '{project_id}' not found") - models.update_project(conn, project_id, execution_mode=body.execution_mode) + fields = {} + if body.execution_mode is not None: + fields["execution_mode"] = body.execution_mode + if body.autocommit_enabled is not None: + fields["autocommit_enabled"] = int(body.autocommit_enabled) + models.update_project(conn, project_id, **fields) p = models.get_project(conn, project_id) conn.close() return p diff --git a/web/frontend/src/api.ts b/web/frontend/src/api.ts index d4b8274..080bfcf 100644 --- a/web/frontend/src/api.ts +++ b/web/frontend/src/api.ts @@ -40,6 +40,7 @@ export interface Project { priority: number tech_stack: string[] | null execution_mode: string | null + autocommit_enabled: number | null created_at: string total_tasks: number done_tasks: number @@ -169,7 +170,7 @@ export const api = { post<{ updated: string[]; count: number }>(`/projects/${projectId}/audit/apply`, { task_ids: taskIds }), patchTask: (id: string, data: { status?: string; execution_mode?: string }) => patch(`/tasks/${id}`, data), - patchProject: (id: string, data: { execution_mode: string }) => + patchProject: (id: string, data: { execution_mode?: string; autocommit_enabled?: boolean }) => patch(`/projects/${id}`, data), deleteDecision: (projectId: string, decisionId: number) => del<{ deleted: number }>(`/projects/${projectId}/decisions/${decisionId}`), diff --git a/web/frontend/src/views/ProjectView.vue b/web/frontend/src/views/ProjectView.vue index 744fdf2..0f5877b 100644 --- a/web/frontend/src/views/ProjectView.vue +++ b/web/frontend/src/views/ProjectView.vue @@ -62,6 +62,24 @@ async function toggleMode() { } } +// Autocommit toggle +const autocommit = ref(false) + +function loadAutocommit() { + autocommit.value = !!(project.value?.autocommit_enabled) +} + +async function toggleAutocommit() { + autocommit.value = !autocommit.value + try { + await api.patchProject(props.id, { autocommit_enabled: autocommit.value }) + if (project.value) project.value = { ...project.value, autocommit_enabled: autocommit.value ? 1 : 0 } + } catch (e: any) { + error.value = e.message + autocommit.value = !autocommit.value + } +} + // Audit const auditLoading = ref(false) const auditResult = ref(null) @@ -124,7 +142,7 @@ watch(selectedStatuses, (val) => { router.replace({ query: { ...route.query, status: val.length ? 
val.join(',') : undefined } }) }, { deep: true }) -onMounted(() => { load(); loadMode() }) +onMounted(async () => { await load(); loadMode(); loadAutocommit() }) const filteredTasks = computed(() => { if (!project.value) return [] From 67071c757d6f33912b14c1c37ffac3ecad210cf9 Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 07:06:53 +0200 Subject: [PATCH 03/57] =?UTF-8?q?kin:=20KIN-064=20=D0=9F=D0=BE=D1=87=D0=B8?= =?UTF-8?q?=D0=BD=D0=B8=D1=82=D1=8C=20=D1=84=D0=BB=D0=B0=D0=BA=D1=83=D1=8E?= =?UTF-8?q?=D1=89=D0=B8=D0=B9=20=D1=82=D0=B5=D1=81=D1=82=20test=5Fbuild=5F?= =?UTF-8?q?claude=5Fenv=5Fno=5Fduplicate=5Fpaths?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- web/frontend/src/views/ProjectView.vue | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/web/frontend/src/views/ProjectView.vue b/web/frontend/src/views/ProjectView.vue index 0f5877b..57fae30 100644 --- a/web/frontend/src/views/ProjectView.vue +++ b/web/frontend/src/views/ProjectView.vue @@ -308,6 +308,14 @@ async function addDecision() { :title="autoMode ? 'Auto mode: agents can write files' : 'Review mode: agents read-only'"> {{ autoMode ? '🔓 Auto' : '🔒 Review' }} + + +
+
+        <div v-if="manualEscalationTasks.length" class="manual-escalations">
+          <h3>
+            ⚠ Требуют ручного решения
+            <span class="count">({{ manualEscalationTasks.length }})</span>
+          </h3>
+          <div v-for="t in manualEscalationTasks" :key="t.id" class="escalation-card">
+            <router-link :to="`/task/${t.id}`">{{ t.id }}</router-link>
+            <span class="title">
+              {{ t.title }}
+              <em v-if="t.parent_task_id">escalated from {{ t.parent_task_id }}</em>
+            </span>
+            <div class="meta">
+              {{ t.brief.description }}
+              <span class="pri">pri {{ t.priority }}</span>
+            </div>
+          </div>
+        </div>
+
       <div v-if="!filteredTasks.length" class="empty">No tasks.</div>
          {{ t.assigned_role }}
-          pri {{ t.priority }}
+          <span class="pri">pri {{ t.priority }}</span>
+
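The escalation panel above binds manualEscalationTasks together with parent_task_id, brief.description, and priority. As a rough sketch under stated assumptions, the feed behind such a panel could be a single query like the one below; the 'manual_escalation' status literal and the function name are assumptions, since the backend hunk for this panel is not shown in full in this series:

```python
import json
import sqlite3


def manual_escalation_tasks(conn: sqlite3.Connection, project_id: str) -> list[dict]:
    """Tasks escalated to a human, highest priority first.

    Assumes conn.row_factory = sqlite3.Row, as the Kin DB layer uses elsewhere;
    column names mirror the template bindings above.
    """
    rows = conn.execute(
        "SELECT id, title, parent_task_id, brief, priority, created_at "
        "FROM tasks "
        "WHERE project_id = ? AND status = 'manual_escalation' "
        "ORDER BY priority DESC, created_at DESC",
        (project_id,),
    ).fetchall()
    tasks = []
    for row in rows:
        t = dict(row)
        # brief is stored as JSON text in the tasks table
        if isinstance(t.get("brief"), str):
            t["brief"] = json.loads(t["brief"])
        tasks.append(t)
    return tasks
```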
From 4fd825dc58dd458ae2aa478a94f77d3fdc91a60c Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 07:19:59 +0200 Subject: [PATCH 09/57] =?UTF-8?q?kin:=20KIN-013=20=D0=9D=D0=B0=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=BE=D0=B9=D0=BA=D0=B8=20=D0=B2=20GUI:=20=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=B0=D0=BD=D0=B8=D1=86=D0=B0=20Settings=20=D1=81=20?= =?UTF-8?q?=D0=BA=D0=BE=D0=BD=D1=84=D0=B8=D0=B3=D1=83=D1=80=D0=B0=D1=86?= =?UTF-8?q?=D0=B8=D0=B5=D0=B9=20=D0=BF=D1=80=D0=BE=D0=B5=D0=BA=D1=82=D0=BE?= =?UTF-8?q?=D0=B2.=20=D0=9F=D1=83=D1=82=D1=8C=20=D0=BA=20Obsidian=20vault?= =?UTF-8?q?=20=D0=B4=D0=BB=D1=8F=20=D1=81=D0=B8=D0=BD=D1=85=D1=80=D0=BE?= =?UTF-8?q?=D0=BD=D0=B8=D0=B7=D0=B0=D1=86=D0=B8=D0=B8=20decisions/tasks/ka?= =?UTF-8?q?nban.=20=D0=94=D0=B2=D1=83=D1=81=D1=82=D0=BE=D1=80=D0=BE=D0=BD?= =?UTF-8?q?=D0=BD=D0=B8=D0=B9=20sync:=20decisions=20=E2=86=92=20Obsidian?= =?UTF-8?q?=20.md,=20Obsidian=20=D1=87=D0=B5=D0=BA=D0=B1=D0=BE=D0=BA=D1=81?= =?UTF-8?q?=D1=8B=20=E2=86=92=20tasks.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/test_obsidian_sync.py | 73 ++++++++ .../src/__tests__/filter-persistence.test.ts | 168 ++++++++++++++++++ 2 files changed, 241 insertions(+) diff --git a/tests/test_obsidian_sync.py b/tests/test_obsidian_sync.py index 0b5eeea..b0e3027 100644 --- a/tests/test_obsidian_sync.py +++ b/tests/test_obsidian_sync.py @@ -15,6 +15,18 @@ from core.obsidian_sync import ( from core import models +# --------------------------------------------------------------------------- +# 0. Migration — obsidian_vault_path column must exist after init_db +# --------------------------------------------------------------------------- + +def test_migration_obsidian_vault_path_column_exists(): + """init_db создаёт или мигрирует колонку obsidian_vault_path в таблице projects.""" + conn = init_db(db_path=":memory:") + cols = {r[1] for r in conn.execute("PRAGMA table_info(projects)").fetchall()} + conn.close() + assert "obsidian_vault_path" in cols + + @pytest.fixture def tmp_vault(tmp_path): """Returns a temporary vault root directory.""" @@ -184,3 +196,64 @@ def test_sync_no_vault_path(db): # project exists but obsidian_vault_path is NULL with pytest.raises(ValueError, match="obsidian_vault_path not set"): sync_obsidian(db, "proj1") + + +# --------------------------------------------------------------------------- +# 8. export — frontmatter обёрнут в разделители --- +# --------------------------------------------------------------------------- + +def test_export_frontmatter_has_yaml_delimiters(tmp_vault): + """Экспортированный файл начинается с '---' и содержит закрывающий '---'.""" + decisions = [ + { + "id": 99, + "project_id": "p", + "type": "decision", + "category": None, + "title": "YAML Delimiter Test", + "description": "Verifying frontmatter delimiters.", + "tags": [], + "created_at": "2026-01-01", + } + ] + tmp_vault.mkdir(parents=True) + created = export_decisions_to_md("p", decisions, tmp_vault) + content = created[0].read_text(encoding="utf-8") + + assert content.startswith("---\n"), "Frontmatter должен начинаться с '---\\n'" + # первые --- открывают, вторые --- закрывают frontmatter + parts = content.split("---\n") + assert len(parts) >= 3, "Должно быть минимум два разделителя '---'" + + +# --------------------------------------------------------------------------- +# 9. 
sync_obsidian — несуществующий vault_path → ошибка в errors, не исключение +# --------------------------------------------------------------------------- + +def test_sync_nonexistent_vault_records_error(db, tmp_path): + """Если vault_path не существует, sync возвращает ошибку в errors без raise.""" + nonexistent = tmp_path / "ghost_vault" + models.update_project(db, "proj1", obsidian_vault_path=str(nonexistent)) + + result = sync_obsidian(db, "proj1") + + assert len(result["errors"]) > 0 + assert "does not exist" in result["errors"][0].lower() or "not exist" in result["errors"][0].lower() + assert result["exported_decisions"] == 0 + assert result["tasks_updated"] == 0 + + +# --------------------------------------------------------------------------- +# 10. sync_obsidian — пустой vault → 0 экспортов, 0 обновлений, нет ошибок +# --------------------------------------------------------------------------- + +def test_sync_empty_vault_no_errors(db, tmp_vault): + """Пустой vault (нет decisions, нет task-файлов) → exported=0, updated=0, errors=[].""" + tmp_vault.mkdir(parents=True) + models.update_project(db, "proj1", obsidian_vault_path=str(tmp_vault)) + + result = sync_obsidian(db, "proj1") + + assert result["exported_decisions"] == 0 + assert result["tasks_updated"] == 0 + assert result["errors"] == [] diff --git a/web/frontend/src/__tests__/filter-persistence.test.ts b/web/frontend/src/__tests__/filter-persistence.test.ts index e788444..bc40bf0 100644 --- a/web/frontend/src/__tests__/filter-persistence.test.ts +++ b/web/frontend/src/__tests__/filter-persistence.test.ts @@ -623,3 +623,171 @@ describe('KIN-065: ProjectView — Autocommit toggle', () => { expect(wrapper.text()).toContain('Network error') }) }) + +// ───────────────────────────────────────────────────────────── +// KIN-015: TaskDetail — Edit button и форма редактирования +// ───────────────────────────────────────────────────────────── + +describe('KIN-015: TaskDetail — Edit button и форма редактирования', () => { + function makePendingTask(overrides: Record = {}) { + return { + ...MOCK_TASK_FULL, + id: 'KIN-015', + project_id: 'KIN', + title: 'Pending Task', + status: 'pending', + priority: 5, + brief: { text: 'Описание задачи', route_type: 'feature' }, + execution_mode: null, + ...overrides, + } + } + + beforeEach(() => { + vi.mocked(api.patchTask).mockReset() + }) + + it('Кнопка Edit видна для задачи со статусом pending', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask() as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + expect(editBtn?.exists(), 'Кнопка Edit должна быть видна для pending').toBe(true) + }) + + it('Кнопка Edit скрыта для статуса in_progress', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask({ status: 'in_progress' }) as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + expect(editBtn?.exists(), 'Кнопка Edit не должна быть видна для in_progress').toBe(false) + }) + + it('Кнопка Edit скрыта для статуса done', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask({ status: 'done' 
}) as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + expect(editBtn?.exists(), 'Кнопка Edit не должна быть видна для done').toBe(false) + }) + + it('Клик по Edit открывает форму с заполненным заголовком задачи', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask() as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + await editBtn!.trigger('click') + await flushPromises() + + // Модал открыт — поле title (input без type) содержит текущий заголовок + const titleInput = wrapper.find('input:not([type])') + expect(titleInput.exists(), 'Поле Title должно быть видно в модале').toBe(true) + expect((titleInput.element as HTMLInputElement).value).toBe('Pending Task') + }) + + it('saveEdit вызывает patchTask только с изменёнными полями (только title)', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask() as any) + vi.mocked(api.patchTask).mockResolvedValue(makePendingTask({ title: 'Новый заголовок' }) as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + await editBtn!.trigger('click') + await flushPromises() + + // Меняем только title + const titleInput = wrapper.find('input:not([type])') + await titleInput.setValue('Новый заголовок') + + const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) + await saveBtn!.trigger('click') + await flushPromises() + + expect(api.patchTask).toHaveBeenCalledWith('KIN-015', { title: 'Новый заголовок' }) + }) + + it('saveEdit не вызывает patchTask если данные не изменились', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask() as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + // Открываем модал без изменений + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + await editBtn!.trigger('click') + await flushPromises() + + // Сохраняем без изменений — должен тихо закрыться без API-вызова + const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) + await saveBtn!.trigger('click') + await flushPromises() + + expect(api.patchTask, 'patchTask не должен вызываться при пустом diff').not.toHaveBeenCalled() + }) + + it('После успешного сохранения модал закрывается', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makePendingTask() as any) + vi.mocked(api.patchTask).mockResolvedValue(makePendingTask({ title: 'Обновлённый заголовок' }) as any) + const router = makeRouter() + await router.push('/task/KIN-015') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-015' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) + await editBtn!.trigger('click') + await flushPromises() 
+ + const titleInput = wrapper.find('input:not([type])') + await titleInput.setValue('Обновлённый заголовок') + + const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) + await saveBtn!.trigger('click') + await flushPromises() + + // Модал закрыт — форма с title-input больше не в DOM + expect(wrapper.find('input:not([type])').exists(), 'Форма должна закрыться после сохранения').toBe(false) + }) +}) From 01c39cc45c3949bbf0d8ae0c44c66fb37cd20b99 Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 07:21:36 +0200 Subject: [PATCH 10/57] =?UTF-8?q?kin:=20KIN-045=20=D0=B4=D0=BE=D0=B1=D0=B0?= =?UTF-8?q?=D0=B2=D0=B8=D1=82=D1=8C=20=D0=B2=20GUI=20=D1=82=D1=80=D0=B5?= =?UTF-8?q?=D1=82=D1=8C=D1=8E=20=D0=BA=D0=BD=D0=BE=D0=BF=D0=BA=D1=83=20Rev?= =?UTF-8?q?ise=20(=F0=9F=94=84)=20=D1=80=D1=8F=D0=B4=D0=BE=D0=BC=20=D1=81?= =?UTF-8?q?=20Approve/Reject.=20Revise=20=3D=20=D0=B2=D0=B5=D1=80=D0=BD?= =?UTF-8?q?=D1=83=D1=82=D1=8C=20=D0=B7=D0=B0=D0=B4=D0=B0=D1=87=D1=83=20?= =?UTF-8?q?=D0=B0=D0=B3=D0=B5=D0=BD=D1=82=D1=83=20=D1=81=20=D0=BA=D0=BE?= =?UTF-8?q?=D0=BC=D0=BC=D0=B5=D0=BD=D1=82=D0=B0=D1=80=D0=B8=D0=B5=D0=BC=20?= =?UTF-8?q?=D1=87=D0=B5=D0=BB=D0=BE=D0=B2=D0=B5=D0=BA=D0=B0.=20=D0=9C?= =?UTF-8?q?=D0=BE=D0=B4=D0=B0=D0=BB=D0=BA=D0=B0=20=D1=81=20textarea=20'?= =?UTF-8?q?=D1=87=D1=82=D0=BE=20=D0=B4=D0=BE=D0=B8=D1=81=D1=81=D0=BB=D0=B5?= =?UTF-8?q?=D0=B4=D0=BE=D0=B2=D0=B0=D1=82=D1=8C/=D0=B4=D0=BE=D1=80=D0=B0?= =?UTF-8?q?=D0=B1=D0=BE=D1=82=D0=B0=D1=82=D1=8C'.=20=D0=97=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B0=20=D0=B2=D0=BE=D0=B7=D0=B2=D1=80=D0=B0=D1=89?= =?UTF-8?q?=D0=B0=D0=B5=D1=82=D1=81=D1=8F=20=D0=B2=20in=5Fprogress,=20?= =?UTF-8?q?=D0=B0=D0=B3=D0=B5=D0=BD=D1=82=20=D0=BF=D0=BE=D0=BB=D1=83=D1=87?= =?UTF-8?q?=D0=B0=D0=B5=D1=82=20=D1=81=D0=B2=D0=BE=D0=B9=20=D0=BF=D1=80?= =?UTF-8?q?=D0=B5=D0=B4=D1=8B=D0=B4=D1=83=D1=89=D0=B8=D0=B9=20output=20+?= =?UTF-8?q?=20=D0=BA=D0=BE=D0=BC=D0=BC=D0=B5=D0=BD=D1=82=D0=B0=D1=80=D0=B8?= =?UTF-8?q?=D0=B9=20=D0=B4=D0=B8=D1=80=D0=B5=D0=BA=D1=82=D0=BE=D1=80=D0=B0?= =?UTF-8?q?=20=D0=B8=20=D0=B4=D0=BE=D1=80=D0=B0=D0=B1=D0=B0=D1=82=D1=8B?= =?UTF-8?q?=D0=B2=D0=B0=D0=B5=D1=82?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- tests/test_api.py | 13 +++++ tests/test_context_builder.py | 104 ++++++++++++++++++++++++++++++++++ tests/test_runner.py | 30 ++++++++++ 3 files changed, 147 insertions(+) diff --git a/tests/test_api.py b/tests/test_api.py index 75c87fc..3de2c47 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -168,6 +168,19 @@ def test_revise_not_found(client): assert r.status_code == 404 +def test_revise_task_response_includes_comment(client): + """Ответ /revise содержит поле comment с переданным текстом.""" + r = client.post("/api/tasks/P1-001/revise", json={"comment": "Уточни требования"}) + assert r.status_code == 200 + assert r.json()["comment"] == "Уточни требования" + + +def test_revise_task_missing_comment_returns_422(client): + """Запрос /revise без поля comment → 422 Unprocessable Entity (Pydantic validation).""" + r = client.post("/api/tasks/P1-001/revise", json={}) + assert r.status_code == 422 + + def test_task_pipeline_not_found(client): r = client.get("/api/tasks/NOPE/pipeline") assert r.status_code == 404 diff --git a/tests/test_context_builder.py b/tests/test_context_builder.py index 64bf732..9b78a25 100644 --- a/tests/test_context_builder.py +++ b/tests/test_context_builder.py @@ -161,3 +161,107 @@ class TestLanguageInProject: def test_context_carries_language(self, conn): ctx = 
build_context(conn, "VDOL-001", "pm", "vdol") assert ctx["project"]["language"] == "ru" + + +# --------------------------------------------------------------------------- +# KIN-045: Revise context — revise_comment + last agent output injection +# --------------------------------------------------------------------------- + +class TestReviseContext: + """build_context и format_prompt корректно инжектируют контекст ревизии.""" + + def test_build_context_includes_revise_comment_in_task(self, conn): + """Если у задачи есть revise_comment, он попадает в ctx['task'].""" + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Доисследуй edge case с пустым массивом",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert ctx["task"]["revise_comment"] == "Доисследуй edge case с пустым массивом" + + def test_build_context_fetches_last_agent_output_when_revise_comment_set(self, conn): + """При revise_comment build_context достаёт last_agent_output из agent_logs.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Реализован endpoint POST /api/items", + success=True, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Добавь валидацию входных данных",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert ctx.get("last_agent_output") == "Реализован endpoint POST /api/items" + + def test_build_context_no_last_agent_output_when_no_successful_logs(self, conn): + """revise_comment есть, но нет успешных логов — last_agent_output отсутствует.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Permission denied", + success=False, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Повтори без ошибок",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert "last_agent_output" not in ctx + + def test_build_context_no_revise_fields_when_no_revise_comment(self, conn): + """Обычная задача без revise_comment не получает last_agent_output в контексте.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Всё готово", + success=True, + ) + # revise_comment не устанавливаем + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + assert "last_agent_output" not in ctx + assert ctx["task"].get("revise_comment") is None + + def test_format_prompt_includes_director_revision_request(self, conn): + """format_prompt содержит секцию '## Director's revision request:' при revise_comment.""" + conn.execute( + "UPDATE tasks SET revise_comment=? WHERE id='VDOL-001'", + ("Обработай случай пустого списка",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Director's revision request:" in prompt + assert "Обработай случай пустого списка" in prompt + + def test_format_prompt_includes_previous_output_before_revision(self, conn): + """format_prompt содержит '## Your previous output (before revision):' при last_agent_output.""" + from core import models + models.log_agent_run( + conn, "vdol", "developer", "execute", + task_id="VDOL-001", + output_summary="Сделал миграцию БД", + success=True, + ) + conn.execute( + "UPDATE tasks SET revise_comment=? 
WHERE id='VDOL-001'", + ("Ещё добавь индекс",), + ) + conn.commit() + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Your previous output (before revision):" in prompt + assert "Сделал миграцию БД" in prompt + + def test_format_prompt_no_revision_sections_when_no_revise_comment(self, conn): + """Без revise_comment в prompt нет секций ревизии.""" + ctx = build_context(conn, "VDOL-001", "backend_dev", "vdol") + prompt = format_prompt(ctx, "backend_dev", "You are a developer.") + assert "## Director's revision request:" not in prompt + assert "## Your previous output (before revision):" not in prompt diff --git a/tests/test_runner.py b/tests/test_runner.py index d79746b..61fce2b 100644 --- a/tests/test_runner.py +++ b/tests/test_runner.py @@ -1412,6 +1412,36 @@ class TestCompletionMode: "KIN-063: project-level 'review' должен применяться когда задача не имеет override" ) + @patch("core.followup.generate_followups") + @patch("agents.runner.run_hooks") + @patch("agents.runner.subprocess.run") + def test_auto_complete_not_broken_by_revise_comment(self, mock_run, mock_hooks, mock_followup, conn): + """Регрессия KIN-045: revise_comment в задаче не ломает auto_complete flow. + + Задача прошла ревизию (revise_comment != None, status=in_progress), + затем повторно запускается пайплайн в auto_complete режиме. + Последний шаг — tester → задача должна получить status='done'. + """ + mock_run.return_value = _mock_claude_success({"result": "all tests pass"}) + mock_hooks.return_value = [] + mock_followup.return_value = {"created": [], "pending_actions": []} + + models.update_project(conn, "vdol", execution_mode="auto_complete") + models.update_task( + conn, "VDOL-001", + status="in_progress", + revise_comment="Добавь тест для пустого массива", + ) + + steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}] + result = run_pipeline(conn, "VDOL-001", steps) + + assert result["success"] is True + task = models.get_task(conn, "VDOL-001") + assert task["status"] == "done", ( + "KIN-045: revise_comment не должен мешать auto_complete авто-завершению" + ) + # --------------------------------------------------------------------------- # KIN-048: _run_autocommit — флаг, git path, env= From 860ef3f6c94635464af8793f0e409f66aa5f2a84 Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 07:23:04 +0200 Subject: [PATCH 11/57] =?UTF-8?q?kin:=20KIN-015=20=D0=A1=D0=B4=D0=B5=D0=BB?= =?UTF-8?q?=D0=B0=D1=82=D1=8C=20=D0=B2=D0=BE=D0=B7=D0=BC=D0=BE=D0=B6=D0=BD?= =?UTF-8?q?=D0=BE=D1=81=D1=82=D1=8C=20=D1=80=D0=B5=D0=B4=D0=B0=D0=BA=D1=82?= =?UTF-8?q?=D0=B8=D1=80=D0=BE=D0=B2=D0=B0=D1=82=D1=8C=20=D0=B7=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=D0=B8=20=D0=B4=D0=BB=D1=8F=20=D0=B7=D0=B0=D0=B4?= =?UTF-8?q?=D0=B0=D1=87=20=D0=BD=D0=B5=20=D0=B2=D0=B7=D1=8F=D1=82=D1=8B?= =?UTF-8?q?=D1=85=20=D0=B2=20=D1=80=D0=B0=D0=B1=D0=BE=D1=82=D1=83=20pendin?= =?UTF-8?q?g?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/__tests__/filter-persistence.test.ts | 24 +++++++------------ 1 file changed, 9 insertions(+), 15 deletions(-) diff --git a/web/frontend/src/__tests__/filter-persistence.test.ts b/web/frontend/src/__tests__/filter-persistence.test.ts index bc40bf0..95114ee 100644 --- a/web/frontend/src/__tests__/filter-persistence.test.ts +++ b/web/frontend/src/__tests__/filter-persistence.test.ts @@ -673,8 +673,8 @@ describe('KIN-015: TaskDetail — Edit button и форма 
редактиров }) await flushPromises() - const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) - expect(editBtn?.exists(), 'Кнопка Edit не должна быть видна для in_progress').toBe(false) + const hasEditBtn = wrapper.findAll('button').some(b => b.text().includes('Edit')) + expect(hasEditBtn, 'Кнопка Edit не должна быть видна для in_progress').toBe(false) }) it('Кнопка Edit скрыта для статуса done', async () => { @@ -688,8 +688,8 @@ describe('KIN-015: TaskDetail — Edit button и форма редактиров }) await flushPromises() - const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) - expect(editBtn?.exists(), 'Кнопка Edit не должна быть видна для done').toBe(false) + const hasEditBtn = wrapper.findAll('button').some(b => b.text().includes('Edit')) + expect(hasEditBtn, 'Кнопка Edit не должна быть видна для done').toBe(false) }) it('Клик по Edit открывает форму с заполненным заголовком задачи', async () => { @@ -729,12 +729,10 @@ describe('KIN-015: TaskDetail — Edit button и форма редактиров await editBtn!.trigger('click') await flushPromises() - // Меняем только title + // Меняем только title и сабмитим форму const titleInput = wrapper.find('input:not([type])') await titleInput.setValue('Новый заголовок') - - const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) - await saveBtn!.trigger('click') + await wrapper.find('form').trigger('submit') await flushPromises() expect(api.patchTask).toHaveBeenCalledWith('KIN-015', { title: 'Новый заголовок' }) @@ -751,14 +749,12 @@ describe('KIN-015: TaskDetail — Edit button и форма редактиров }) await flushPromises() - // Открываем модал без изменений + // Открываем модал без изменений и сабмитим форму const editBtn = wrapper.findAll('button').find(b => b.text().includes('Edit')) await editBtn!.trigger('click') await flushPromises() - // Сохраняем без изменений — должен тихо закрыться без API-вызова - const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) - await saveBtn!.trigger('click') + await wrapper.find('form').trigger('submit') await flushPromises() expect(api.patchTask, 'patchTask не должен вызываться при пустом diff').not.toHaveBeenCalled() @@ -782,9 +778,7 @@ describe('KIN-015: TaskDetail — Edit button и форма редактиров const titleInput = wrapper.find('input:not([type])') await titleInput.setValue('Обновлённый заголовок') - - const saveBtn = wrapper.findAll('button').find(b => b.text().includes('Save')) - await saveBtn!.trigger('click') + await wrapper.find('form').trigger('submit') await flushPromises() // Модал закрыт — форма с title-input больше не в DOM From d50bd703ae40df70318c02b51c6a681850432c67 Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 08:21:13 +0200 Subject: [PATCH 12/57] =?UTF-8?q?kin:=20KIN-049=20=D0=9A=D0=BD=D0=BE=D0=BF?= =?UTF-8?q?=D0=BA=D0=B0=20Deploy=20=D0=BD=D0=B0=20=D1=81=D1=82=D1=80=D0=B0?= =?UTF-8?q?=D0=BD=D0=B8=D1=86=D0=B5=20=D0=B7=D0=B0=D0=B4=D0=B0=D1=87=D0=B8?= =?UTF-8?q?=20=D0=BF=D0=BE=D1=81=D0=BB=D0=B5=20approve.=20=D0=94=D0=BB?= =?UTF-8?q?=D1=8F=20=D0=BA=D0=B0=D0=B6=D0=B4=D0=BE=D0=B3=D0=BE=20=D0=BF?= =?UTF-8?q?=D1=80=D0=BE=D0=B5=D0=BA=D1=82=D0=B0=20=D0=BD=D0=B0=D1=81=D1=82?= =?UTF-8?q?=D1=80=D0=B0=D0=B8=D0=B2=D0=B0=D0=B5=D1=82=D1=81=D1=8F=20deploy?= =?UTF-8?q?-=D0=BA=D0=BE=D0=BC=D0=B0=D0=BD=D0=B4=D0=B0=20(git=20push,=20sc?= =?UTF-8?q?p,=20ssh=20restart).=20=D0=92=20Settings=20=D0=BF=D1=80=D0=BE?= =?UTF-8?q?=D0=B5=D0=BA=D1=82=D0=B0.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
---
 agents/prompts/pm.md                          |  20 +++
 cli/main.py                                   |  47 ++++---
 core/db.py                                    |  10 ++
 core/followup.py                              |  19 +--
 core/models.py                                |  50 ++++++-
 tests/test_api.py                             | 132 ++++++++++++++++++
 web/api.py                                    |  76 +++++++---
 .../src/__tests__/filter-persistence.test.ts  | 124 ++++++++++++++++
 web/frontend/src/api.ts                       |  17 ++-
 web/frontend/src/views/SettingsView.vue       |  43 +++++-
 web/frontend/src/views/TaskDetail.vue         |  40 +++++-
 11 files changed, 517 insertions(+), 61 deletions(-)

diff --git a/agents/prompts/pm.md b/agents/prompts/pm.md
index 910cbdd..16c9a72 100644
--- a/agents/prompts/pm.md
+++ b/agents/prompts/pm.md
@@ -40,6 +40,25 @@ Set `completion_mode` based on the following rules (in priority order):
    - `research`, `new_project`, `security_audit` → `"review"`
 3. Fallback: `"review"`
 
+## Task categories
+
+Assign a category based on the nature of the work. Choose ONE from this list:
+
+| Code  | Meaning                                |
+|-------|----------------------------------------|
+| SEC   | Security, auth, permissions            |
+| UI    | Frontend, styles, UX                   |
+| API   | Integrations, endpoints, external APIs |
+| INFRA | Infrastructure, DevOps, deployment     |
+| BIZ   | Business logic, workflows              |
+| DB    | Database schema, migrations, queries   |
+| ARCH  | Architecture decisions, refactoring    |
+| TEST  | Tests, QA, coverage                    |
+| PERF  | Performance optimizations              |
+| DOCS  | Documentation                          |
+| FIX   | Hotfixes, bug fixes                    |
+| OBS   | Monitoring, observability, logging     |
+
 ## Output format
 
 Return ONLY valid JSON (no markdown, no explanation):
@@ -48,6 +67,7 @@ Return ONLY valid JSON (no markdown, no explanation):
 ```json
 {
   "analysis": "Brief analysis of what needs to be done",
   "completion_mode": "auto_complete",
+  "category": "FIX",
   "pipeline": [
     {
       "role": "debugger",
diff --git a/cli/main.py b/cli/main.py
index b801cf0..3f6da06 100644
--- a/cli/main.py
+++ b/cli/main.py
@@ -53,21 +53,6 @@ def _table(headers: list[str], rows: list[list[str]], min_width: int = 6):
     return "\n".join(lines)
 
 
-def _auto_task_id(conn, project_id: str) -> str:
-    """Generate next task ID like PROJ-001."""
-    prefix = project_id.upper()
-    existing = models.list_tasks(conn, project_id=project_id)
-    max_num = 0
-    for t in existing:
-        tid = t["id"]
-        if tid.startswith(prefix + "-"):
-            try:
-                num = int(tid.split("-", 1)[1])
-                max_num = max(max_num, num)
-            except ValueError:
-                pass
-    return f"{prefix}-{max_num + 1:03d}"
-
 
 # ===========================================================================
 # Root group
@@ -178,18 +163,28 @@ def task():
 @click.argument("title")
 @click.option("--type", "route_type", type=click.Choice(["debug", "feature", "refactor", "hotfix"]), default=None)
 @click.option("--priority", type=int, default=5)
+@click.option("--category", "-c", default=None,
+              help=f"Task category: {', '.join(models.TASK_CATEGORIES)}")
 @click.pass_context
-def task_add(ctx, project_id, title, route_type, priority):
-    """Add a task to a project. ID is auto-generated (PROJ-001)."""
+def task_add(ctx, project_id, title, route_type, priority, category):
+    """Add a task to a project. ID is auto-generated (PROJ-001 or PROJ-CAT-001)."""
     conn = ctx.obj["conn"]
     p = models.get_project(conn, project_id)
     if not p:
         click.echo(f"Project '{project_id}' not found.", err=True)
         raise SystemExit(1)
-    task_id = _auto_task_id(conn, project_id)
+    if category:
+        category = category.upper()
+        if category not in models.TASK_CATEGORIES:
+            click.echo(
+                f"Invalid category '{category}'. Must be one of: {', '.join(models.TASK_CATEGORIES)}",
+                err=True,
+            )
+            raise SystemExit(1)
+    task_id = models.next_task_id(conn, project_id, category=category)
     brief = {"route_type": route_type} if route_type else None
     t = models.create_task(conn, task_id, project_id, title,
-                           priority=priority, brief=brief)
+                           priority=priority, brief=brief, category=category)
     click.echo(f"Created task: {t['id']} — {t['title']}")
@@ -588,16 +583,28 @@ def run_task(ctx, task_id, dry_run, allow_write):
 
     # Save completion_mode from PM output to task (only if not already set by user)
     task_current = models.get_task(conn, task_id)
+    update_fields = {}
     if not task_current.get("execution_mode"):
         pm_completion_mode = models.validate_completion_mode(
             output.get("completion_mode", "review")
         )
-        models.update_task(conn, task_id, execution_mode=pm_completion_mode)
+        update_fields["execution_mode"] = pm_completion_mode
         import logging
         logging.getLogger("kin").info(
             "PM set completion_mode=%s for task %s", pm_completion_mode, task_id
         )
 
+    # Save category from PM output (only if task has no category yet)
+    if not task_current.get("category"):
+        pm_category = output.get("category")
+        if pm_category and isinstance(pm_category, str):
+            pm_category = pm_category.upper()
+            if pm_category in models.TASK_CATEGORIES:
+                update_fields["category"] = pm_category
+
+    if update_fields:
+        models.update_task(conn, task_id, **update_fields)
+
     click.echo(f"\nAnalysis: {analysis}")
     click.echo(f"Pipeline ({len(pipeline_steps)} steps):")
     for i, step in enumerate(pipeline_steps, 1):
diff --git a/core/db.py b/core/db.py
index 4aacfb7..6e4c769 100644
--- a/core/db.py
+++ b/core/db.py
@@ -22,6 +22,7 @@ CREATE TABLE IF NOT EXISTS projects (
     forgejo_repo TEXT,
     language TEXT DEFAULT 'ru',
     execution_mode TEXT NOT NULL DEFAULT 'review',
+    deploy_command TEXT,
     created_at DATETIME DEFAULT CURRENT_TIMESTAMP
 );
 
@@ -44,6 +45,7 @@ CREATE TABLE IF NOT EXISTS tasks (
     blocked_reason TEXT,
     dangerously_skipped BOOLEAN DEFAULT 0,
     revise_comment TEXT,
+    category TEXT DEFAULT NULL,
     created_at DATETIME DEFAULT CURRENT_TIMESTAMP,
     updated_at DATETIME DEFAULT CURRENT_TIMESTAMP
 );
 
@@ -244,10 +246,18 @@ def _migrate(conn: sqlite3.Connection):
         conn.execute("ALTER TABLE tasks ADD COLUMN revise_comment TEXT")
         conn.commit()
 
+    if "category" not in task_cols:
+        conn.execute("ALTER TABLE tasks ADD COLUMN category TEXT DEFAULT NULL")
+        conn.commit()
+
     if "obsidian_vault_path" not in proj_cols:
         conn.execute("ALTER TABLE projects ADD COLUMN obsidian_vault_path TEXT")
         conn.commit()
 
+    if "deploy_command" not in proj_cols:
+        conn.execute("ALTER TABLE projects ADD COLUMN deploy_command TEXT")
+        conn.commit()
+
     # Migrate audit_log table (KIN-021)
     existing_tables = {r[0] for r in conn.execute(
         "SELECT name FROM sqlite_master WHERE type='table'"
diff --git a/core/followup.py b/core/followup.py
index ed5d464..cb4c054 100644
--- a/core/followup.py
+++ b/core/followup.py
@@ -48,21 +48,6 @@ def _collect_pipeline_output(conn: sqlite3.Connection, task_id: str) -> str:
     return "\n".join(parts)
 
 
-def _next_task_id(conn: sqlite3.Connection, project_id: str) -> str:
-    """Generate the next sequential task ID for a project."""
-    prefix = project_id.upper()
-    existing = models.list_tasks(conn, project_id=project_id)
-    max_num = 0
-    for t in existing:
-        tid = t["id"]
-        if tid.startswith(prefix + "-"):
-            try:
-                num = int(tid.split("-", 1)[1])
-                max_num = max(max_num, num)
-            except ValueError:
-                pass
-    return f"{prefix}-{max_num + 1:03d}"
-
 
 def generate_followups(
     conn: sqlite3.Connection,
@@ -154,7 +139,7 @@ def generate_followups(
                 "options": ["rerun", "manual_task", "skip"],
             })
         else:
-            new_id = _next_task_id(conn, project_id)
+            new_id = models.next_task_id(conn, project_id)
             brief_dict = {"source": f"followup:{task_id}"}
             if item.get("type"):
                 brief_dict["route_type"] = item["type"]
@@ -206,7 +191,7 @@ def resolve_pending_action(
         return None
 
     if choice == "manual_task":
-        new_id = _next_task_id(conn, project_id)
+        new_id = models.next_task_id(conn, project_id)
         brief_dict = {"source": f"followup:{task_id}", "task_type": "manual_escalation"}
         if item.get("type"):
             brief_dict["route_type"] = item["type"]
diff --git a/core/models.py b/core/models.py
index 93d0db3..c536b9b 100644
--- a/core/models.py
+++ b/core/models.py
@@ -16,6 +16,11 @@ VALID_TASK_STATUSES = [
 ]
 
 VALID_COMPLETION_MODES = {"auto_complete", "review"}
 
+TASK_CATEGORIES = [
+    "SEC", "UI", "API", "INFRA", "BIZ", "DB",
+    "ARCH", "TEST", "PERF", "DOCS", "FIX", "OBS",
+]
+
 
 def validate_completion_mode(value: str) -> str:
     """Validate completion mode from LLM output. Falls back to 'review' if invalid."""
@@ -132,6 +137,44 @@ def update_project(conn: sqlite3.Connection, id: str, **fields) -> dict:
 # Tasks
 # ---------------------------------------------------------------------------
 
+def next_task_id(
+    conn: sqlite3.Connection,
+    project_id: str,
+    category: str | None = None,
+) -> str:
+    """Generate next task ID.
+
+    Without category: PROJ-001 (backward-compatible old format)
+    With category: PROJ-CAT-001 (new format, per-category counter)
+    """
+    prefix = project_id.upper()
+    existing = list_tasks(conn, project_id=project_id)
+
+    if category:
+        cat_prefix = f"{prefix}-{category}-"
+        max_num = 0
+        for t in existing:
+            tid = t["id"]
+            if tid.startswith(cat_prefix):
+                try:
+                    max_num = max(max_num, int(tid[len(cat_prefix):]))
+                except ValueError:
+                    pass
+        return f"{prefix}-{category}-{max_num + 1:03d}"
+    else:
+        # Old format: global max across project (integers only, skip CAT-NNN)
+        max_num = 0
+        for t in existing:
+            tid = t["id"]
+            if tid.startswith(prefix + "-"):
+                suffix = tid[len(prefix) + 1:]
+                try:
+                    max_num = max(max_num, int(suffix))
+                except ValueError:
+                    pass
+        return f"{prefix}-{max_num + 1:03d}"
+
+
 def create_task(
     conn: sqlite3.Connection,
     id: str,
@@ -145,16 +188,17 @@ def create_task(
     spec: dict | None = None,
     forgejo_issue_id: int | None = None,
     execution_mode: str | None = None,
+    category: str | None = None,
 ) -> dict:
     """Create a task linked to a project."""
     conn.execute(
         """INSERT INTO tasks (id, project_id, title, status, priority,
                               assigned_role, parent_task_id, brief, spec, forgejo_issue_id,
-                              execution_mode)
-           VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
+                              execution_mode, category)
+           VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)""",
         (id, project_id, title, status, priority, assigned_role,
          parent_task_id, _json_encode(brief), _json_encode(spec),
-         forgejo_issue_id, execution_mode),
+         forgejo_issue_id, execution_mode, category),
     )
     conn.commit()
     return get_task(conn, id)
diff --git a/tests/test_api.py b/tests/test_api.py
index 3de2c47..ba194a9 100644
--- a/tests/test_api.py
+++ b/tests/test_api.py
@@ -939,3 +939,135 @@ def test_patch_task_title_and_brief_text_together(client):
     data = r.json()
     assert data["title"] == "Совместное"
     assert data["brief"]["text"] == "и описание"
+
+
+# ---------------------------------------------------------------------------
+# KIN-049 Deploy: migration, PATCH deploy_command, POST /deploy
+# ---------------------------------------------------------------------------
+
+def test_deploy_command_column_exists_in_schema(client):
+    """Migration: PRAGMA table_info(projects) confirms deploy_command exists (decision #74)."""
+    from core.db import init_db
+    conn = init_db(api_module.DB_PATH)
+    cols = {row[1] for row in conn.execute("PRAGMA table_info(projects)").fetchall()}
+    conn.close()
+    assert "deploy_command" in cols
+
+
+def test_patch_project_deploy_command_persisted_via_sql(client):
+    """PATCH with deploy_command is persisted to the DB, checked via direct SQL (decision #55)."""
+    client.patch("/api/projects/p1", json={"deploy_command": "echo hello"})
+
+    from core.db import init_db
+    conn = init_db(api_module.DB_PATH)
+    row = conn.execute("SELECT deploy_command FROM projects WHERE id = 'p1'").fetchone()
+    conn.close()
+    assert row is not None
+    assert row[0] == "echo hello"
+
+
+def test_patch_project_deploy_command_returned_in_response(client):
+    """After PATCH the response contains the updated deploy_command."""
+    r = client.patch("/api/projects/p1", json={"deploy_command": "git push origin main"})
+    assert r.status_code == 200
+    assert r.json()["deploy_command"] == "git push origin main"
+
+
+def test_patch_project_deploy_command_empty_string_clears_to_null(client):
+    """PATCH with an empty string clears deploy_command → NULL (decision #68)."""
+    client.patch("/api/projects/p1", json={"deploy_command": "echo hello"})
+    client.patch("/api/projects/p1", json={"deploy_command": ""})
+
+    from core.db import init_db
+    conn = init_db(api_module.DB_PATH)
+    row = conn.execute("SELECT deploy_command FROM projects WHERE id = 'p1'").fetchone()
+    conn.close()
+    assert row[0] is None
+
+
+def test_deploy_project_executes_command_returns_stdout(client):
+    """POST /deploy: an echo command → stdout is present in the response."""
+    from unittest.mock import patch, MagicMock
+
+    client.patch("/api/projects/p1", json={"deploy_command": "echo deployed"})
+
+    mock_result = MagicMock()
+    mock_result.returncode = 0
+    mock_result.stdout = "deployed\n"
+    mock_result.stderr = ""
+
+    with patch("web.api.subprocess.run", return_value=mock_result):
+        r = client.post("/api/projects/p1/deploy")
+
+    assert r.status_code == 200
+    data = r.json()
+    assert data["success"] is True
+    assert data["exit_code"] == 0
+    assert "deployed" in data["stdout"]
+    assert "duration_seconds" in data
+
+
+def test_deploy_project_without_deploy_command_returns_400(client):
+    """POST /deploy for a project without deploy_command → 400."""
+    r = client.post("/api/projects/p1/deploy")
+    assert r.status_code == 400
+
+
+def test_deploy_project_not_found_returns_404(client):
+    """POST /deploy for a non-existent project → 404."""
+    r = client.post("/api/projects/NOPE/deploy")
+    assert r.status_code == 404
+
+
+def test_deploy_project_failed_command_returns_success_false(client):
+    """POST /deploy: non-zero exit_code → success=False (the command ran but failed)."""
+    from unittest.mock import patch, MagicMock
+
+    client.patch("/api/projects/p1", json={"deploy_command": "exit 1"})
+
+    mock_result = MagicMock()
+    mock_result.returncode = 1
+    mock_result.stdout = ""
+    mock_result.stderr = "error occurred"
+
+    with patch("web.api.subprocess.run", return_value=mock_result):
+        r = client.post("/api/projects/p1/deploy")
+
+    assert r.status_code == 200
+    data = r.json()
+    assert data["success"] is False
+    assert data["exit_code"] == 1
+    assert "error occurred" in data["stderr"]
+
+
+def test_deploy_project_timeout_returns_504(client):
+    """POST /deploy: timeout → 504."""
+    from unittest.mock import patch
+    import subprocess
+
+    client.patch("/api/projects/p1", json={"deploy_command": "sleep 100"})
+
+    with patch("web.api.subprocess.run", side_effect=subprocess.TimeoutExpired("sleep 100", 60)):
+        r = client.post("/api/projects/p1/deploy")
+
+    assert r.status_code == 504
+
+
+def test_task_full_includes_project_deploy_command(client):
+    """GET /api/tasks/{id}/full includes project_deploy_command from the projects table."""
+    client.patch("/api/projects/p1", json={"deploy_command": "git push"})
+
+    r = client.get("/api/tasks/P1-001/full")
+    assert r.status_code == 200
+    data = r.json()
+    assert "project_deploy_command" in data
+    assert data["project_deploy_command"] == "git push"
+
+
+def test_task_full_project_deploy_command_none_when_not_set(client):
+    """GET /api/tasks/{id}/full returns project_deploy_command=None when it is not set."""
+    r = client.get("/api/tasks/P1-001/full")
+    assert r.status_code == 200
+    data = r.json()
+    assert "project_deploy_command" in data
+    assert data["project_deploy_command"] is None
diff --git a/web/api.py b/web/api.py
index 4c9fa49..56ff9f7 100644
--- a/web/api.py
+++ b/web/api.py
@@ -20,7 +20,7 @@ from pydantic import BaseModel
 from core.db import init_db
 from core import models
-from core.models import VALID_COMPLETION_MODES
+from core.models import VALID_COMPLETION_MODES, TASK_CATEGORIES
 from agents.bootstrap import (
     detect_tech_stack, detect_modules, extract_decisions_from_claude_md,
     find_vault_root, scan_obsidian, save_to_db,
@@ -139,12 +139,13 @@ class ProjectPatch(BaseModel):
     execution_mode: str | None = None
     autocommit_enabled: bool | None = None
     obsidian_vault_path: str | None = None
+    deploy_command: str | None = None
 
 
 @app.patch("/api/projects/{project_id}")
 def patch_project(project_id: str, body: ProjectPatch):
-    if body.execution_mode is None and body.autocommit_enabled is None and body.obsidian_vault_path is None:
-        raise HTTPException(400, "Nothing to update. Provide execution_mode, autocommit_enabled, or obsidian_vault_path.")
+    if body.execution_mode is None and body.autocommit_enabled is None and body.obsidian_vault_path is None and body.deploy_command is None:
+        raise HTTPException(400, "Nothing to update. Provide execution_mode, autocommit_enabled, obsidian_vault_path, or deploy_command.")
     if body.execution_mode is not None and body.execution_mode not in VALID_EXECUTION_MODES:
         raise HTTPException(400, f"Invalid execution_mode '{body.execution_mode}'. Must be one of: {', '.join(VALID_EXECUTION_MODES)}")
     conn = get_conn()
@@ -159,6 +160,9 @@ def patch_project(project_id: str, body: ProjectPatch):
         fields["autocommit_enabled"] = int(body.autocommit_enabled)
     if body.obsidian_vault_path is not None:
         fields["obsidian_vault_path"] = body.obsidian_vault_path
+    if body.deploy_command is not None:
+        # Empty string = sentinel for clearing (decision #68)
+        fields["deploy_command"] = None if body.deploy_command == "" else body.deploy_command
     models.update_project(conn, project_id, **fields)
     p = models.get_project(conn, project_id)
     conn.close()
@@ -183,6 +187,46 @@ def sync_obsidian_endpoint(project_id: str):
     return result
 
 
+@app.post("/api/projects/{project_id}/deploy")
+def deploy_project(project_id: str):
+    """Execute deploy_command for a project. Returns stdout/stderr/exit_code.
+
+    WARNING: shell=True — deploy_command is admin-only, set in Settings by the project owner.
+ """ + import time + conn = get_conn() + p = models.get_project(conn, project_id) + conn.close() + if not p: + raise HTTPException(404, f"Project '{project_id}' not found") + deploy_command = p.get("deploy_command") + if not deploy_command: + raise HTTPException(400, "deploy_command not set for this project") + cwd = p.get("path") or None + start = time.monotonic() + try: + result = subprocess.run( + deploy_command, + shell=True, # WARNING: shell=True — command is admin-only + cwd=cwd, + capture_output=True, + text=True, + timeout=60, + ) + except subprocess.TimeoutExpired: + raise HTTPException(504, "Deploy command timed out after 60 seconds") + except Exception as e: + raise HTTPException(500, f"Deploy failed: {e}") + duration = round(time.monotonic() - start, 2) + return { + "success": result.returncode == 0, + "exit_code": result.returncode, + "stdout": result.stdout, + "stderr": result.stderr, + "duration_seconds": duration, + } + + @app.post("/api/projects") def create_project(body: ProjectCreate): conn = get_conn() @@ -216,6 +260,7 @@ class TaskCreate(BaseModel): title: str priority: int = 5 route_type: str | None = None + category: str | None = None @app.post("/api/tasks") @@ -225,21 +270,16 @@ def create_task(body: TaskCreate): if not p: conn.close() raise HTTPException(404, f"Project '{body.project_id}' not found") - # Auto-generate task ID - existing = models.list_tasks(conn, project_id=body.project_id) - prefix = body.project_id.upper() - max_num = 0 - for t in existing: - if t["id"].startswith(prefix + "-"): - try: - num = int(t["id"].split("-", 1)[1]) - max_num = max(max_num, num) - except ValueError: - pass - task_id = f"{prefix}-{max_num + 1:03d}" + category = None + if body.category: + category = body.category.upper() + if category not in TASK_CATEGORIES: + conn.close() + raise HTTPException(400, f"Invalid category '{category}'. 
Must be one of: {', '.join(TASK_CATEGORIES)}") + task_id = models.next_task_id(conn, body.project_id, category=category) brief = {"route_type": body.route_type} if body.route_type else None t = models.create_task(conn, task_id, body.project_id, body.title, - priority=body.priority, brief=brief) + priority=body.priority, brief=brief, category=category) conn.close() return t @@ -344,8 +384,10 @@ def get_task_full(task_id: str): decisions = models.get_decisions(conn, t["project_id"]) # Filter to decisions linked to this task task_decisions = [d for d in decisions if d.get("task_id") == task_id] + p = models.get_project(conn, t["project_id"]) + project_deploy_command = p.get("deploy_command") if p else None conn.close() - return {**t, "pipeline_steps": steps, "related_decisions": task_decisions} + return {**t, "pipeline_steps": steps, "related_decisions": task_decisions, "project_deploy_command": project_deploy_command} class TaskApprove(BaseModel): diff --git a/web/frontend/src/__tests__/filter-persistence.test.ts b/web/frontend/src/__tests__/filter-persistence.test.ts index 95114ee..85aa031 100644 --- a/web/frontend/src/__tests__/filter-persistence.test.ts +++ b/web/frontend/src/__tests__/filter-persistence.test.ts @@ -28,6 +28,7 @@ vi.mock('../api', () => ({ createTask: vi.fn(), patchTask: vi.fn(), patchProject: vi.fn(), + deployProject: vi.fn(), }, })) @@ -785,3 +786,126 @@ describe('KIN-015: TaskDetail — Edit button и форма редактиров expect(wrapper.find('input:not([type])').exists(), 'Форма должна закрыться после сохранения').toBe(false) }) }) + +// ───────────────────────────────────────────────────────────── +// KIN-049: TaskDetail — кнопка Deploy +// ───────────────────────────────────────────────────────────── + +describe('KIN-049: TaskDetail — кнопка Deploy', () => { + function makeDeployTask(status: string, deployCommand: string | null) { + return { + id: 'KIN-049', + project_id: 'KIN', + title: 'Deploy Task', + status, + priority: 3, + assigned_role: null, + parent_task_id: null, + brief: null, + spec: null, + execution_mode: null, + project_deploy_command: deployCommand, + created_at: '2024-01-01', + updated_at: '2024-01-01', + pipeline_steps: [], + related_decisions: [], + } + } + + it('Кнопка Deploy видна при status=done и project_deploy_command задан', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('done', 'git push origin main') as any) + const router = makeRouter() + await router.push('/task/KIN-049') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-049' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const deployBtn = wrapper.findAll('button').find(b => b.text().includes('Deploy')) + expect(deployBtn?.exists(), 'Кнопка Deploy должна быть видна при done + deploy_command').toBe(true) + }) + + it('Кнопка Deploy скрыта при status=done но без project_deploy_command', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('done', null) as any) + const router = makeRouter() + await router.push('/task/KIN-049') + + const wrapper = mount(TaskDetail, { + props: { id: 'KIN-049' }, + global: { plugins: [router] }, + }) + await flushPromises() + + const hasDeployBtn = wrapper.findAll('button').some(b => b.text().includes('Deploy')) + expect(hasDeployBtn, 'Deploy не должна быть видна без deploy_command').toBe(false) + }) + + it('Кнопка Deploy скрыта при status=pending (даже с deploy_command)', async () => { + vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('pending', 'git push') as any) + const router = 
+    const router = makeRouter()
+    await router.push('/task/KIN-049')
+
+    const wrapper = mount(TaskDetail, {
+      props: { id: 'KIN-049' },
+      global: { plugins: [router] },
+    })
+    await flushPromises()
+
+    const hasDeployBtn = wrapper.findAll('button').some(b => b.text().includes('Deploy'))
+    expect(hasDeployBtn, 'Deploy must not be visible for status pending').toBe(false)
+  })
+
+  it('Deploy button is hidden for status=in_progress', async () => {
+    vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('in_progress', 'git push') as any)
+    const router = makeRouter()
+    await router.push('/task/KIN-049')
+
+    const wrapper = mount(TaskDetail, {
+      props: { id: 'KIN-049' },
+      global: { plugins: [router] },
+    })
+    await flushPromises()
+
+    const hasDeployBtn = wrapper.findAll('button').some(b => b.text().includes('Deploy'))
+    expect(hasDeployBtn, 'Deploy must not be visible for status in_progress').toBe(false)
+  })
+
+  it('Deploy button is hidden for status=review', async () => {
+    vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('review', 'git push') as any)
+    const router = makeRouter()
+    await router.push('/task/KIN-049')
+
+    const wrapper = mount(TaskDetail, {
+      props: { id: 'KIN-049' },
+      global: { plugins: [router] },
+    })
+    await flushPromises()
+
+    const hasDeployBtn = wrapper.findAll('button').some(b => b.text().includes('Deploy'))
+    expect(hasDeployBtn, 'Deploy must not be visible for status review').toBe(false)
+  })
+
+  it('Clicking Deploy calls api.deployProject with the task project_id', async () => {
+    vi.mocked(api.taskFull).mockResolvedValue(makeDeployTask('done', 'echo ok') as any)
+    vi.mocked(api.deployProject).mockResolvedValue({
+      success: true, exit_code: 0, stdout: 'ok\n', stderr: '', duration_seconds: 0.1,
+    } as any)
+
+    const router = makeRouter()
+    await router.push('/task/KIN-049')
+
+    const wrapper = mount(TaskDetail, {
+      props: { id: 'KIN-049' },
+      global: { plugins: [router] },
+    })
+    await flushPromises()
+
+    const deployBtn = wrapper.findAll('button').find(b => b.text().includes('Deploy'))
+    await deployBtn!.trigger('click')
+    await flushPromises()
+
+    expect(api.deployProject).toHaveBeenCalledWith('KIN')
+  })
+})
diff --git a/web/frontend/src/api.ts b/web/frontend/src/api.ts
index 5040aae..6efc374 100644
--- a/web/frontend/src/api.ts
+++ b/web/frontend/src/api.ts
@@ -42,6 +42,7 @@ export interface Project {
   execution_mode: string | null
   autocommit_enabled: number | null
   obsidian_vault_path: string | null
+  deploy_command: string | null
   created_at: string
   total_tasks: number
   done_tasks: number
@@ -76,6 +77,7 @@ export interface Task {
   execution_mode: string | null
   blocked_reason: string | null
   dangerously_skipped: number | null
+  category: string | null
   created_at: string
   updated_at: string
 }
@@ -116,9 +118,18 @@ export interface PipelineStep {
   created_at: string
 }
 
+export interface DeployResult {
+  success: boolean
+  exit_code: number
+  stdout: string
+  stderr: string
+  duration_seconds: number
+}
+
 export interface TaskFull extends Task {
   pipeline_steps: PipelineStep[]
   related_decisions: Decision[]
+  project_deploy_command: string | null
 }
 
 export interface PendingAction {
@@ -161,7 +172,7 @@ export const api = {
   cost: (days = 7) => get(`/cost?days=${days}`),
   createProject: (data: { id: string; name: string; path: string; tech_stack?: string[]; priority?: number }) =>
     post('/projects', data),
-  createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string }) =>
+  createTask: (data: { project_id: string; title: string; priority?: number; route_type?: string; category?: string }) =>
     post('/tasks', data),
   approveTask: (id: string, data?: { decision_title?: string; decision_description?: string; decision_type?: string; create_followups?: boolean }) =>
     post<{ status: string; followup_tasks: Task[]; needs_decision: boolean; pending_actions: PendingAction[] }>(`/tasks/${id}/approve`, data || {}),
@@ -181,8 +192,10 @@ export const api = {
     post<{ updated: string[]; count: number }>(`/projects/${projectId}/audit/apply`, { task_ids: taskIds }),
   patchTask: (id: string, data: { status?: string; execution_mode?: string; priority?: number; route_type?: string; title?: string; brief_text?: string }) =>
     patch(`/tasks/${id}`, data),
-  patchProject: (id: string, data: { execution_mode?: string; autocommit_enabled?: boolean; obsidian_vault_path?: string }) =>
+  patchProject: (id: string, data: { execution_mode?: string; autocommit_enabled?: boolean; obsidian_vault_path?: string; deploy_command?: string }) =>
     patch(`/projects/${id}`, data),
+  deployProject: (projectId: string) =>
+    post<DeployResult>(`/projects/${projectId}/deploy`, {}),
   syncObsidian: (projectId: string) =>
    post<ObsidianSyncResult>(`/projects/${projectId}/sync/obsidian`, {}),
   deleteDecision: (projectId: string, decisionId: number) =>
diff --git a/web/frontend/src/views/SettingsView.vue b/web/frontend/src/views/SettingsView.vue
index 319574a..e3b2dd1 100644
--- a/web/frontend/src/views/SettingsView.vue
+++ b/web/frontend/src/views/SettingsView.vue
@@ -4,9 +4,12 @@ import { api, type Project, type ObsidianSyncResult } from '../api'
 
 const projects = ref<Project[]>([])
 const vaultPaths = ref<Record<string, string>>({})
+const deployCommands = ref<Record<string, string>>({})
 const saving = ref<Record<string, boolean>>({})
+const savingDeploy = ref<Record<string, boolean>>({})
 const syncing = ref<Record<string, boolean>>({})
 const saveStatus = ref<Record<string, string>>({})
+const saveDeployStatus = ref<Record<string, string>>({})
 const syncResults = ref<Record<string, ObsidianSyncResult | null>>({})
 const error = ref<string | null>(null)
 
@@ -15,6 +18,7 @@ onMounted(async () => {
     projects.value = await api.projects()
     for (const p of projects.value) {
       vaultPaths.value[p.id] = p.obsidian_vault_path ?? ''
+      deployCommands.value[p.id] = p.deploy_command ?? ''
     }
   } catch (e) {
     error.value = String(e)
@@ -34,6 +38,19 @@ async function saveVaultPath(projectId: string) {
   }
 }
 
+async function saveDeployCommand(projectId: string) {
+  savingDeploy.value[projectId] = true
+  saveDeployStatus.value[projectId] = ''
+  try {
+    await api.patchProject(projectId, { deploy_command: deployCommands.value[projectId] })
+    saveDeployStatus.value[projectId] = 'Saved'
+  } catch (e) {
+    saveDeployStatus.value[projectId] = `Error: ${e}`
+  } finally {
+    savingDeploy.value[projectId] = false
+  }
+}
+
 async function runSync(projectId: string) {
   syncing.value[projectId] = true
   syncResults.value[projectId] = null
@@ -70,13 +87,37 @@ async function runSync(projectId: string) {
           />
+
+        <div class="setting-row">
+          <label>Deploy command</label>
+          <textarea
+            v-model="deployCommands[project.id]"
+            rows="2"
+            placeholder="git push origin main && ssh host 'systemctl restart app'"
+          />
+          <div class="hint">
+            The command runs through a shell in the project directory.
+            Configured only by the administrator.
+          </div>
+          <div class="actions">
+            <button
+              :disabled="savingDeploy[project.id]"
+              @click="saveDeployCommand(project.id)"
+            >
+              Save
+            </button>
+            <span v-if="saveDeployStatus[project.id]">
+              {{ saveDeployStatus[project.id] }}
+            </span>
+          </div>
+        </div>
diff --git a/web/frontend/src/views/TaskDetail.vue b/web/frontend/src/views/TaskDetail.vue
--- a/web/frontend/src/views/TaskDetail.vue
+++ b/web/frontend/src/views/TaskDetail.vue
@@ -52,0 +53,16 @@
+        <button
+          v-if="task.status === 'done' && task.project_deploy_command"
+          @click="runDeploy"
+        >
+          Deploy
+        </button>
+        <div v-if="deployResult" class="deploy-result">
+          <span>
+            {{ deployResult.success ? '✓ Deploy succeeded' : '✗ Deploy failed' }}
+          </span>
+          <span>
+            exit {{ deployResult.exit_code }} · {{ deployResult.duration_seconds }}s
+          </span>
+          <pre v-if="deployResult.stdout">{{ deployResult.stdout }}</pre>
+          <pre v-if="deployResult.stderr">{{ deployResult.stderr }}</pre>
+        </div>

From 81f974e6d3acef4ea21adca2314c1c66bbdfe250 Mon Sep 17 00:00:00 2001
From: Gros Frumos
Date: Mon, 16 Mar 2026 08:34:30 +0200
Subject: [PATCH 13/57] kin: KIN-OBS-009 Task IDs by category: PROJ-CAT-NUM
 (VDOL-SEC-001, VDOL-UI-003, VDOL-API-002, VDOL-INFRA-001, VDOL-BIZ-001).
 The PM assigns the category when creating a task.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
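
Task IDs can now carry a category segment: next_task_id() keeps one
counter per PROJ-CAT prefix and a separate legacy counter for plain
PROJ-NNN IDs, so mixed projects do not collide. The PM assigns the
category when it creates a task, and the Obsidian checkbox regex is
widened to accept both formats.

A minimal sketch against core.models (the ":memory:" path for init_db
is an assumption based on the in-memory fixtures in tests/test_models.py):

    # Sketch only: per-category counters are independent of the legacy one.
    from core.db import init_db
    from core import models

    conn = init_db(":memory:")  # assumption: init_db accepts sqlite paths
    models.create_project(conn, "vdol", "VDOL", "/vdol")
    print(models.next_task_id(conn, "vdol"))                  # VDOL-001
    print(models.next_task_id(conn, "vdol", category="SEC"))  # VDOL-SEC-001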
From 81f974e6d3acef4ea21adca2314c1c66bbdfe250 Mon Sep 17 00:00:00 2001 From: Gros Frumos Date: Mon, 16 Mar 2026 08:34:30 +0200 Subject: [PATCH 13/57] =?UTF-8?q?kin:=20KIN-OBS-009=20Task=20ID=20=D0=BF?= =?UTF-8?q?=D0=BE=20=D0=BA=D0=B0=D1=82=D0=B5=D0=B3=D0=BE=D1=80=D0=B8=D1=8F?= =?UTF-8?q?=D0=BC:=20PROJ-CAT-NUM=20(VDOL-SEC-001,=20VDOL-UI-003,=20VDOL-A?= =?UTF-8?q?PI-002,=20VDOL-INFRA-001,=20VDOL-BIZ-001).=20PM=20=D0=BD=D0=B0?= =?UTF-8?q?=D0=B7=D0=BD=D0=B0=D1=87=D0=B0=D0=B5=D1=82=20=D0=BA=D0=B0=D1=82?= =?UTF-8?q?=D0=B5=D0=B3=D0=BE=D1=80=D0=B8=D1=8E=20=D0=BF=D1=80=D0=B8=20?= =?UTF-8?q?=D1=81=D0=BE=D0=B7=D0=B4=D0=B0=D0=BD=D0=B8=D0=B8=20=D0=B7=D0=B0?= =?UTF-8?q?=D0=B4=D0=B0=D1=87=D0=B8.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- core/obsidian_sync.py | 2 +- tests/test_models.py | 125 +++++++++++++++++++++++++ web/frontend/src/views/ProjectView.vue | 18 +++- 3 files changed, 142 insertions(+), 3 deletions(-) diff --git a/core/obsidian_sync.py b/core/obsidian_sync.py index da4256c..1f4d2b6 100644 --- a/core/obsidian_sync.py +++ b/core/obsidian_sync.py @@ -90,7 +90,7 @@ def parse_task_checkboxes( Returns: [{"task_id": "KIN-013", "done": True, "title": "..."}] """ - pattern = re.compile(r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-\d+)\s+(.+)$") + pattern = re.compile(r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-(?:[A-Z][A-Z0-9]*-)?\d+)\s+(.+)$") results: list[dict] = [] search_dirs = [ diff --git a/tests/test_models.py b/tests/test_models.py index 33ba1c2..59157c4 100644 --- a/tests/test_models.py +++ b/tests/test_models.py @@ -1,8 +1,10 @@ """Tests for core/models.py — all functions, in-memory SQLite.""" +import re import pytest from core.db import init_db from core import models +from core.models import TASK_CATEGORIES @pytest.fixture @@ -330,3 +332,126 @@ def test_add_decision_if_new_skips_whitespace_duplicate(conn): result = models.add_decision_if_new(conn, "p1", "convention", " Run tests after each change ", "desc2") assert result is None assert len(models.get_decisions(conn, "p1")) == 1 + + +# -- next_task_id (KIN-OBS-009) -- + +def test_next_task_id_with_category_first(conn): + """Первая задача с category='SEC' → 'VDOL-SEC-001'.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol", category="SEC") + assert task_id == "VDOL-SEC-001" + + +def test_next_task_id_with_category_increments(conn): + """Вторая задача с category='SEC' → 'VDOL-SEC-002'.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + models.create_task(conn, "VDOL-SEC-001", "vdol", "Task 1", category="SEC") + task_id = models.next_task_id(conn, "vdol", category="SEC") + assert task_id == "VDOL-SEC-002" + + +def test_next_task_id_category_counters_independent(conn): + """Счётчики категорий независимы: SEC-002 не влияет на UI-001.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + models.create_task(conn, "VDOL-SEC-001", "vdol", "Sec Task 1", category="SEC") + models.create_task(conn, "VDOL-SEC-002", "vdol", "Sec Task 2", category="SEC") + task_id = models.next_task_id(conn, "vdol", category="UI") + assert task_id == "VDOL-UI-001" + + +def test_next_task_id_without_category_backward_compat(conn): + """Задача без category → 'VDOL-001' (backward compat).""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol") + assert task_id == "VDOL-001" + + +def test_next_task_id_mixed_formats_no_collision(conn): + """Смешанный проект: счётчики старого и нового форматов не пересекаются.""" + 
models.create_project(conn, "kin", "KIN", "/kin") + models.create_task(conn, "KIN-001", "kin", "Old style task") + models.create_task(conn, "KIN-002", "kin", "Old style task 2") + # Новый формат с категорией не мешает старому + cat_id = models.next_task_id(conn, "kin", category="OBS") + assert cat_id == "KIN-OBS-001" + # Старый формат не мешает новому + old_id = models.next_task_id(conn, "kin") + assert old_id == "KIN-003" + + +# -- Obsidian sync regex (KIN-OBS-009, решение #75) -- + +_OBSIDIAN_TASK_PATTERN = re.compile( + r"^[-*]\s+\[([xX ])\]\s+([A-Z][A-Z0-9]*-(?:[A-Z][A-Z0-9]*-)?\d+)\s+(.+)$" +) + + +def test_obsidian_regex_matches_old_format(): + """Старый формат KIN-001 матчится.""" + m = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-001 Fix login bug") + assert m is not None + assert m.group(2) == "KIN-001" + + +def test_obsidian_regex_matches_new_format(): + """Новый формат VDOL-SEC-001 матчится.""" + m = _OBSIDIAN_TASK_PATTERN.match("- [ ] VDOL-SEC-001 Security audit") + assert m is not None + assert m.group(2) == "VDOL-SEC-001" + + +def test_obsidian_regex_matches_obs_format(): + """Формат KIN-OBS-009 матчится (проверяем задачу этой фичи).""" + m = _OBSIDIAN_TASK_PATTERN.match("* [X] KIN-OBS-009 Task ID по категориям") + assert m is not None + assert m.group(2) == "KIN-OBS-009" + + +def test_obsidian_regex_no_match_lowercase(): + """Нижний регистр не матчится.""" + assert _OBSIDIAN_TASK_PATTERN.match("- [x] proj-001 lowercase id") is None + + +def test_obsidian_regex_no_match_numeric_prefix(): + """Числовой префикс не матчится.""" + assert _OBSIDIAN_TASK_PATTERN.match("- [x] 123-abc invalid format") is None + + +def test_obsidian_regex_done_state(conn): + """Статус done/pending корректно извлекается.""" + m_done = _OBSIDIAN_TASK_PATTERN.match("- [x] KIN-UI-003 Done task") + m_pending = _OBSIDIAN_TASK_PATTERN.match("- [ ] KIN-UI-004 Pending task") + assert m_done.group(1) == "x" + assert m_pending.group(1) == " " + + +# -- next_task_id для всех 12 категорий (KIN-OBS-009) -- + +@pytest.mark.parametrize("cat", TASK_CATEGORIES) +def test_next_task_id_all_categories_generate_correct_format(conn, cat): + """next_task_id генерирует ID формата PROJ-CAT-001 для каждой из 12 категорий.""" + models.create_project(conn, "vdol", "VDOL", "/vdol") + task_id = models.next_task_id(conn, "vdol", category=cat) + assert task_id == f"VDOL-{cat}-001" + + +# -- update_task category не ломает brief (KIN-OBS-009, решение #74) -- + +def test_update_task_category_preserves_brief(conn): + """update_task(category=...) не перетирает существующее поле brief.""" + models.create_project(conn, "p1", "P1", "/p1") + models.create_task(conn, "P1-001", "p1", "Task", brief={"summary": "important context"}) + updated = models.update_task(conn, "P1-001", category="SEC") + assert updated["category"] == "SEC" + assert updated["brief"] == {"summary": "important context"} + + +def test_update_task_category_preserves_status_and_priority(conn): + """update_task(category=...) 
не меняет остальные поля задачи.""" + models.create_project(conn, "p1", "P1", "/p1") + models.create_task(conn, "P1-001", "p1", "Task", status="in_progress", priority=3) + updated = models.update_task(conn, "P1-001", category="UI") + assert updated["category"] == "UI" + assert updated["status"] == "in_progress" + assert updated["priority"] == 3 diff --git a/web/frontend/src/views/ProjectView.vue b/web/frontend/src/views/ProjectView.vue index b057a5a..dd0811f 100644 --- a/web/frontend/src/views/ProjectView.vue +++ b/web/frontend/src/views/ProjectView.vue @@ -117,8 +117,14 @@ async function applyAudit() { } // Add task modal +const TASK_CATEGORIES = ['SEC', 'UI', 'API', 'INFRA', 'BIZ', 'DB', 'ARCH', 'TEST', 'PERF', 'DOCS', 'FIX', 'OBS'] +const CATEGORY_COLORS: Record = { + SEC: 'red', UI: 'blue', API: 'green', INFRA: 'orange', BIZ: 'purple', + DB: 'yellow', ARCH: 'gray', TEST: 'purple', PERF: 'orange', DOCS: 'gray', + FIX: 'red', OBS: 'blue', +} const showAddTask = ref(false) -const taskForm = ref({ title: '', priority: 5, route_type: '' }) +const taskForm = ref({ title: '', priority: 5, route_type: '', category: '' }) const taskFormError = ref('') // Add decision modal @@ -206,9 +212,10 @@ async function addTask() { title: taskForm.value.title, priority: taskForm.value.priority, route_type: taskForm.value.route_type || undefined, + category: taskForm.value.category || undefined, }) showAddTask.value = false - taskForm.value = { title: '', priority: 5, route_type: '' } + taskForm.value = { title: '', priority: 5, route_type: '', category: '' } await load() } catch (e: any) { taskFormError.value = e.message @@ -354,6 +361,7 @@ async function addDecision() {
           <span class="task-id">{{ t.id }}</span>
+          <span v-if="t.category" :class="CATEGORY_COLORS[t.category]">{{ t.category }}</span>
           <span class="task-title">{{ t.title }}</span>
           <span v-if="t.parent_task_id">escalated from {{ t.parent_task_id }}</span>
@@ -373,6 +381,7 @@
           <span class="task-id">{{ t.id }}</span>
+          <span v-if="t.category" :class="CATEGORY_COLORS[t.category]">{{ t.category }}</span>
           <span class="task-title">{{ t.title }}</span>
+