kin/tests/test_runner.py

1666 lines
77 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

"""Tests for agents/runner.py — agent execution with mocked claude CLI."""
import json
import subprocess
import pytest
from unittest.mock import patch, MagicMock
from core.db import init_db
from core import models
from agents.runner import (
run_agent, run_pipeline, run_audit, _try_parse_json, _run_learning_extraction,
_build_claude_env, _resolve_claude_cmd, _EXTRA_PATH_DIRS, _run_autocommit,
)
@pytest.fixture
def conn():
    """In-memory DB pre-seeded with project 'vdol' and task 'VDOL-001'."""
    database = init_db(":memory:")
    models.create_project(database, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"])
    models.create_task(database, "VDOL-001", "vdol", "Fix bug",
                       brief={"route_type": "debug"})
    yield database
    database.close()
def _mock_claude_success(output_data):
"""Create a mock subprocess result with successful claude output."""
mock = MagicMock()
mock.stdout = json.dumps(output_data) if isinstance(output_data, dict) else output_data
mock.stderr = ""
mock.returncode = 0
return mock
def _mock_claude_failure(error_msg):
mock = MagicMock()
mock.stdout = ""
mock.stderr = error_msg
mock.returncode = 1
return mock
# ---------------------------------------------------------------------------
# run_agent
# ---------------------------------------------------------------------------
class TestRunAgent:
    """run_agent(): CLI invocation, success/failure handling, DB logging, dry-run."""

    @patch("agents.runner.subprocess.run")
    def test_successful_agent_run(self, mock_run, conn):
        """Happy path: result carries role/model/duration and the claude CLI gets expected flags."""
        mock_run.return_value = _mock_claude_success({
            "result": "Found race condition in useSearch.ts",
            "usage": {"total_tokens": 5000},
            "cost_usd": 0.015,
        })
        result = run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert result["success"] is True
        assert result["role"] == "debugger"
        assert result["model"] == "sonnet"
        assert result["duration_seconds"] >= 0
        # Verify claude was called with right args
        call_args = mock_run.call_args
        cmd = call_args[0][0]  # first positional arg of subprocess.run: the argv list
        assert "claude" in cmd[0]
        assert "-p" in cmd
        assert "--output-format" in cmd
        assert "json" in cmd

    @patch("agents.runner.subprocess.run")
    def test_failed_agent_run(self, mock_run, conn):
        """A nonzero-returncode run reports failure and is logged with success=0."""
        mock_run.return_value = _mock_claude_failure("API error")
        result = run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert result["success"] is False
        # Should be logged in agent_logs
        logs = conn.execute("SELECT * FROM agent_logs WHERE task_id='VDOL-001'").fetchall()
        assert len(logs) == 1
        assert logs[0]["success"] == 0

    def test_dry_run_returns_prompt(self, conn):
        """dry_run=True returns the built prompt without invoking the CLI."""
        result = run_agent(conn, "debugger", "VDOL-001", "vdol", dry_run=True)
        assert result["dry_run"] is True
        assert result["prompt"] is not None
        assert "VDOL-001" in result["prompt"]
        assert result["output"] is None

    @patch("agents.runner.subprocess.run")
    def test_agent_logs_to_db(self, mock_run, conn):
        """Each run writes one agent_logs row tagged with role and project."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "tester", "VDOL-001", "vdol")
        logs = conn.execute("SELECT * FROM agent_logs WHERE agent_role='tester'").fetchall()
        assert len(logs) == 1
        assert logs[0]["project_id"] == "vdol"

    @patch("agents.runner.subprocess.run")
    def test_full_output_saved_to_db(self, mock_run, conn):
        """Bug fix: output_summary must contain the FULL output, not truncated."""
        long_json = json.dumps({
            "result": json.dumps({
                "summary": "Security audit complete",
                "findings": [{"title": f"Finding {i}", "severity": "HIGH"} for i in range(50)],
            }),
        })
        mock = MagicMock()
        mock.stdout = long_json
        mock.stderr = ""
        mock.returncode = 0
        mock_run.return_value = mock
        run_agent(conn, "security", "VDOL-001", "vdol")
        logs = conn.execute("SELECT output_summary FROM agent_logs WHERE agent_role='security'").fetchall()
        assert len(logs) == 1
        output = logs[0]["output_summary"]
        assert output is not None
        assert len(output) > 1000  # Must not be truncated
        # Should contain all 50 findings
        assert "Finding 49" in output

    @patch("agents.runner.subprocess.run")
    def test_dict_output_saved_as_json_string(self, mock_run, conn):
        """When claude returns structured JSON, it must be saved as string."""
        mock_run.return_value = _mock_claude_success({
            "result": {"status": "ok", "files": ["a.py", "b.py"]},
        })
        result = run_agent(conn, "debugger", "VDOL-001", "vdol")
        # output should be a string (JSON serialized), not a dict
        assert isinstance(result["raw_output"], str)
        logs = conn.execute("SELECT output_summary FROM agent_logs WHERE agent_role='debugger'").fetchall()
        saved = logs[0]["output_summary"]
        assert isinstance(saved, str)
        assert "a.py" in saved

    @patch("agents.runner.subprocess.run")
    def test_previous_output_passed(self, mock_run, conn):
        """previous_output must be embedded in the prompt passed via -p."""
        mock_run.return_value = _mock_claude_success({"result": "tests pass"})
        run_agent(conn, "tester", "VDOL-001", "vdol",
                  previous_output="Found bug in line 42")
        call_args = mock_run.call_args
        prompt = call_args[0][0][2]  # -p argument
        assert "line 42" in prompt
# ---------------------------------------------------------------------------
# run_pipeline
# ---------------------------------------------------------------------------
class TestRunPipeline:
    """run_pipeline(): step chaining, failure propagation, dry-run, and hook firing."""

    @patch("agents.runner._run_autocommit")  # gotcha #41: mock autocommit in tests that are not about autocommit
    @patch("agents.runner.subprocess.run")
    def test_successful_pipeline(self, mock_run, mock_autocommit, conn):
        """All steps succeed: pipeline row is 'completed' and the task moves to 'review'."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        steps = [
            {"role": "debugger", "brief": "find bug"},
            {"role": "tester", "depends_on": "debugger", "brief": "verify"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        assert result["steps_completed"] == 2
        assert len(result["results"]) == 2
        # Pipeline created in DB
        pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone()
        assert pipe is not None
        assert pipe["status"] == "completed"
        # Task updated to review
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review"

    @patch("agents.runner.subprocess.run")
    def test_pipeline_fails_on_step(self, mock_run, conn):
        """A mid-pipeline failure stops execution, marks pipeline failed and task blocked."""
        # First step succeeds, second fails
        mock_run.side_effect = [
            _mock_claude_success({"result": "found bug"}),
            _mock_claude_failure("compilation error"),
        ]
        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "frontend_dev", "brief": "fix"},
            {"role": "tester", "brief": "test"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        assert result["steps_completed"] == 1  # Only debugger completed
        assert "frontend_dev" in result["error"]
        # Pipeline marked as failed
        pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone()
        assert pipe["status"] == "failed"
        # Task marked as blocked
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "blocked"

    def test_pipeline_dry_run(self, conn):
        """Dry run reports success for all steps but writes nothing to the DB."""
        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "tester", "brief": "verify"},
        ]
        result = run_pipeline(conn, "VDOL-001", steps, dry_run=True)
        assert result["dry_run"] is True
        assert result["success"] is True
        assert result["steps_completed"] == 2
        # No pipeline created in DB
        pipes = conn.execute("SELECT * FROM pipelines").fetchall()
        assert len(pipes) == 0

    @patch("agents.runner.subprocess.run")
    def test_pipeline_chains_output(self, mock_run, conn):
        """Output from step N is passed as previous_output to step N+1."""
        call_count = [0]

        def side_effect(*args, **kwargs):
            call_count[0] += 1
            if call_count[0] == 1:
                return _mock_claude_success({"result": "bug is in line 42"})
            return _mock_claude_success({"result": "test written"})

        mock_run.side_effect = side_effect
        steps = [
            {"role": "debugger", "brief": "find"},
            {"role": "tester", "brief": "write test"},
        ]
        run_pipeline(conn, "VDOL-001", steps)
        # Second call should include first step's output in prompt
        second_call = mock_run.call_args_list[1]
        prompt = second_call[0][0][2]  # -p argument
        assert "line 42" in prompt or "bug" in prompt

    def test_pipeline_task_not_found(self, conn):
        """Unknown task id yields a failure result with a 'not found' error."""
        result = run_pipeline(conn, "NONEXISTENT", [{"role": "debugger"}])
        assert result["success"] is False
        assert "not found" in result["error"]

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_hooks_called_after_successful_pipeline(self, mock_run, mock_hooks, conn):
        """run_hooks fires exactly once with event 'pipeline_completed' on success."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_hooks.assert_called_once()
        # Accept the event passed either as a kwarg or as the 4th positional arg
        call_kwargs = mock_hooks.call_args
        assert call_kwargs[1].get("event") == "pipeline_completed" or \
            call_kwargs[0][3] == "pipeline_completed"

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_hooks_not_called_on_failed_pipeline(self, mock_run, mock_hooks, conn):
        """No hooks fire when the pipeline fails."""
        mock_run.return_value = _mock_claude_failure("compilation error")
        mock_hooks.return_value = []
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        mock_hooks.assert_not_called()

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_hook_failure_does_not_affect_pipeline_result(self, mock_run, mock_hooks, conn):
        """A raising hook must not change the pipeline's success result."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.side_effect = Exception("hook exploded")
        steps = [{"role": "debugger", "brief": "find"}]
        # Must not raise — hook failures must not propagate
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
# ---------------------------------------------------------------------------
# Auto mode
# ---------------------------------------------------------------------------
class TestAutoMode:
    """Behavior differences between project execution_mode 'auto_complete' and 'review'."""

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_mode_generates_followups(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete mode must call generate_followups (last step is tester)."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_followup.assert_called_once_with(conn, "VDOL-001")
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "done"

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_skips_followups(self, mock_run, mock_hooks, mock_followup, conn):
        """Review mode must NOT call generate_followups automatically."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        # Project stays in the default "review" mode
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_followup.assert_not_called()
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review"

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_mode_skips_followups_for_followup_tasks(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete mode must NOT generate followups for followup tasks (prevents recursion)."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"})
        steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_followup.assert_not_called()

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_mode_fires_task_done_event(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete mode must call run_hooks with event='task_done' (last step is tester)."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        # Collect the event name from each hook call (kwarg or 4th positional arg)
        events_fired = [call[1].get("event") or call[0][3]
                        for call in mock_hooks.call_args_list]
        assert "task_done" in events_fired

    @patch("core.followup.auto_resolve_pending_actions")
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_mode_resolves_pending_actions(self, mock_run, mock_hooks, mock_followup, mock_resolve, conn):
        """auto_complete mode must auto-resolve pending_actions (last step is tester)."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        pending = [{"type": "permission_fix", "description": "Fix X",
                    "original_item": {}, "options": ["rerun"]}]
        mock_followup.return_value = {"created": [], "pending_actions": pending}
        mock_resolve.return_value = [{"resolved": "rerun", "result": {}}]
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_resolve.assert_called_once_with(conn, "VDOL-001", pending)
# ---------------------------------------------------------------------------
# Retry on permission error
# ---------------------------------------------------------------------------
class TestRetryOnPermissionError:
    """Automatic retry with --dangerously-skip-permissions, gated by execution mode."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_learning_extraction")
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn):
        """Auto mode: the retry on a permission error must fire."""
        permission_fail = _mock_claude_failure("permission denied: cannot write file")
        retry_success = _mock_claude_success({"result": "fixed"})
        mock_run.side_effect = [permission_fail, retry_success]
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        mock_learn.return_value = {"added": 0, "skipped": 0}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        assert mock_run.call_count == 2
        # Second call must include --dangerously-skip-permissions
        second_cmd = mock_run.call_args_list[1][0][0]
        assert "--dangerously-skip-permissions" in second_cmd

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_does_not_retry_on_permission_error(self, mock_run, mock_hooks, conn):
        """Review mode: the retry on a permission error must NOT fire."""
        permission_fail = _mock_claude_failure("permission denied: cannot write file")
        mock_run.return_value = permission_fail
        mock_hooks.return_value = []
        # Project stays in the default "review" mode
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        assert mock_run.call_count == 1
# ---------------------------------------------------------------------------
# JSON parsing
# ---------------------------------------------------------------------------
class TestTryParseJson:
    """_try_parse_json(): raw JSON, fenced JSON, embedded JSON, arrays, and garbage."""

    def test_direct_json(self):
        assert _try_parse_json('{"a": 1}') == {"a": 1}

    def test_json_in_code_fence(self):
        fenced = 'Some text\n```json\n{"a": 1}\n```\nMore text'
        assert _try_parse_json(fenced) == {"a": 1}

    def test_json_embedded_in_text(self):
        embedded = 'Here is the result: {"status": "ok", "count": 42} and more'
        parsed = _try_parse_json(embedded)
        assert parsed == {"status": "ok", "count": 42}

    def test_empty_string(self):
        assert _try_parse_json("") is None

    def test_no_json(self):
        assert _try_parse_json("just plain text") is None

    def test_json_array(self):
        assert _try_parse_json('[1, 2, 3]') == [1, 2, 3]
# ---------------------------------------------------------------------------
# Non-interactive mode
# ---------------------------------------------------------------------------
class TestNonInteractive:
    """Non-interactive mode: stdin/timeout handling, env-var overrides, write permissions."""

    @patch("agents.runner.subprocess.run")
    def test_noninteractive_sets_stdin_devnull(self, mock_run, conn):
        """When noninteractive=True, subprocess.run should get stdin=subprocess.DEVNULL."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True)
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("stdin") == subprocess.DEVNULL

    @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": ""}, clear=False)
    @patch("agents.runner.subprocess.run")
    def test_noninteractive_uses_600s_timeout(self, mock_run, conn):
        """Default timeout is 600s when KIN_AGENT_TIMEOUT is unset/empty."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True)
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("timeout") == 600

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""})
    @patch("agents.runner.subprocess.run")
    def test_interactive_uses_600s_timeout(self, mock_run, conn):
        """Interactive mode uses the same 600s default timeout."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("timeout") == 600

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""})
    @patch("agents.runner.subprocess.run")
    def test_interactive_no_stdin_override(self, mock_run, conn):
        """In interactive mode, stdin should not be set to DEVNULL."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("stdin") is None

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1"})
    @patch("agents.runner.subprocess.run")
    def test_env_var_activates_noninteractive(self, mock_run, conn):
        """KIN_NONINTERACTIVE=1 env var should activate non-interactive mode."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("stdin") == subprocess.DEVNULL
        assert call_kwargs.get("timeout") == 600

    @patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": "900"})
    @patch("agents.runner.subprocess.run")
    def test_custom_timeout_via_env_var(self, mock_run, conn):
        """KIN_AGENT_TIMEOUT overrides the default 600s timeout."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        call_kwargs = mock_run.call_args[1]
        assert call_kwargs.get("timeout") == 900

    @patch("agents.runner.subprocess.run")
    def test_allow_write_adds_skip_permissions(self, mock_run, conn):
        """allow_write=True adds --dangerously-skip-permissions to the CLI command."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", allow_write=True)
        cmd = mock_run.call_args[0][0]
        assert "--dangerously-skip-permissions" in cmd

    @patch("agents.runner.subprocess.run")
    def test_no_allow_write_no_skip_permissions(self, mock_run, conn):
        """allow_write=False must not add --dangerously-skip-permissions."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol", allow_write=False)
        cmd = mock_run.call_args[0][0]
        assert "--dangerously-skip-permissions" not in cmd
# ---------------------------------------------------------------------------
# run_audit
# ---------------------------------------------------------------------------
class TestRunAudit:
    """run_audit(): backlog audit parsing, DB logging, model/flags, auto-apply."""

    @patch("agents.runner.subprocess.run")
    def test_audit_success(self, mock_run, conn):
        """Audit should return parsed already_done/still_pending/unclear."""
        audit_output = json.dumps({
            "already_done": [{"id": "VDOL-001", "reason": "Fixed in runner.py"}],
            "still_pending": [],
            "unclear": [],
        })
        mock_run.return_value = _mock_claude_success({"result": audit_output})
        result = run_audit(conn, "vdol")
        assert result["success"] is True
        assert len(result["already_done"]) == 1
        assert result["already_done"][0]["id"] == "VDOL-001"

    @patch("agents.runner.subprocess.run")
    def test_audit_logs_to_db(self, mock_run, conn):
        """Audit should log to agent_logs with role=backlog_audit."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({"already_done": [], "still_pending": [], "unclear": []}),
        })
        run_audit(conn, "vdol")
        logs = conn.execute(
            "SELECT * FROM agent_logs WHERE agent_role='backlog_audit'"
        ).fetchall()
        assert len(logs) == 1
        assert logs[0]["action"] == "audit"

    def test_audit_no_pending_tasks(self, conn):
        """If no pending tasks, return success with empty lists."""
        # Mark existing task as done
        models.update_task(conn, "VDOL-001", status="done")
        result = run_audit(conn, "vdol")
        assert result["success"] is True
        assert result["already_done"] == []
        assert "No pending tasks" in result.get("message", "")

    def test_audit_project_not_found(self, conn):
        """Unknown project id yields a failure result with a 'not found' error."""
        result = run_audit(conn, "nonexistent")
        assert result["success"] is False
        assert "not found" in result["error"]

    @patch("agents.runner.subprocess.run")
    def test_audit_uses_sonnet(self, mock_run, conn):
        """Audit should use sonnet model."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({"already_done": [], "still_pending": [], "unclear": []}),
        })
        run_audit(conn, "vdol")
        cmd = mock_run.call_args[0][0]
        model_idx = cmd.index("--model")
        assert cmd[model_idx + 1] == "sonnet"

    @patch("agents.runner.subprocess.run")
    def test_audit_includes_tasks_in_prompt(self, mock_run, conn):
        """The prompt should contain the task title."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({"already_done": [], "still_pending": [], "unclear": []}),
        })
        run_audit(conn, "vdol")
        prompt = mock_run.call_args[0][0][2]  # -p argument
        assert "VDOL-001" in prompt
        assert "Fix bug" in prompt

    @patch("agents.runner.subprocess.run")
    def test_audit_auto_apply_marks_done(self, mock_run, conn):
        """auto_apply=True should mark already_done tasks as done in DB."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({
                "already_done": [{"id": "VDOL-001", "reason": "Done"}],
                "still_pending": [],
                "unclear": [],
            }),
        })
        result = run_audit(conn, "vdol", auto_apply=True)
        assert result["success"] is True
        assert "VDOL-001" in result["applied"]
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "done"

    @patch("agents.runner.subprocess.run")
    def test_audit_no_auto_apply_keeps_pending(self, mock_run, conn):
        """auto_apply=False should NOT change task status."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({
                "already_done": [{"id": "VDOL-001", "reason": "Done"}],
                "still_pending": [],
                "unclear": [],
            }),
        })
        result = run_audit(conn, "vdol", auto_apply=False)
        assert result["success"] is True
        assert result["applied"] == []
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "pending"

    @patch("agents.runner.subprocess.run")
    def test_audit_uses_dangerously_skip_permissions(self, mock_run, conn):
        """Audit must use --dangerously-skip-permissions for tool access."""
        mock_run.return_value = _mock_claude_success({
            "result": json.dumps({"already_done": [], "still_pending": [], "unclear": []}),
        })
        run_audit(conn, "vdol")
        cmd = mock_run.call_args[0][0]
        assert "--dangerously-skip-permissions" in cmd
# ---------------------------------------------------------------------------
# KIN-019: Silent FAILED diagnostics (regression tests)
# ---------------------------------------------------------------------------
class TestSilentFailedDiagnostics:
    """Regression (KIN-019): agent fails without output — runner must persist diagnostics to the DB."""

    @patch("agents.runner.subprocess.run")
    def test_agent_empty_stdout_saves_stderr_as_error_message_in_db(self, mock_run, conn):
        """When stdout is empty and returncode != 0, stderr must be saved as error_message in agent_logs."""
        mock = MagicMock()
        mock.stdout = ""
        mock.stderr = "API rate limit exceeded (429)"
        mock.returncode = 1
        mock_run.return_value = mock
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        log = conn.execute(
            "SELECT error_message FROM agent_logs WHERE task_id='VDOL-001'"
        ).fetchone()
        assert log is not None
        assert log["error_message"] is not None
        assert "rate limit" in log["error_message"]

    @patch("agents.runner.subprocess.run")
    def test_agent_empty_stdout_returns_error_key_with_stderr(self, mock_run, conn):
        """run_agent must return an 'error' key containing stderr when stdout is empty and returncode is nonzero."""
        mock = MagicMock()
        mock.stdout = ""
        mock.stderr = "Permission denied: cannot write to /etc/hosts"
        mock.returncode = 1
        mock_run.return_value = mock
        result = run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert result["success"] is False
        assert "error" in result
        assert result["error"] is not None
        assert "Permission denied" in result["error"]

    @patch("agents.runner.subprocess.run")
    def test_pipeline_error_message_includes_agent_stderr(self, mock_run, conn):
        """The pipeline error message must include the agent's stderr, not just a generic 'step failed'."""
        mock = MagicMock()
        mock.stdout = ""
        mock.stderr = "Internal server error: unexpected EOF"
        mock.returncode = 1
        mock_run.return_value = mock
        steps = [{"role": "tester", "brief": "run tests"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        assert "Internal server error" in result["error"] or "unexpected EOF" in result["error"]

    @patch("agents.runner.build_context")
    def test_pipeline_exception_in_run_agent_marks_task_blocked(self, mock_ctx, conn):
        """An exception inside run_agent (e.g. from build_context) must move the task to blocked."""
        mock_ctx.side_effect = RuntimeError("DB connection lost")
        steps = [{"role": "debugger", "brief": "find"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "blocked"

    @patch("agents.runner.build_context")
    def test_pipeline_exception_logs_to_agent_logs(self, mock_ctx, conn):
        """An exception in run_agent must be logged to agent_logs with success=False."""
        mock_ctx.side_effect = ValueError("bad context data")
        steps = [{"role": "tester", "brief": "test"}]
        run_pipeline(conn, "VDOL-001", steps)
        logs = conn.execute(
            "SELECT * FROM agent_logs WHERE task_id='VDOL-001' AND success=0"
        ).fetchall()
        assert len(logs) >= 1

    @patch("agents.runner.build_context")
    def test_pipeline_exception_marks_pipeline_failed_in_db(self, mock_ctx, conn):
        """On exception, the pipeline row must exist in the DB with status 'failed'."""
        mock_ctx.side_effect = RuntimeError("network timeout")
        steps = [{"role": "debugger", "brief": "find"}]
        run_pipeline(conn, "VDOL-001", steps)
        pipe = conn.execute("SELECT * FROM pipelines WHERE task_id='VDOL-001'").fetchone()
        assert pipe is not None
        assert pipe["status"] == "failed"

    @patch("agents.runner.subprocess.run")
    def test_agent_success_has_no_error_key_populated(self, mock_run, conn):
        """On a successful agent run, the result's 'error' key must be None (no false positives)."""
        mock_run.return_value = _mock_claude_success({"result": "all good"})
        result = run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert result["success"] is True
        assert result.get("error") is None
# ---------------------------------------------------------------------------
# Auto-learning: _run_learning_extraction
# ---------------------------------------------------------------------------
class TestRunLearningExtraction:
@patch("agents.runner.subprocess.run")
def test_extracts_and_saves_decisions(self, mock_run, conn):
"""Успешный сценарий: learner возвращает JSON с decisions, они сохраняются в БД."""
learner_output = json.dumps({
"decisions": [
{"type": "gotcha", "title": "SQLite WAL mode needed", "description": "Without WAL concurrent reads fail", "tags": ["sqlite", "db"]},
{"type": "convention", "title": "Always run tests after change", "description": "Prevents regressions", "tags": ["testing"]},
]
})
mock_run.return_value = _mock_claude_success({"result": learner_output})
step_results = [
{"role": "debugger", "raw_output": "Found issue with sqlite concurrent access"},
]
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
assert result["added"] == 2
assert result["skipped"] == 0
decisions = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()
assert len(decisions) == 2
titles = {d["title"] for d in decisions}
assert "SQLite WAL mode needed" in titles
assert "Always run tests after change" in titles
@patch("agents.runner.subprocess.run")
def test_skips_duplicate_decisions(self, mock_run, conn):
"""Дедупликация: если decision с таким title+type уже есть, пропускается."""
from core import models as m
m.add_decision(conn, "vdol", "gotcha", "SQLite WAL mode needed", "existing desc")
learner_output = json.dumps({
"decisions": [
{"type": "gotcha", "title": "SQLite WAL mode needed", "description": "duplicate", "tags": []},
{"type": "convention", "title": "New convention here", "description": "new desc", "tags": []},
]
})
mock_run.return_value = _mock_claude_success({"result": learner_output})
step_results = [{"role": "tester", "raw_output": "test output"}]
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
assert result["added"] == 1
assert result["skipped"] == 1
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 2
@patch("agents.runner.subprocess.run")
def test_limits_to_5_decisions(self, mock_run, conn):
"""Learner не должен сохранять более 5 decisions даже если агент вернул больше."""
decisions_list = [
{"type": "decision", "title": f"Decision {i}", "description": f"desc {i}", "tags": []}
for i in range(8)
]
learner_output = json.dumps({"decisions": decisions_list})
mock_run.return_value = _mock_claude_success({"result": learner_output})
step_results = [{"role": "architect", "raw_output": "long output"}]
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
assert result["added"] == 5
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 5
@patch("agents.runner.subprocess.run")
def test_non_json_output_returns_error(self, mock_run, conn):
"""Если learner вернул не-JSON, функция возвращает error, не бросает исключение."""
mock_run.return_value = _mock_claude_success({"result": "plain text, not json"})
step_results = [{"role": "debugger", "raw_output": "output"}]
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
assert result["added"] == 0
assert "error" in result
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0
@patch("agents.runner.subprocess.run")
def test_decisions_linked_to_task(self, mock_run, conn):
"""Сохранённые decisions должны быть привязаны к task_id."""
learner_output = json.dumps({
"decisions": [
{"type": "gotcha", "title": "Important gotcha", "description": "desc", "tags": []},
]
})
mock_run.return_value = _mock_claude_success({"result": learner_output})
step_results = [{"role": "debugger", "raw_output": "output"}]
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
d = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchone()
assert d["task_id"] == "VDOL-001"
@patch("agents.runner._run_learning_extraction")
@patch("agents.runner.subprocess.run")
def test_pipeline_triggers_learning_after_completion(self, mock_run, mock_learn, conn):
    """run_pipeline must invoke _run_learning_extraction once the pipeline succeeds."""
    mock_run.return_value = _mock_claude_success({"result": "done"})
    mock_learn.return_value = {"added": 1, "skipped": 0}
    outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find bug"}])
    assert outcome["success"] is True
    mock_learn.assert_called_once()
    positional = mock_learn.call_args[0]
    assert positional[1] == "VDOL-001"  # task_id
    assert positional[2] == "vdol"      # project_id
@patch("agents.runner._run_learning_extraction")
@patch("agents.runner.subprocess.run")
def test_learning_error_does_not_break_pipeline(self, mock_run, mock_learn, conn):
    """An exception raised inside _run_learning_extraction must not fail the pipeline."""
    mock_run.return_value = _mock_claude_success({"result": "done"})
    mock_learn.side_effect = Exception("learning failed")
    outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find bug"}])
    assert outcome["success"] is True
def test_pipeline_dry_run_skips_learning(self, conn):
    """A dry run must not trigger _run_learning_extraction (no DB activity at all)."""
    outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find bug"}], dry_run=True)
    assert outcome["dry_run"] is True
    # No decisions persisted — dry run performs no writes.
    stored = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()
    assert len(stored) == 0
@patch("agents.runner.subprocess.run")
def test_empty_learner_output_returns_no_decisions(self, mock_run, conn):
    """Empty learner stdout (subprocess returned "") yields an error dict, not an exception."""
    # Feed a bare empty string (not a dict) so raw_output ends up empty.
    mock_run.return_value = _mock_claude_success("")
    outcome = _run_learning_extraction(
        conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}]
    )
    assert outcome["added"] == 0
    assert "error" in outcome
    stored = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()
    assert len(stored) == 0
@patch("agents.runner.subprocess.run")
def test_empty_decisions_list_returns_zero_counts(self, mock_run, conn):
    """Learner returning {"decisions": []} gives added=0, skipped=0 and no error."""
    mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
    outcome = _run_learning_extraction(
        conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}]
    )
    assert outcome["added"] == 0
    assert outcome["skipped"] == 0
    assert "error" not in outcome
@patch("agents.runner.subprocess.run")
def test_decision_missing_title_is_skipped(self, mock_run, conn):
    """A decision lacking a title is dropped silently instead of raising."""
    payload = json.dumps({
        "decisions": [
            {"type": "gotcha", "description": "no title here", "tags": []},
            {"type": "convention", "title": "Valid decision", "description": "desc", "tags": []},
        ]
    })
    mock_run.return_value = _mock_claude_success({"result": payload})
    outcome = _run_learning_extraction(
        conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}]
    )
    assert outcome["added"] == 1
    stored = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()
    assert len(stored) == 1
@patch("agents.runner.subprocess.run")
def test_decisions_field_not_list_returns_error(self, mock_run, conn):
    """When the decisions field is not a list, an error dict comes back."""
    mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": "not a list"})})
    outcome = _run_learning_extraction(
        conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}]
    )
    assert outcome["added"] == 0
    assert "error" in outcome
@patch("agents.runner.subprocess.run")
def test_logs_agent_run_to_db(self, mock_run, conn):
    """KIN-060: _run_learning_extraction must write a row into agent_logs."""
    payload = json.dumps({
        "decisions": [
            {"type": "gotcha", "title": "Log test", "description": "desc", "tags": []},
        ]
    })
    mock_run.return_value = _mock_claude_success({"result": payload})
    _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}])
    rows = conn.execute(
        "SELECT * FROM agent_logs WHERE agent_role='learner' AND project_id='vdol'"
    ).fetchall()
    assert len(rows) == 1
    entry = rows[0]
    assert entry["task_id"] == "VDOL-001"
    assert entry["action"] == "learn"
    assert entry["model"] == "sonnet"
@patch("agents.runner.subprocess.run")
def test_learner_cost_included_in_cost_summary(self, mock_run, conn):
    """KIN-060: get_cost_summary() must account for the learner agent's spend."""
    mock_run.return_value = _mock_claude_success({
        "result": json.dumps({"decisions": []}),
        "cost_usd": 0.042,
        "usage": {"total_tokens": 3000},
    })
    _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}])
    summary = models.get_cost_summary(conn, days=1)
    assert len(summary) == 1
    assert summary[0]["project_id"] == "vdol"
    assert summary[0]["total_cost_usd"] == pytest.approx(0.042)
    assert summary[0]["total_tokens"] == 3000
# -----------------------------------------------------------------------
# KIN-061: Regression — валидация поля type в decision
# -----------------------------------------------------------------------
@patch("agents.runner.subprocess.run")
def test_valid_type_gotcha_is_saved_as_is(self, mock_run, conn):
    """KIN-061: the valid type 'gotcha' is stored without modification."""
    payload = json.dumps({
        "decisions": [
            {"type": "gotcha", "title": "Use WAL mode", "description": "Concurrent reads need WAL", "tags": []},
        ]
    })
    mock_run.return_value = _mock_claude_success({"result": payload})
    outcome = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
    assert outcome["added"] == 1
    stored = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
    assert stored["type"] == "gotcha"
@patch("agents.runner.subprocess.run")
def test_invalid_type_falls_back_to_decision(self, mock_run, conn):
    """KIN-061: the invalid type 'unknown_type' is replaced with 'decision'."""
    payload = json.dumps({
        "decisions": [
            {"type": "unknown_type", "title": "Some title", "description": "Some desc", "tags": []},
        ]
    })
    mock_run.return_value = _mock_claude_success({"result": payload})
    outcome = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
    assert outcome["added"] == 1
    stored = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
    assert stored["type"] == "decision"
@patch("agents.runner.subprocess.run")
def test_missing_type_falls_back_to_decision(self, mock_run, conn):
    """KIN-061: a decision with no 'type' key at all gets type 'decision'."""
    payload = json.dumps({
        "decisions": [
            {"title": "No type key here", "description": "desc without type", "tags": []},
        ]
    })
    mock_run.return_value = _mock_claude_success({"result": payload})
    outcome = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
    assert outcome["added"] == 1
    stored = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
    assert stored["type"] == "decision"
# -----------------------------------------------------------------------
# KIN-062: KIN_LEARNER_TIMEOUT — отдельный таймаут для learner-агента
# -----------------------------------------------------------------------
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": ""}, clear=False)
@patch("agents.runner.subprocess.run")
def test_learner_uses_120s_default_timeout(self, mock_run, conn):
    """KIN-062: with KIN_LEARNER_TIMEOUT unset the learner defaults to a 120s timeout."""
    mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
    _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}])
    assert mock_run.call_args[1].get("timeout") == 120
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "300"}, clear=False)
@patch("agents.runner.subprocess.run")
def test_learner_uses_custom_timeout_from_env(self, mock_run, conn):
    """KIN-062: KIN_LEARNER_TIMEOUT overrides the learner agent's default timeout."""
    mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
    _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}])
    assert mock_run.call_args[1].get("timeout") == 300
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "60", "KIN_AGENT_TIMEOUT": "900"}, clear=False)
@patch("agents.runner.subprocess.run")
def test_learner_timeout_independent_of_agent_timeout(self, mock_run, conn):
    """KIN-062: KIN_LEARNER_TIMEOUT is independent of KIN_AGENT_TIMEOUT."""
    mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
    _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "output"}])
    assert mock_run.call_args[1].get("timeout") == 60
# ---------------------------------------------------------------------------
# KIN-056: Regression — web path timeout parity with CLI
# ---------------------------------------------------------------------------
class TestRegressionKIN056:
    """KIN-056 regression: agents timed out after 300s from web but not from CLI.

    Cause: noninteractive mode used timeout=300s, and the web API always sets
    KIN_NONINTERACTIVE=1, so web-initiated runs got the 300s limit.
    Fix: a unified timeout=600s regardless of noninteractive (still
    overridable via KIN_AGENT_TIMEOUT). Every test here would FAIL against the
    old code (timeout=300 for noninteractive) and passes after the fix.
    """

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""})
    @patch("agents.runner.subprocess.run")
    def test_web_noninteractive_env_does_not_use_300s(self, mock_run, conn):
        """The web path sets KIN_NONINTERACTIVE=1; before the fix that meant timeout=300s."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert mock_run.call_args[1].get("timeout") != 300, (
            "Регрессия KIN-056: timeout не должен быть 300s в noninteractive режиме"
        )

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""})
    @patch("agents.runner.subprocess.run")
    def test_web_noninteractive_timeout_is_600(self, mock_run, conn):
        """Web path: KIN_NONINTERACTIVE=1 -> timeout = 600s (not 300s)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert mock_run.call_args[1].get("timeout") == 600

    @patch("agents.runner.subprocess.run")
    def test_web_and_cli_paths_use_same_timeout(self, mock_run, conn):
        """Timeout via the web path (KIN_NONINTERACTIVE=1) equals the CLI one (noninteractive=True)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        # Web path: env var KIN_NONINTERACTIVE=1, noninteractive param not set.
        with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}):
            run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
        web_timeout = mock_run.call_args[1].get("timeout")
        mock_run.reset_mock()
        # CLI path: noninteractive=True, no env var.
        with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "", "KIN_AGENT_TIMEOUT": ""}):
            run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True)
        cli_timeout = mock_run.call_args[1].get("timeout")
        assert web_timeout == cli_timeout, (
            f"Таймаут web ({web_timeout}s) != CLI ({cli_timeout}s) — регрессия KIN-056"
        )

    @patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": "900"})
    @patch("agents.runner.subprocess.run")
    def test_web_noninteractive_respects_kin_agent_timeout_override(self, mock_run, conn):
        """Web path: KIN_AGENT_TIMEOUT overrides the default even with KIN_NONINTERACTIVE=1."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert mock_run.call_args[1].get("timeout") == 900
# ---------------------------------------------------------------------------
# KIN-057: claude CLI в PATH при запуске через launchctl
# ---------------------------------------------------------------------------
class TestClaudePath:
    """KIN-057 regression tests: launchctl daemons may not have claude on PATH."""

    def test_build_claude_env_contains_extra_paths(self):
        """_build_claude_env must inject /opt/homebrew/bin and /usr/local/bin into PATH."""
        claude_env = _build_claude_env()
        entries = claude_env["PATH"].split(":")
        for extra_dir in _EXTRA_PATH_DIRS:
            assert extra_dir in entries, (
                f"Регрессия KIN-057: {extra_dir} не найден в PATH, сгенерированном _build_claude_env"
            )

    def test_build_claude_env_no_duplicate_paths(self):
        """_build_claude_env must not duplicate entries already present.

        PATH is pinned to a fixed value so the test is independent of the
        real environment (decision #48).
        """
        with patch.dict("os.environ", {"PATH": "/usr/bin:/bin"}, clear=False):
            claude_env = _build_claude_env()
        already = set()
        for entry in claude_env["PATH"].split(":"):
            assert entry not in already, f"Дублирующийся PATH entry: {entry}"
            already.add(entry)

    def test_build_claude_env_preserves_existing_path(self):
        """Pre-existing PATH entries must survive _build_claude_env."""
        with patch.dict("os.environ", {"PATH": "/custom/bin:/usr/bin:/bin"}):
            claude_env = _build_claude_env()
        entries = claude_env["PATH"].split(":")
        assert "/custom/bin" in entries
        assert "/usr/bin" in entries

    def test_resolve_claude_cmd_returns_string(self):
        """_resolve_claude_cmd must always yield a non-empty string."""
        resolved = _resolve_claude_cmd()
        assert isinstance(resolved, str)
        assert len(resolved) > 0

    def test_resolve_claude_cmd_fallback_when_not_found(self):
        """When the CLI is absent from PATH, _resolve_claude_cmd falls back to 'claude'."""
        with patch("agents.runner.shutil.which", return_value=None):
            assert _resolve_claude_cmd() == "claude"

    def test_resolve_claude_cmd_returns_full_path_when_found(self):
        """When claude is found, _resolve_claude_cmd returns its absolute path."""
        with patch("agents.runner.shutil.which", return_value="/opt/homebrew/bin/claude"):
            assert _resolve_claude_cmd() == "/opt/homebrew/bin/claude"

    @patch("agents.runner.subprocess.run")
    def test_run_claude_passes_env_to_subprocess(self, mock_run, conn):
        """_run_claude must hand env= to subprocess.run instead of inheriting a bare PATH."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        kwargs = mock_run.call_args[1]
        assert "env" in kwargs, (
            "Регрессия KIN-057: subprocess.run должен получать явный env с расширенным PATH"
        )
        assert kwargs["env"] is not None

    @patch("agents.runner.subprocess.run")
    def test_run_claude_env_has_homebrew_in_path(self, mock_run, conn):
        """The env handed to subprocess.run must list /opt/homebrew/bin in PATH."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        run_agent(conn, "debugger", "VDOL-001", "vdol")
        sub_env = mock_run.call_args[1].get("env", {})
        assert "/opt/homebrew/bin" in sub_env.get("PATH", ""), (
            "Регрессия KIN-057: /opt/homebrew/bin не найден в env['PATH'] subprocess.run"
        )

    @patch("agents.runner.subprocess.run")
    def test_file_not_found_returns_127(self, mock_run, conn):
        """A FileNotFoundError for claude must surface as a failed result (returncode 127)."""
        mock_run.side_effect = FileNotFoundError("claude not found")
        outcome = run_agent(conn, "debugger", "VDOL-001", "vdol")
        assert outcome["success"] is False
        assert "not found" in (outcome.get("error") or "").lower()

    @patch.dict("os.environ", {"PATH": ""})
    def test_launchctl_empty_path_build_env_adds_extra_dirs(self):
        """KIN-057 regression: when launchctl starts us with an empty PATH,
        _build_claude_env must add _EXTRA_PATH_DIRS so claude stays reachable.

        Without the fix: os.environ["PATH"]="" -> shutil.which("claude") -> None
        -> FileNotFoundError. With the fix the PATH gains /opt/homebrew/bin etc.
        """
        entries = _build_claude_env()["PATH"].split(":")
        # Verify every critical directory explicitly.
        for extra_dir in _EXTRA_PATH_DIRS:
            assert extra_dir in entries, (
                f"KIN-057: при пустом os PATH директория {extra_dir} должна быть добавлена"
            )

    @patch.dict("os.environ", {"PATH": ""})
    def test_launchctl_empty_path_shutil_which_fails_without_fix(self):
        """Reproduces the broken behaviour: with PATH='' shutil.which returns None.

        This is precisely what happened before the fix — launchctl could not
        see claude. The test documents WHY _build_claude_env is needed instead
        of relying on os.environ directly.
        """
        import shutil
        # Without the fix: a lookup against an empty PATH cannot find claude.
        lookup = shutil.which("claude", path="")
        assert lookup is None, (
            "Если этот assert упал — shutil.which нашёл claude в пустом PATH, "
            "что невозможно. Ожидаем None — именно поэтому нужен _build_claude_env."
        )
        # With the fix: _resolve_claude_cmd builds an extended PATH and finds
        # claude (or falls back to "claude", but never raises FileNotFoundError).
        resolved = _resolve_claude_cmd()
        assert isinstance(resolved, str) and len(resolved) > 0, (
            "KIN-057: _resolve_claude_cmd должен возвращать строку даже при пустом os PATH"
        )
# ---------------------------------------------------------------------------
# KIN-063: TestCompletionMode — auto_complete + last-step role check
# ---------------------------------------------------------------------------
class TestCompletionMode:
    """auto_complete mode only fires when the last step is tester or reviewer."""

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_with_tester_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete + tester as the final step -> status=done (Decision #29)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}],
        )
        assert outcome["success"] is True
        assert models.get_task(conn, "VDOL-001")["status"] == "done"

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_with_reviewer_last_sets_done(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete + reviewer as the final step -> status=done."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "reviewer", "brief": "review"}],
        )
        assert outcome["success"] is True
        assert models.get_task(conn, "VDOL-001")["status"] == "done"

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_without_tester_last_sets_review(self, mock_run, mock_hooks, mock_followup, conn):
        """auto_complete + a final step that is NOT tester/reviewer -> status=review (Decision #29)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        models.update_project(conn, "vdol", execution_mode="auto_complete")
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "debugger", "brief": "debug"}],
        )
        assert outcome["success"] is True
        assert models.get_task(conn, "VDOL-001")["status"] == "review", (
            "Регрессия KIN-063: auto_complete без tester/reviewer последним НЕ должен авто-завершать"
        )

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_legacy_auto_mode_value_not_recognized(self, mock_run, mock_hooks, mock_followup, conn):
        """Regression: the legacy value 'auto' is no longer a valid mode.

        After KIN-063, 'auto' became 'auto_complete'. If an unmigrated DB row
        still holds 'auto', the runner must NOT auto-complete — it must take
        the safe 'review' branch instead. (Decision #29)
        """
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        # Raw SQL update, bypassing validate_completion_mode, to simulate an
        # old unmigrated row in the DB.
        conn.execute("UPDATE projects SET execution_mode='auto' WHERE id='vdol'")
        conn.commit()
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}],
        )
        assert outcome["success"] is True
        assert models.get_task(conn, "VDOL-001")["status"] == "review", (
            "Регрессия: 'auto' (старый формат) не должен срабатывать как auto_complete"
        )

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_with_tester_last_keeps_task_in_review(self, mock_run, mock_hooks, mock_followup, conn):
        """review mode + tester last -> task.status == 'review', NOT done (awaits manual approve)."""
        mock_run.return_value = _mock_claude_success({"result": "all tests pass"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        # Project and task both stay in the default 'review' mode.
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}],
        )
        assert outcome["success"] is True
        refreshed = models.get_task(conn, "VDOL-001")
        assert refreshed["status"] == "review"
        assert refreshed["status"] != "done", (
            "KIN-063: review mode не должен авто-завершать задачу даже если tester последний"
        )

    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_project_review_overrides_no_task_completion_mode(self, mock_run, mock_hooks, mock_followup, conn):
        """Project execution_mode='review' + a task without an override -> pipeline ends in 'review'.

        Scenario: the PM picked auto_complete, but the project is configured
        for 'review' (a manual human override). The task carries no task-level
        execution_mode, so get_effective_mode returns the project-level 'review'.
        """
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}
        # Project explicitly in 'review'; the fixture task has no execution_mode.
        models.update_project(conn, "vdol", execution_mode="review")
        outcome = run_pipeline(
            conn, "VDOL-001",
            [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}],
        )
        assert outcome["success"] is True
        assert outcome["mode"] == "review"
        assert models.get_task(conn, "VDOL-001")["status"] == "review", (
            "KIN-063: project-level 'review' должен применяться когда задача не имеет override"
        )
# ---------------------------------------------------------------------------
# KIN-048: _run_autocommit — флаг, git path, env=
# ---------------------------------------------------------------------------
class TestAutocommit:
    """KIN-048: _run_autocommit — autocommit_enabled flag, shutil.which, env= regression."""

    def test_disabled_project_skips_subprocess(self, conn):
        """With autocommit_enabled=0 (the default) subprocess is never invoked."""
        with patch("agents.runner.subprocess.run") as mock_run:
            _run_autocommit(conn, "VDOL-001", "vdol")
        mock_run.assert_not_called()

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_enabled_calls_git_add_and_commit(self, mock_which, mock_run, conn, tmp_path):
        """With autocommit_enabled=1 both git add -A and git commit run, carrying task id and title."""
        mock_which.return_value = "/usr/bin/git"
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))
        _run_autocommit(conn, "VDOL-001", "vdol")
        assert mock_run.call_count == 2
        first_call, second_call = mock_run.call_args_list
        assert first_call[0][0] == ["/usr/bin/git", "add", "-A"]
        commit_cmd = second_call[0][0]
        assert commit_cmd[0] == "/usr/bin/git"
        assert commit_cmd[1] == "commit"
        assert "VDOL-001" in commit_cmd[-1]
        assert "Fix bug" in commit_cmd[-1]

    @patch("agents.runner.subprocess.run")
    def test_nothing_to_commit_no_exception(self, mock_run, conn, tmp_path):
        """returncode=1 (nothing to commit) must not raise."""
        mock_run.return_value = MagicMock(returncode=1)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))
        _run_autocommit(conn, "VDOL-001", "vdol")  # must not raise

    @patch("agents.runner.subprocess.run")
    def test_passes_env_to_subprocess(self, mock_run, conn, tmp_path):
        """Regression #33: every subprocess.run call must receive env=."""
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))
        _run_autocommit(conn, "VDOL-001", "vdol")
        for recorded in mock_run.call_args_list:
            recorded_kwargs = recorded[1]
            assert "env" in recorded_kwargs, "Regression #33: subprocess.run должен получать env="
            assert "/opt/homebrew/bin" in recorded_kwargs["env"].get("PATH", "")

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_resolves_git_via_shutil_which(self, mock_which, mock_run, conn, tmp_path):
        """Regression #32: git is resolved via shutil.which, not a hardcoded 'git'."""
        mock_which.return_value = "/opt/homebrew/bin/git"
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))
        _run_autocommit(conn, "VDOL-001", "vdol")
        which_for_git = [c for c in mock_which.call_args_list if c[0][0] == "git"]
        assert len(which_for_git) > 0, "Regression #32: shutil.which должен вызываться для git"
        assert mock_run.call_args_list[0][0][0][0] == "/opt/homebrew/bin/git"

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_git_not_found_no_crash_logs_warning(self, mock_which, mock_run, conn, tmp_path):
        """which(git) -> None -> 'git' fallback -> FileNotFoundError -> no crash, WARNING logged."""
        mock_which.return_value = None  # git absent from PATH
        mock_run.side_effect = FileNotFoundError("git: command not found")
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))
        with patch("agents.runner._logger") as mock_logger:
            _run_autocommit(conn, "VDOL-001", "vdol")  # must not raise
        mock_logger.warning.assert_called_once()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner.subprocess.run")
    def test_autocommit_not_called_on_failed_pipeline(self, mock_run, mock_autocommit, conn):
        """Pipeline failure -> _run_autocommit must NOT be called (gotcha #41)."""
        mock_run.return_value = _mock_claude_failure("compilation error")
        outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])
        assert outcome["success"] is False
        mock_autocommit.assert_not_called()
# ---------------------------------------------------------------------------
# KIN-055: execution_mode='review' при переводе задачи в статус review
# ---------------------------------------------------------------------------
class TestReviewModeExecutionMode:
    """KIN-055 regression: execution_mode must be 'review', not NULL, after a review-mode pipeline."""

    def test_task_execution_mode_is_null_before_pipeline(self, conn):
        """Edge case: a freshly created task has execution_mode IS NULL."""
        assert models.get_task(conn, "VDOL-001")["execution_mode"] is None, (
            "Задача должна иметь NULL execution_mode до выполнения pipeline"
        )

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_sets_execution_mode_review(self, mock_run, mock_hooks, conn):
        """After a review-mode pipeline, task.execution_mode must be 'review', not NULL."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find bug"}])
        assert outcome["success"] is True
        refreshed = models.get_task(conn, "VDOL-001")
        assert refreshed["status"] == "review"
        # KIN-055 regression check: execution_mode must not remain NULL.
        assert refreshed["execution_mode"] is not None, (
            "Регрессия KIN-055: execution_mode не должен быть NULL после перевода задачи в статус review"
        )
        assert refreshed["execution_mode"] == "review"

    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_execution_mode_persisted_in_db(self, mock_run, mock_hooks, conn):
        """execution_mode='review' must be persisted in SQLite itself, bypassing the ORM layer."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []
        run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])
        row = conn.execute(
            "SELECT execution_mode FROM tasks WHERE id='VDOL-001'"
        ).fetchone()
        assert row is not None
        assert row["execution_mode"] == "review", (
            "Регрессия KIN-055: execution_mode должен быть 'review' в SQLite после pipeline"
        )
# ---------------------------------------------------------------------------
# KIN-021: Audit log for --dangerously-skip-permissions
# ---------------------------------------------------------------------------
class TestAuditLogDangerousSkip:
@patch("agents.runner._run_autocommit")
@patch("agents.runner._run_learning_extraction")
@patch("core.followup.generate_followups")
@patch("agents.runner.run_hooks")
@patch("agents.runner.subprocess.run")
def test_audit_log_written_on_permission_retry(
    self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn
):
    """A retry with --dangerously-skip-permissions must record an audit_log event."""
    mock_run.side_effect = [
        _mock_claude_failure("permission denied: cannot write file"),
        _mock_claude_success({"result": "fixed"}),
    ]
    mock_hooks.return_value = []
    mock_followup.return_value = {"created": [], "pending_actions": []}
    mock_learn.return_value = {"added": 0, "skipped": 0}
    models.update_project(conn, "vdol", execution_mode="auto_complete")
    outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])
    assert outcome["success"] is True
    # Inspect audit_log via direct SQL.
    entries = conn.execute(
        "SELECT * FROM audit_log WHERE task_id='VDOL-001'"
    ).fetchall()
    assert len(entries) == 1
    assert entries[0]["event_type"] == "dangerous_skip"
    assert entries[0]["step_id"] == "debugger"
    assert "debugger" in entries[0]["reason"]
@patch("agents.runner._run_autocommit")
@patch("agents.runner._run_learning_extraction")
@patch("core.followup.generate_followups")
@patch("agents.runner.run_hooks")
@patch("agents.runner.subprocess.run")
def test_dangerously_skipped_flag_set_on_task(
    self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn
):
    """tasks.dangerously_skipped=1 after a retry with --dangerously-skip-permissions."""
    mock_run.side_effect = [
        _mock_claude_failure("permission denied: cannot write file"),
        _mock_claude_success({"result": "fixed"}),
    ]
    mock_hooks.return_value = []
    mock_followup.return_value = {"created": [], "pending_actions": []}
    mock_learn.return_value = {"added": 0, "skipped": 0}
    models.update_project(conn, "vdol", execution_mode="auto_complete")
    run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])
    # Verify via direct SQL (bypassing the ORM layer).
    row = conn.execute(
        "SELECT dangerously_skipped FROM tasks WHERE id='VDOL-001'"
    ).fetchone()
    assert row is not None
    assert row["dangerously_skipped"] == 1
@patch("agents.runner.run_hooks")
@patch("agents.runner.subprocess.run")
def test_no_audit_log_in_review_mode(self, mock_run, mock_hooks, conn):
    """In review mode no retry happens, so audit_log stays empty."""
    mock_run.return_value = _mock_claude_failure("permission denied: cannot write file")
    mock_hooks.return_value = []
    outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])
    assert outcome["success"] is False
    entries = conn.execute(
        "SELECT * FROM audit_log WHERE task_id='VDOL-001'"
    ).fetchall()
    assert len(entries) == 0
@patch("agents.runner._run_autocommit")
@patch("agents.runner._run_learning_extraction")
@patch("core.followup.generate_followups")
@patch("agents.runner.run_hooks")
@patch("agents.runner.subprocess.run")
def test_audit_log_no_entry_on_normal_success(
    self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn
):
    """A successful run without a permission retry must leave audit_log empty."""
    # Single successful claude invocation — no permission failure, no retry.
    mock_run.return_value = _mock_claude_success({"result": "done"})
    mock_hooks.return_value = []
    mock_followup.return_value = {"created": [], "pending_actions": []}
    mock_learn.return_value = {"added": 0, "skipped": 0}
    models.update_project(conn, "vdol", execution_mode="auto_complete")
    steps = [{"role": "tester", "brief": "test"}]
    result = run_pipeline(conn, "VDOL-001", steps)
    assert result["success"] is True
    # Direct SQL check: the happy path must write nothing to audit_log.
    rows = conn.execute(
        "SELECT * FROM audit_log WHERE task_id='VDOL-001'"
    ).fetchall()
    assert len(rows) == 0