kin: KIN-048 Post-pipeline hook: автокоммит после успешного завершения задачи. git add -A && git commit -m 'kin: TASK_ID TITLE'. Срабатывает автоматически как rebuild-frontend.
This commit is contained in:
parent
8a6f280cbd
commit
ae21e48b65
13 changed files with 1554 additions and 65 deletions
|
|
@ -6,7 +6,10 @@ import pytest
|
|||
from unittest.mock import patch, MagicMock
|
||||
from core.db import init_db
|
||||
from core import models
|
||||
from agents.runner import run_agent, run_pipeline, run_audit, _try_parse_json
|
||||
from agents.runner import (
|
||||
run_agent, run_pipeline, run_audit, _try_parse_json, _run_learning_extraction,
|
||||
_build_claude_env, _resolve_claude_cmd, _EXTRA_PATH_DIRS, _run_autocommit,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
|
|
@ -155,8 +158,9 @@ class TestRunAgent:
|
|||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestRunPipeline:
|
||||
@patch("agents.runner._run_autocommit") # gotcha #41: мокируем в тестах не о autocommit
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_successful_pipeline(self, mock_run, conn):
|
||||
def test_successful_pipeline(self, mock_run, mock_autocommit, conn):
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
|
||||
steps = [
|
||||
|
|
@ -298,13 +302,13 @@ class TestAutoMode:
|
|||
@patch("agents.runner.run_hooks")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_auto_mode_generates_followups(self, mock_run, mock_hooks, mock_followup, conn):
|
||||
"""Auto mode должен вызывать generate_followups после task_auto_approved."""
|
||||
"""Auto_complete mode должен вызывать generate_followups (последний шаг — tester)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_hooks.return_value = []
|
||||
mock_followup.return_value = {"created": [], "pending_actions": []}
|
||||
|
||||
models.update_project(conn, "vdol", execution_mode="auto")
|
||||
steps = [{"role": "debugger", "brief": "find"}]
|
||||
models.update_project(conn, "vdol", execution_mode="auto_complete")
|
||||
steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
|
|
@ -334,15 +338,15 @@ class TestAutoMode:
|
|||
@patch("agents.runner.run_hooks")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_auto_mode_skips_followups_for_followup_tasks(self, mock_run, mock_hooks, mock_followup, conn):
|
||||
"""Auto mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии)."""
|
||||
"""Auto_complete mode НЕ должен генерировать followups для followup-задач (предотвращение рекурсии)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_hooks.return_value = []
|
||||
mock_followup.return_value = {"created": [], "pending_actions": []}
|
||||
|
||||
models.update_project(conn, "vdol", execution_mode="auto")
|
||||
models.update_project(conn, "vdol", execution_mode="auto_complete")
|
||||
models.update_task(conn, "VDOL-001", brief={"source": "followup:VDOL-000"})
|
||||
|
||||
steps = [{"role": "debugger", "brief": "find"}]
|
||||
steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
|
|
@ -352,13 +356,13 @@ class TestAutoMode:
|
|||
@patch("agents.runner.run_hooks")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_auto_mode_fires_task_done_event(self, mock_run, mock_hooks, mock_followup, conn):
|
||||
"""Auto mode должен вызывать run_hooks с event='task_done' после task_auto_approved."""
|
||||
"""Auto_complete mode должен вызывать run_hooks с event='task_done' (последний шаг — tester)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_hooks.return_value = []
|
||||
mock_followup.return_value = {"created": [], "pending_actions": []}
|
||||
|
||||
models.update_project(conn, "vdol", execution_mode="auto")
|
||||
steps = [{"role": "debugger", "brief": "find"}]
|
||||
models.update_project(conn, "vdol", execution_mode="auto_complete")
|
||||
steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
|
|
@ -371,7 +375,7 @@ class TestAutoMode:
|
|||
@patch("agents.runner.run_hooks")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_auto_mode_resolves_pending_actions(self, mock_run, mock_hooks, mock_followup, mock_resolve, conn):
|
||||
"""Auto mode должен авто-резолвить pending_actions из followup generation."""
|
||||
"""Auto_complete mode должен авто-резолвить pending_actions (последний шаг — tester)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_hooks.return_value = []
|
||||
|
||||
|
|
@ -380,8 +384,8 @@ class TestAutoMode:
|
|||
mock_followup.return_value = {"created": [], "pending_actions": pending}
|
||||
mock_resolve.return_value = [{"resolved": "rerun", "result": {}}]
|
||||
|
||||
models.update_project(conn, "vdol", execution_mode="auto")
|
||||
steps = [{"role": "debugger", "brief": "find"}]
|
||||
models.update_project(conn, "vdol", execution_mode="auto_complete")
|
||||
steps = [{"role": "debugger", "brief": "find"}, {"role": "tester", "brief": "test"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
|
|
@ -393,10 +397,12 @@ class TestAutoMode:
|
|||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestRetryOnPermissionError:
|
||||
@patch("agents.runner._run_autocommit")
|
||||
@patch("agents.runner._run_learning_extraction")
|
||||
@patch("core.followup.generate_followups")
|
||||
@patch("agents.runner.run_hooks")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, conn):
|
||||
def test_retry_on_permission_error_auto_mode(self, mock_run, mock_hooks, mock_followup, mock_learn, mock_autocommit, conn):
|
||||
"""Auto mode: retry при permission error должен срабатывать."""
|
||||
permission_fail = _mock_claude_failure("permission denied: cannot write file")
|
||||
retry_success = _mock_claude_success({"result": "fixed"})
|
||||
|
|
@ -404,8 +410,9 @@ class TestRetryOnPermissionError:
|
|||
mock_run.side_effect = [permission_fail, retry_success]
|
||||
mock_hooks.return_value = []
|
||||
mock_followup.return_value = {"created": [], "pending_actions": []}
|
||||
mock_learn.return_value = {"added": 0, "skipped": 0}
|
||||
|
||||
models.update_project(conn, "vdol", execution_mode="auto")
|
||||
models.update_project(conn, "vdol", execution_mode="auto_complete")
|
||||
steps = [{"role": "debugger", "brief": "find"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
|
|
@ -472,12 +479,13 @@ class TestNonInteractive:
|
|||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("stdin") == subprocess.DEVNULL
|
||||
|
||||
@patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": ""}, clear=False)
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_noninteractive_uses_300s_timeout(self, mock_run, conn):
|
||||
def test_noninteractive_uses_600s_timeout(self, mock_run, conn):
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True)
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 300
|
||||
assert call_kwargs.get("timeout") == 600
|
||||
|
||||
@patch.dict("os.environ", {"KIN_NONINTERACTIVE": ""})
|
||||
@patch("agents.runner.subprocess.run")
|
||||
|
|
@ -504,7 +512,16 @@ class TestNonInteractive:
|
|||
run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("stdin") == subprocess.DEVNULL
|
||||
assert call_kwargs.get("timeout") == 300
|
||||
assert call_kwargs.get("timeout") == 600
|
||||
|
||||
@patch.dict("os.environ", {"KIN_AGENT_TIMEOUT": "900"})
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_custom_timeout_via_env_var(self, mock_run, conn):
|
||||
"""KIN_AGENT_TIMEOUT overrides the default 600s timeout."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 900
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_allow_write_adds_skip_permissions(self, mock_run, conn):
|
||||
|
|
@ -751,3 +768,786 @@ class TestSilentFailedDiagnostics:
|
|||
|
||||
assert result["success"] is True
|
||||
assert result.get("error") is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Auto-learning: _run_learning_extraction
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestRunLearningExtraction:
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_extracts_and_saves_decisions(self, mock_run, conn):
|
||||
"""Успешный сценарий: learner возвращает JSON с decisions, они сохраняются в БД."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "title": "SQLite WAL mode needed", "description": "Without WAL concurrent reads fail", "tags": ["sqlite", "db"]},
|
||||
{"type": "convention", "title": "Always run tests after change", "description": "Prevents regressions", "tags": ["testing"]},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [
|
||||
{"role": "debugger", "raw_output": "Found issue with sqlite concurrent access"},
|
||||
]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 2
|
||||
assert result["skipped"] == 0
|
||||
|
||||
decisions = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()
|
||||
assert len(decisions) == 2
|
||||
titles = {d["title"] for d in decisions}
|
||||
assert "SQLite WAL mode needed" in titles
|
||||
assert "Always run tests after change" in titles
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_skips_duplicate_decisions(self, mock_run, conn):
|
||||
"""Дедупликация: если decision с таким title+type уже есть, пропускается."""
|
||||
from core import models as m
|
||||
m.add_decision(conn, "vdol", "gotcha", "SQLite WAL mode needed", "existing desc")
|
||||
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "title": "SQLite WAL mode needed", "description": "duplicate", "tags": []},
|
||||
{"type": "convention", "title": "New convention here", "description": "new desc", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [{"role": "tester", "raw_output": "test output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 1
|
||||
assert result["skipped"] == 1
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 2
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_limits_to_5_decisions(self, mock_run, conn):
|
||||
"""Learner не должен сохранять более 5 decisions даже если агент вернул больше."""
|
||||
decisions_list = [
|
||||
{"type": "decision", "title": f"Decision {i}", "description": f"desc {i}", "tags": []}
|
||||
for i in range(8)
|
||||
]
|
||||
learner_output = json.dumps({"decisions": decisions_list})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [{"role": "architect", "raw_output": "long output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 5
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 5
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_non_json_output_returns_error(self, mock_run, conn):
|
||||
"""Если learner вернул не-JSON, функция возвращает error, не бросает исключение."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "plain text, not json"})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 0
|
||||
assert "error" in result
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_decisions_linked_to_task(self, mock_run, conn):
|
||||
"""Сохранённые decisions должны быть привязаны к task_id."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "title": "Important gotcha", "description": "desc", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
d = conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchone()
|
||||
assert d["task_id"] == "VDOL-001"
|
||||
|
||||
@patch("agents.runner._run_learning_extraction")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_pipeline_triggers_learning_after_completion(self, mock_run, mock_learn, conn):
|
||||
"""run_pipeline должен вызывать _run_learning_extraction после успешного завершения."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_learn.return_value = {"added": 1, "skipped": 0}
|
||||
|
||||
steps = [{"role": "debugger", "brief": "find bug"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
mock_learn.assert_called_once()
|
||||
call_args = mock_learn.call_args[0]
|
||||
assert call_args[1] == "VDOL-001" # task_id
|
||||
assert call_args[2] == "vdol" # project_id
|
||||
|
||||
@patch("agents.runner._run_learning_extraction")
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_learning_error_does_not_break_pipeline(self, mock_run, mock_learn, conn):
|
||||
"""Если _run_learning_extraction бросает исключение, pipeline не падает."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "done"})
|
||||
mock_learn.side_effect = Exception("learning failed")
|
||||
|
||||
steps = [{"role": "debugger", "brief": "find bug"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps)
|
||||
|
||||
assert result["success"] is True
|
||||
|
||||
def test_pipeline_dry_run_skips_learning(self, conn):
|
||||
"""Dry run не должен вызывать _run_learning_extraction."""
|
||||
steps = [{"role": "debugger", "brief": "find bug"}]
|
||||
result = run_pipeline(conn, "VDOL-001", steps, dry_run=True)
|
||||
|
||||
assert result["dry_run"] is True
|
||||
# No decisions saved (dry run — no DB activity)
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_empty_learner_output_returns_no_decisions(self, mock_run, conn):
|
||||
"""Пустой stdout от learner (subprocess вернул "") — не бросает исключение, возвращает error."""
|
||||
# Используем пустую строку как stdout (не dict), чтобы raw_output оказался пустым
|
||||
mock_run.return_value = _mock_claude_success("")
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 0
|
||||
assert "error" in result
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 0
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_empty_decisions_list_returns_zero_counts(self, mock_run, conn):
|
||||
"""Learner возвращает {"decisions": []} — added=0, skipped=0, без ошибки."""
|
||||
mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 0
|
||||
assert result["skipped"] == 0
|
||||
assert "error" not in result
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_decision_missing_title_is_skipped(self, mock_run, conn):
|
||||
"""Decision без title молча пропускается, не вызывает исключение."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "description": "no title here", "tags": []},
|
||||
{"type": "convention", "title": "Valid decision", "description": "desc", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 1
|
||||
assert len(conn.execute("SELECT * FROM decisions WHERE project_id='vdol'").fetchall()) == 1
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_decisions_field_not_list_returns_error(self, mock_run, conn):
|
||||
"""Если поле decisions не является списком — возвращается error dict."""
|
||||
mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": "not a list"})})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
assert result["added"] == 0
|
||||
assert "error" in result
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_logs_agent_run_to_db(self, mock_run, conn):
|
||||
"""KIN-060: _run_learning_extraction должна писать запись в agent_logs."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "title": "Log test", "description": "desc", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
logs = conn.execute(
|
||||
"SELECT * FROM agent_logs WHERE agent_role='learner' AND project_id='vdol'"
|
||||
).fetchall()
|
||||
assert len(logs) == 1
|
||||
log = logs[0]
|
||||
assert log["task_id"] == "VDOL-001"
|
||||
assert log["action"] == "learn"
|
||||
assert log["model"] == "sonnet"
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_learner_cost_included_in_cost_summary(self, mock_run, conn):
|
||||
"""KIN-060: get_cost_summary() включает затраты learner-агента."""
|
||||
learner_output = json.dumps({"decisions": []})
|
||||
mock_run.return_value = _mock_claude_success({
|
||||
"result": learner_output,
|
||||
"cost_usd": 0.042,
|
||||
"usage": {"total_tokens": 3000},
|
||||
})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
costs = models.get_cost_summary(conn, days=1)
|
||||
assert len(costs) == 1
|
||||
assert costs[0]["project_id"] == "vdol"
|
||||
assert costs[0]["total_cost_usd"] == pytest.approx(0.042)
|
||||
assert costs[0]["total_tokens"] == 3000
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# KIN-061: Regression — валидация поля type в decision
|
||||
# -----------------------------------------------------------------------
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_valid_type_gotcha_is_saved_as_is(self, mock_run, conn):
|
||||
"""KIN-061: валидный тип 'gotcha' сохраняется без изменений."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "gotcha", "title": "Use WAL mode", "description": "Concurrent reads need WAL", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
|
||||
|
||||
assert result["added"] == 1
|
||||
d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
|
||||
assert d["type"] == "gotcha"
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_invalid_type_falls_back_to_decision(self, mock_run, conn):
|
||||
"""KIN-061: невалидный тип 'unknown_type' заменяется на 'decision'."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"type": "unknown_type", "title": "Some title", "description": "Some desc", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
|
||||
|
||||
assert result["added"] == 1
|
||||
d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
|
||||
assert d["type"] == "decision"
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_missing_type_falls_back_to_decision(self, mock_run, conn):
|
||||
"""KIN-061: отсутствующий ключ 'type' в decision заменяется на 'decision'."""
|
||||
learner_output = json.dumps({
|
||||
"decisions": [
|
||||
{"title": "No type key here", "description": "desc without type", "tags": []},
|
||||
]
|
||||
})
|
||||
mock_run.return_value = _mock_claude_success({"result": learner_output})
|
||||
|
||||
result = _run_learning_extraction(conn, "VDOL-001", "vdol", [{"role": "debugger", "raw_output": "x"}])
|
||||
|
||||
assert result["added"] == 1
|
||||
d = conn.execute("SELECT type FROM decisions WHERE project_id='vdol'").fetchone()
|
||||
assert d["type"] == "decision"
|
||||
|
||||
|
||||
# -----------------------------------------------------------------------
|
||||
# KIN-062: KIN_LEARNER_TIMEOUT — отдельный таймаут для learner-агента
|
||||
# -----------------------------------------------------------------------
|
||||
|
||||
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": ""}, clear=False)
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_learner_uses_120s_default_timeout(self, mock_run, conn):
|
||||
"""KIN-062: по умолчанию learner использует таймаут 120s (KIN_LEARNER_TIMEOUT не задан)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 120
|
||||
|
||||
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "300"}, clear=False)
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_learner_uses_custom_timeout_from_env(self, mock_run, conn):
|
||||
"""KIN-062: KIN_LEARNER_TIMEOUT переопределяет дефолтный таймаут learner-агента."""
|
||||
mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 300
|
||||
|
||||
@patch.dict("os.environ", {"KIN_LEARNER_TIMEOUT": "60", "KIN_AGENT_TIMEOUT": "900"}, clear=False)
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_learner_timeout_independent_of_agent_timeout(self, mock_run, conn):
|
||||
"""KIN-062: KIN_LEARNER_TIMEOUT не зависит от KIN_AGENT_TIMEOUT."""
|
||||
mock_run.return_value = _mock_claude_success({"result": json.dumps({"decisions": []})})
|
||||
|
||||
step_results = [{"role": "debugger", "raw_output": "output"}]
|
||||
_run_learning_extraction(conn, "VDOL-001", "vdol", step_results)
|
||||
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 60
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KIN-056: Regression — web path timeout parity with CLI
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestRegressionKIN056:
|
||||
"""Регрессионные тесты KIN-056: агенты таймаутили через 300s из web, но не из CLI.
|
||||
|
||||
Причина: noninteractive режим использовал timeout=300s.
|
||||
Web API всегда устанавливает KIN_NONINTERACTIVE=1, поэтому таймаут был 300s.
|
||||
Фикс: единый timeout=600s независимо от noninteractive (переопределяется KIN_AGENT_TIMEOUT).
|
||||
|
||||
Каждый тест ПАДАЛ бы со старым кодом (timeout=300 для noninteractive)
|
||||
и ПРОХОДИТ после фикса.
|
||||
"""
|
||||
|
||||
@patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""})
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_web_noninteractive_env_does_not_use_300s(self, mock_run, conn):
|
||||
"""Web путь устанавливает KIN_NONINTERACTIVE=1. До фикса это давало timeout=300s."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") != 300, (
|
||||
"Регрессия KIN-056: timeout не должен быть 300s в noninteractive режиме"
|
||||
)
|
||||
|
||||
@patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""})
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_web_noninteractive_timeout_is_600(self, mock_run, conn):
|
||||
"""Web путь: KIN_NONINTERACTIVE=1 → timeout = 600s (не 300s)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 600
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_web_and_cli_paths_use_same_timeout(self, mock_run, conn):
|
||||
"""Таймаут через web-путь (KIN_NONINTERACTIVE=1) == таймаут CLI (noninteractive=True)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
|
||||
# Web path: env var KIN_NONINTERACTIVE=1, noninteractive param not set
|
||||
with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": ""}):
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=False)
|
||||
web_timeout = mock_run.call_args[1].get("timeout")
|
||||
|
||||
mock_run.reset_mock()
|
||||
|
||||
# CLI path: noninteractive=True, no env var
|
||||
with patch.dict("os.environ", {"KIN_NONINTERACTIVE": "", "KIN_AGENT_TIMEOUT": ""}):
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol", noninteractive=True)
|
||||
cli_timeout = mock_run.call_args[1].get("timeout")
|
||||
|
||||
assert web_timeout == cli_timeout, (
|
||||
f"Таймаут web ({web_timeout}s) != CLI ({cli_timeout}s) — регрессия KIN-056"
|
||||
)
|
||||
|
||||
@patch.dict("os.environ", {"KIN_NONINTERACTIVE": "1", "KIN_AGENT_TIMEOUT": "900"})
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_web_noninteractive_respects_kin_agent_timeout_override(self, mock_run, conn):
|
||||
"""Web путь: KIN_AGENT_TIMEOUT переопределяет дефолтный таймаут даже при KIN_NONINTERACTIVE=1."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert call_kwargs.get("timeout") == 900
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KIN-057: claude CLI в PATH при запуске через launchctl
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestClaudePath:
|
||||
"""Регрессионные тесты KIN-057: launchctl-демоны могут не видеть claude в PATH."""
|
||||
|
||||
def test_build_claude_env_contains_extra_paths(self):
|
||||
"""_build_claude_env должен добавить /opt/homebrew/bin и /usr/local/bin в PATH."""
|
||||
env = _build_claude_env()
|
||||
path_dirs = env["PATH"].split(":")
|
||||
for extra_dir in _EXTRA_PATH_DIRS:
|
||||
assert extra_dir in path_dirs, (
|
||||
f"Регрессия KIN-057: {extra_dir} не найден в PATH, сгенерированном _build_claude_env"
|
||||
)
|
||||
|
||||
def test_build_claude_env_no_duplicate_paths(self):
|
||||
"""_build_claude_env не должен дублировать уже существующие пути."""
|
||||
env = _build_claude_env()
|
||||
path_dirs = env["PATH"].split(":")
|
||||
seen = set()
|
||||
for d in path_dirs:
|
||||
assert d not in seen, f"Дублирующийся PATH entry: {d}"
|
||||
seen.add(d)
|
||||
|
||||
def test_build_claude_env_preserves_existing_path(self):
|
||||
"""_build_claude_env должен сохранять уже существующие пути."""
|
||||
with patch.dict("os.environ", {"PATH": "/custom/bin:/usr/bin:/bin"}):
|
||||
env = _build_claude_env()
|
||||
path_dirs = env["PATH"].split(":")
|
||||
assert "/custom/bin" in path_dirs
|
||||
assert "/usr/bin" in path_dirs
|
||||
|
||||
def test_resolve_claude_cmd_returns_string(self):
|
||||
"""_resolve_claude_cmd должен всегда возвращать строку."""
|
||||
cmd = _resolve_claude_cmd()
|
||||
assert isinstance(cmd, str)
|
||||
assert len(cmd) > 0
|
||||
|
||||
def test_resolve_claude_cmd_fallback_when_not_found(self):
|
||||
"""_resolve_claude_cmd должен вернуть 'claude' если CLI не найден в PATH."""
|
||||
with patch("agents.runner.shutil.which", return_value=None):
|
||||
cmd = _resolve_claude_cmd()
|
||||
assert cmd == "claude"
|
||||
|
||||
def test_resolve_claude_cmd_returns_full_path_when_found(self):
|
||||
"""_resolve_claude_cmd должен вернуть полный путь если claude найден."""
|
||||
with patch("agents.runner.shutil.which", return_value="/opt/homebrew/bin/claude"):
|
||||
cmd = _resolve_claude_cmd()
|
||||
assert cmd == "/opt/homebrew/bin/claude"
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_run_claude_passes_env_to_subprocess(self, mock_run, conn):
|
||||
"""_run_claude должен передавать env= в subprocess.run (а не наследовать голый PATH)."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
assert "env" in call_kwargs, (
|
||||
"Регрессия KIN-057: subprocess.run должен получать явный env с расширенным PATH"
|
||||
)
|
||||
assert call_kwargs["env"] is not None
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_run_claude_env_has_homebrew_in_path(self, mock_run, conn):
|
||||
"""env переданный в subprocess.run должен содержать /opt/homebrew/bin в PATH."""
|
||||
mock_run.return_value = _mock_claude_success({"result": "ok"})
|
||||
run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
call_kwargs = mock_run.call_args[1]
|
||||
env = call_kwargs.get("env", {})
|
||||
assert "/opt/homebrew/bin" in env.get("PATH", ""), (
|
||||
"Регрессия KIN-057: /opt/homebrew/bin не найден в env['PATH'] subprocess.run"
|
||||
)
|
||||
|
||||
@patch("agents.runner.subprocess.run")
|
||||
def test_file_not_found_returns_127(self, mock_run, conn):
|
||||
"""Если claude не найден (FileNotFoundError), должен вернуться returncode 127."""
|
||||
mock_run.side_effect = FileNotFoundError("claude not found")
|
||||
result = run_agent(conn, "debugger", "VDOL-001", "vdol")
|
||||
assert result["success"] is False
|
||||
assert "not found" in (result.get("error") or "").lower()
|
||||
|
||||
@patch.dict("os.environ", {"PATH": ""})
def test_launchctl_empty_path_build_env_adds_extra_dirs(self):
    """Regression KIN-057: when launchctl launches us with an empty PATH,
    _build_claude_env must append _EXTRA_PATH_DIRS so claude stays reachable.

    Without the fix: os.environ["PATH"]="" → shutil.which("claude") → None → FileNotFoundError.
    With the fix: _build_claude_env assembles a PATH with /opt/homebrew/bin and friends.
    """
    built_env = _build_claude_env()
    entries = built_env["PATH"].split(":")
    # Assert each critical directory individually for a precise failure message.
    for extra_dir in _EXTRA_PATH_DIRS:
        assert extra_dir in entries, (
            f"KIN-057: при пустом os PATH директория {extra_dir} должна быть добавлена"
        )
|
||||
|
||||
@patch.dict("os.environ", {"PATH": ""})
def test_launchctl_empty_path_shutil_which_fails_without_fix(self):
    """Reproduces the broken behaviour: with PATH='' shutil.which returns None.

    This is exactly what happened before the fix — launchctl could not see claude.
    The test documents WHY _build_claude_env is needed instead of raw os.environ.
    """
    import shutil

    # Pre-fix behaviour: searching an empty PATH cannot find claude.
    lookup_result = shutil.which("claude", path="")
    assert lookup_result is None, (
        "Если этот assert упал — shutil.which нашёл claude в пустом PATH, "
        "что невозможно. Ожидаем None — именно поэтому нужен _build_claude_env."
    )

    # Post-fix behaviour: _resolve_claude_cmd builds an extended PATH and finds
    # claude (or falls back to "claude") instead of raising FileNotFoundError.
    resolved = _resolve_claude_cmd()
    assert isinstance(resolved, str) and len(resolved) > 0, (
        "KIN-057: _resolve_claude_cmd должен возвращать строку даже при пустом os PATH"
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KIN-063: TestCompletionMode — auto_complete + last-step role check
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestCompletionMode:
    """auto_complete mode fires only when the last pipeline step is tester or reviewer.

    KIN-063 / Decision #29: any other trailing role — and the unmigrated legacy
    value 'auto' — must fall back to the safe 'review' status.
    All success-path pipeline tests here patch _run_autocommit per gotcha #41
    (mock autocommit in tests that are not about autocommit).
    """

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_with_tester_last_sets_done(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """auto_complete + tester as the last step → status=done (Decision #29)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "done"

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_with_reviewer_last_sets_done(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """auto_complete + reviewer as the last step → status=done."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "developer", "brief": "fix"}, {"role": "reviewer", "brief": "review"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "done"

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_auto_complete_without_tester_last_sets_review(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """auto_complete + last step NOT tester/reviewer → status=review (Decision #29)."""
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        models.update_project(conn, "vdol", execution_mode="auto_complete")
        steps = [{"role": "developer", "brief": "fix"}, {"role": "debugger", "brief": "debug"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review", (
            "Регрессия KIN-063: auto_complete без tester/reviewer последним НЕ должен авто-завершать"
        )

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_legacy_auto_mode_value_not_recognized(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """Regression: the legacy value 'auto' is no longer a valid mode.

        After KIN-063 'auto' → 'auto_complete'. If an unmigrated DB row still
        holds 'auto', the runner must NOT auto-complete — the safe fallback is
        the 'review' branch. (Decision #29)
        """
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        # Raw SQL update, bypassing validate_completion_mode, to simulate an
        # old unmigrated row in the DB.
        conn.execute("UPDATE projects SET execution_mode='auto' WHERE id='vdol'")
        conn.commit()
        steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review", (
            "Регрессия: 'auto' (старый формат) не должен срабатывать как auto_complete"
        )

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_with_tester_last_keeps_task_in_review(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """review mode + tester last → task.status == 'review', NOT done (waits for manual approve)."""
        mock_run.return_value = _mock_claude_success({"result": "all tests pass"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        # Project and task stay in the default 'review' mode.
        steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review"
        assert task["status"] != "done", (
            "KIN-063: review mode не должен авто-завершать задачу даже если tester последний"
        )

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("core.followup.generate_followups")
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_project_review_overrides_no_task_completion_mode(
        self, mock_run, mock_hooks, mock_followup, mock_autocommit, conn
    ):
        """Project execution_mode='review' + task without override → pipeline ends in 'review'.

        Scenario: the PM picked auto_complete, but the project is configured as
        'review' (explicit human override). The task carries no task-level
        execution_mode, so get_effective_mode returns the project-level 'review'.
        """
        mock_run.return_value = _mock_claude_success({"result": "ok"})
        mock_hooks.return_value = []
        mock_followup.return_value = {"created": [], "pending_actions": []}

        # Project explicitly in 'review'; task has no execution_mode.
        models.update_project(conn, "vdol", execution_mode="review")
        # Task VDOL-001 is created without execution_mode (None) — fixture.
        steps = [{"role": "developer", "brief": "fix"}, {"role": "tester", "brief": "test"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        assert result["mode"] == "review"
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review", (
            "KIN-063: project-level 'review' должен применяться когда задача не имеет override"
        )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KIN-048: _run_autocommit — флаг, git path, env=
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestAutocommit:
    """KIN-048: _run_autocommit — the autocommit_enabled flag, shutil.which lookup, env= regression."""

    def test_disabled_project_skips_subprocess(self, conn):
        """With autocommit_enabled=0 (the default) no subprocess is ever spawned."""
        with patch("agents.runner.subprocess.run") as run_mock:
            _run_autocommit(conn, "VDOL-001", "vdol")
        run_mock.assert_not_called()

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_enabled_calls_git_add_and_commit(self, mock_which, mock_run, conn, tmp_path):
        """autocommit_enabled=1 → `git add -A` then `git commit` run with task_id and title."""
        mock_which.return_value = "/usr/bin/git"
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))

        _run_autocommit(conn, "VDOL-001", "vdol")

        assert mock_run.call_count == 2
        add_call, commit_call = mock_run.call_args_list
        assert add_call[0][0] == ["/usr/bin/git", "add", "-A"]
        commit_argv = commit_call[0][0]
        assert commit_argv[0] == "/usr/bin/git"
        assert commit_argv[1] == "commit"
        commit_message = commit_argv[-1]
        assert "VDOL-001" in commit_message
        assert "Fix bug" in commit_message

    @patch("agents.runner.subprocess.run")
    def test_nothing_to_commit_no_exception(self, mock_run, conn, tmp_path):
        """returncode=1 (nothing to commit) → no exception escapes."""
        mock_run.return_value = MagicMock(returncode=1)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))

        _run_autocommit(conn, "VDOL-001", "vdol")  # must not raise

    @patch("agents.runner.subprocess.run")
    def test_passes_env_to_subprocess(self, mock_run, conn, tmp_path):
        """Regression #33: every subprocess.run invocation must receive env=."""
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))

        _run_autocommit(conn, "VDOL-001", "vdol")

        for recorded_call in mock_run.call_args_list:
            kwargs = recorded_call[1]
            assert "env" in kwargs, "Regression #33: subprocess.run должен получать env="
            assert "/opt/homebrew/bin" in kwargs["env"].get("PATH", "")

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_resolves_git_via_shutil_which(self, mock_which, mock_run, conn, tmp_path):
        """Regression #32: git is resolved through shutil.which, not a hardcoded 'git'."""
        mock_which.return_value = "/opt/homebrew/bin/git"
        mock_run.return_value = MagicMock(returncode=0)
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))

        _run_autocommit(conn, "VDOL-001", "vdol")

        which_git_calls = [c for c in mock_which.call_args_list if c[0][0] == "git"]
        assert len(which_git_calls) > 0, "Regression #32: shutil.which должен вызываться для git"
        first_argv = mock_run.call_args_list[0][0][0]
        assert first_argv[0] == "/opt/homebrew/bin/git"

    @patch("agents.runner.subprocess.run")
    @patch("agents.runner.shutil.which")
    def test_git_not_found_no_crash_logs_warning(self, mock_which, mock_run, conn, tmp_path):
        """shutil.which(git) → None → fallback 'git' → FileNotFoundError → no crash, WARNING logged."""
        mock_which.return_value = None  # git is absent from PATH
        mock_run.side_effect = FileNotFoundError("git: command not found")
        models.update_project(conn, "vdol", autocommit_enabled=1, path=str(tmp_path))

        with patch("agents.runner._logger") as logger_mock:
            _run_autocommit(conn, "VDOL-001", "vdol")  # must not raise

        logger_mock.warning.assert_called_once()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner.subprocess.run")
    def test_autocommit_not_called_on_failed_pipeline(self, mock_run, mock_autocommit, conn):
        """Pipeline failure → _run_autocommit must NOT be called (gotcha #41)."""
        mock_run.return_value = _mock_claude_failure("compilation error")

        outcome = run_pipeline(conn, "VDOL-001", [{"role": "debugger", "brief": "find"}])

        assert outcome["success"] is False
        mock_autocommit.assert_not_called()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# KIN-055: execution_mode='review' при переводе задачи в статус review
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class TestReviewModeExecutionMode:
    """Regression KIN-055: execution_mode must be 'review', not NULL, after a review-mode pipeline.

    Pipeline-running tests patch _run_autocommit per gotcha #41 (mock autocommit
    in tests that are not about autocommit).
    """

    def test_task_execution_mode_is_null_before_pipeline(self, conn):
        """Edge case: execution_mode IS NULL before any pipeline runs (task just created)."""
        task = models.get_task(conn, "VDOL-001")
        assert task["execution_mode"] is None, (
            "Задача должна иметь NULL execution_mode до выполнения pipeline"
        )

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_sets_execution_mode_review(
        self, mock_run, mock_hooks, mock_autocommit, conn
    ):
        """After a review-mode pipeline, task.execution_mode must be 'review', not NULL."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []

        steps = [{"role": "debugger", "brief": "find bug"}]
        result = run_pipeline(conn, "VDOL-001", steps)

        assert result["success"] is True
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "review"
        # Regression check KIN-055: execution_mode must not remain NULL.
        assert task["execution_mode"] is not None, (
            "Регрессия KIN-055: execution_mode не должен быть NULL после перевода задачи в статус review"
        )
        assert task["execution_mode"] == "review"

    @patch("agents.runner._run_autocommit")  # gotcha #41: not an autocommit test
    @patch("agents.runner.run_hooks")
    @patch("agents.runner.subprocess.run")
    def test_review_mode_execution_mode_persisted_in_db(
        self, mock_run, mock_hooks, mock_autocommit, conn
    ):
        """execution_mode='review' must be persisted in SQLite directly, bypassing the ORM layer."""
        mock_run.return_value = _mock_claude_success({"result": "done"})
        mock_hooks.return_value = []

        steps = [{"role": "debugger", "brief": "find"}]
        run_pipeline(conn, "VDOL-001", steps)

        row = conn.execute(
            "SELECT execution_mode FROM tasks WHERE id='VDOL-001'"
        ).fetchone()
        assert row is not None
        assert row["execution_mode"] == "review", (
            "Регрессия KIN-055: execution_mode должен быть 'review' в SQLite после pipeline"
        )
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue