kin: auto-commit after pipeline

This commit is contained in:
Gros Frumos 2026-03-17 15:59:43 +02:00
parent 396f5193d3
commit 18160de45e
9 changed files with 449 additions and 0 deletions

View file

@ -2174,3 +2174,28 @@ def test_get_projects_includes_test_command(client):
p1 = next((p for p in projects if p["id"] == "p1"), None)
assert p1 is not None
assert p1["test_command"] == "cargo test"
def test_patch_project_test_command_db_value_verified(client):
"""KIN-ARCH-008 decision #318: PATCH of test_command persists the value both in the response and in the DB."""
r = client.patch("/api/projects/p1", json={"test_command": "pytest --tb=short"})
assert r.status_code == 200
assert r.json()["test_command"] == "pytest --tb=short"
# Re-open the DB directly to prove the value was actually written, not just echoed back.
from core.db import init_db
conn = init_db(api_module.DB_PATH)
row = conn.execute("SELECT test_command FROM projects WHERE id = 'p1'").fetchone()
conn.close()
assert row[0] == "pytest --tb=short"
def test_patch_project_test_command_null_returns_400(client):
"""KIN-ARCH-008: PATCH with test_command=null (and no other fields) → 400 'Nothing to update'.

null is treated as "field not provided": has_any=False → 400.
This documents current behavior: there is no way to reset test_command via PATCH null.
"""
client.patch("/api/projects/p1", json={"test_command": "npm test"})
r = client.patch("/api/projects/p1", json={"test_command": None})
assert r.status_code == 400
assert "Nothing to update" in r.json()["detail"]

View file

@ -213,6 +213,22 @@ class TestRunProjectTests:
assert result["success"] is False
assert result["returncode"] == 127
def test_run_project_tests_empty_string_returns_failure(self):
    """KIN-ARCH-008 AC#7: an empty test_command yields returncode -1 and success=False."""
    from agents.runner import _run_project_tests

    outcome = _run_project_tests("/fake/path", test_command="")
    # No command to run: the helper must report failure without invoking a subprocess.
    assert outcome["returncode"] == -1
    assert outcome["success"] is False
    assert "Empty test_command" in outcome["output"]
def test_returncode_127_output_contains_not_found(self):
    """KIN-ARCH-008 AC#7: with returncode 127 the output mentions 'not found' for diagnostics."""
    from agents.runner import _run_project_tests

    # Simulate a missing binary: which() finds nothing and subprocess.run blows up.
    with patch("agents.runner.shutil.which", return_value=None), \
            patch("agents.runner.subprocess.run", side_effect=FileNotFoundError):
        outcome = _run_project_tests("/fake/path", test_command="badcmd")
    assert "not found" in outcome["output"].lower()
def _mock_success(output="done"):
m = MagicMock()
@ -359,6 +375,30 @@ class TestAutoTestInPipeline:
called_test_command = mock_tests.call_args[0][1]
assert called_test_command == "npm test"
@patch("agents.runner._run_autocommit")
@patch("agents.runner._run_project_tests")
@patch("agents.runner.subprocess.run")
def test_auto_test_returncode_127_blocks_task(
self, mock_run, mock_tests, mock_autocommit, conn
):
"""KIN-ARCH-008 AC#7: returncode 127 (command not found) exhausts attempts and blocks the task."""
from agents.runner import run_pipeline
from core import models
import os
mock_run.return_value = _mock_success()
# Command not found — always 127, always fails
mock_tests.return_value = {"success": False, "output": "badcmd not found in PATH", "returncode": 127}
models.update_project(conn, "vdol", auto_test_enabled=True, test_command="badcmd")
# Cap retries at 1 so the pipeline fails fast instead of looping the default attempt budget.
with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "1"}):
steps = [{"role": "backend_dev", "brief": "implement"}]
result = run_pipeline(conn, "VDOL-001", steps)
assert result["success"] is False
task = models.get_task(conn, "VDOL-001")
assert task["status"] == "blocked"
# ---------------------------------------------------------------------------
# (3) Spec-driven workflow route

233
tests/test_watchdog.py Normal file
View file

@ -0,0 +1,233 @@
"""Tests for pipeline watchdog (KIN-099)."""
import json
import os
import pytest
from unittest.mock import patch, MagicMock
from core.db import init_db
from core import models
from core.watchdog import _check_dead_pipelines
@pytest.fixture
def conn():
    """In-memory DB pre-seeded with the project and task that run_pipeline tests expect."""
    connection = init_db(db_path=":memory:")
    models.create_project(
        connection, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek", tech_stack=["vue3"]
    )
    models.create_task(
        connection, "VDOL-001", "vdol", "Fix bug", brief={"route_type": "debug"}
    )
    yield connection
    connection.close()
@pytest.fixture
def project_and_task(conn):
    """Secondary project + task used by the pipelines-with-pid tests."""
    project = models.create_project(conn, "proj1", "Test Project", "/tmp/test")
    task = models.create_task(conn, "T-001", "proj1", "Test task", priority=3)
    return project, task
def _make_running_pipeline(conn, task_id, project_id, pid=None):
    """Create a pipeline and flip it to status='running' with the given pid."""
    created = models.create_pipeline(conn, task_id, project_id, "custom", [])
    models.update_pipeline(conn, created["id"], status="running", pid=pid)
    return created
# ---------------------------------------------------------------------------
# get_running_pipelines_with_pid
# ---------------------------------------------------------------------------
def test_get_running_pipelines_returns_only_with_pid(conn, project_and_task):
    """Only running pipelines that actually carry a pid are reported."""
    _, task = project_and_task
    task_id = task["id"]
    _make_running_pipeline(conn, task_id, "proj1", pid=12345)
    # Running but pid-less pipeline must be excluded from the result.
    _make_running_pipeline(conn, task_id, "proj1", pid=None)
    found = models.get_running_pipelines_with_pid(conn)
    assert len(found) == 1
    assert found[0]["pid"] == 12345
def test_get_running_pipelines_excludes_completed(conn, project_and_task):
    """A completed pipeline is ignored even when it still has a pid set."""
    _, task = project_and_task
    finished = models.create_pipeline(conn, task["id"], "proj1", "custom", [])
    # Completed pipeline with a pid — the watchdog query must not return it.
    models.update_pipeline(conn, finished["id"], status="completed", pid=99999)
    assert models.get_running_pipelines_with_pid(conn) == []
# ---------------------------------------------------------------------------
# _check_dead_pipelines (watchdog logic via tmp db file)
# ---------------------------------------------------------------------------
def _db_with_running_pipeline(tmp_path, pid):
"""Create a temp DB file with one running pipeline.

Returns (db_path, task_id, pipeline_id) so callers can re-open the DB and
inspect both the task and the pipeline after the watchdog has run.
"""
db_path = tmp_path / "kin.db"
conn = init_db(db_path=str(db_path))
models.create_project(conn, "p1", "Proj", "/tmp/p")
task = models.create_task(conn, "T-W01", "p1", "watchdog task", priority=1)
tid = task["id"]
pipeline = models.create_pipeline(conn, tid, "p1", "custom", [])
models.update_pipeline(conn, pipeline["id"], status="running", pid=pid)
# Close before handing the path over — the watchdog opens its own connection.
conn.close()
return db_path, tid, pipeline["id"]
def test_watchdog_alive_pid_no_change(tmp_path):
"""A live PID must not change the task status."""
live_pid = os.getpid()  # Our own PID — definitely alive
db_path, task_id, pipeline_id = _db_with_running_pipeline(tmp_path, live_pid)
_check_dead_pipelines(db_path)
# Re-open the DB and confirm nothing was touched by the watchdog.
conn = init_db(db_path=str(db_path))
task = models.get_task(conn, task_id)
pipeline_row = conn.execute("SELECT status FROM pipelines WHERE id=?", (pipeline_id,)).fetchone()
conn.close()
assert task["status"] != "blocked"
assert pipeline_row["status"] == "running"
def test_watchdog_dead_pid_blocks_task(tmp_path):
"""Dead PID → pipeline=failed, task=blocked with a blocked_reason."""
dead_pid = 2**31 - 1  # Guaranteed non-existent PID
db_path, task_id, pipeline_id = _db_with_running_pipeline(tmp_path, dead_pid)
# Ensure the PID is actually dead
with pytest.raises((ProcessLookupError, OSError)):
os.kill(dead_pid, 0)
_check_dead_pipelines(db_path)
conn = init_db(db_path=str(db_path))
task = models.get_task(conn, task_id)
pipeline_row = conn.execute("SELECT status FROM pipelines WHERE id=?", (pipeline_id,)).fetchone()
conn.close()
assert task["status"] == "blocked"
# blocked_reason must mention the dead PID for diagnosability.
assert str(dead_pid) in (task.get("blocked_reason") or "")
assert pipeline_row["status"] == "failed"
def test_watchdog_pipeline_without_pid_skipped(tmp_path):
"""A pipeline without a pid is left untouched by the watchdog."""
db_path = tmp_path / "kin.db"
conn = init_db(db_path=str(db_path))
models.create_project(conn, "p2", "Proj2", "/tmp/p2")
task = models.create_task(conn, "T-W02", "p2", "no-pid task", priority=1)
tid = task["id"]
pipeline = models.create_pipeline(conn, tid, "p2", "custom", [])
models.update_pipeline(conn, pipeline["id"], status="running")  # pid=None
conn.close()
_check_dead_pipelines(db_path)
conn = init_db(db_path=str(db_path))
task = models.get_task(conn, tid)
conn.close()
assert task["status"] != "blocked"
# ---------------------------------------------------------------------------
# DB migration (KIN-099): pid column exists and is nullable
# ---------------------------------------------------------------------------
def test_db_migration_pid_column_exists(conn):
"""After init_db the pid column must be present in the pipelines table."""
cols = {row[1] for row in conn.execute("PRAGMA table_info(pipelines)").fetchall()}
assert "pid" in cols
def test_db_migration_pid_column_nullable(conn, project_and_task):
"""pid is nullable: INSERTing a pipeline without a pid must work."""
_, task = project_and_task
# create_pipeline does not pass pid — the insert must succeed anyway
pipeline = models.create_pipeline(conn, task["id"], "proj1", "custom", [])
row = conn.execute("SELECT pid FROM pipelines WHERE id=?", (pipeline["id"],)).fetchone()
assert row["pid"] is None
# ---------------------------------------------------------------------------
# PID saved after run_pipeline()
# ---------------------------------------------------------------------------
def _make_mock_claude_success():
mock = MagicMock()
mock.stdout = json.dumps({
"result": "done",
"usage": {"total_tokens": 100},
"cost_usd": 0.001,
})
mock.stderr = ""
mock.returncode = 0
return mock
def test_run_pipeline_saves_pid(conn):
"""run_pipeline() stores os.getpid() in the pid column of the pipelines table."""
from agents.runner import run_pipeline
steps = [{"role": "tester", "model": "haiku", "brief": "test brief"}]
with patch("agents.runner.subprocess.run") as mock_run, \
patch("agents.runner.check_claude_auth"):
mock_run.return_value = _make_mock_claude_success()
run_pipeline(conn, "VDOL-001", steps)
# Check that at least one pipeline row was written with pid = os.getpid()
row = conn.execute(
"SELECT pid FROM pipelines WHERE task_id='VDOL-001' ORDER BY id DESC LIMIT 1"
).fetchone()
assert row is not None
assert row["pid"] == os.getpid()
# ---------------------------------------------------------------------------
# Parent process check per pipeline step
# ---------------------------------------------------------------------------
def test_run_pipeline_aborts_when_parent_dies(conn):
"""If the parent process is dead — the pipeline finishes as failed and the task is blocked."""
import errno as _errno
from agents.runner import run_pipeline
steps = [{"role": "tester", "model": "haiku", "brief": "test brief"}]
dead_ppid = 2**31 - 1  # guaranteed non-existent
def _fake_kill(pid, sig):
if pid == dead_ppid and sig == 0:
# Must carry errno=ESRCH so _check_parent_alive recognises it
raise OSError(_errno.ESRCH, os.strerror(_errno.ESRCH))
# Other PIDs (e.g. our own) — allow silently
return None
with patch("agents.runner.os.getppid", return_value=dead_ppid), \
patch("agents.runner.os.kill", side_effect=_fake_kill), \
patch("agents.runner.check_claude_auth"), \
patch("agents.runner.subprocess.run") as mock_run:
mock_run.return_value = _make_mock_claude_success()
result = run_pipeline(conn, "VDOL-001", steps)
assert result["success"] is False
assert result.get("error") == "parent_process_died"
# The task must be blocked, with the dead ppid mentioned in blocked_reason
task = models.get_task(conn, "VDOL-001")
assert task["status"] == "blocked"
assert str(dead_ppid) in (task.get("blocked_reason") or "")
# The pipeline must end up failed
row = conn.execute(
"SELECT status FROM pipelines WHERE task_id='VDOL-001' ORDER BY id DESC LIMIT 1"
).fetchone()
assert row["status"] == "failed"