kin: auto-commit after pipeline

This commit is contained in:
Gros Frumos 2026-03-17 19:30:15 +02:00
parent e7c65c22e5
commit 0e522e54a9
7 changed files with 363 additions and 65 deletions

View file

@ -774,6 +774,50 @@ def _is_test_failure(result: dict) -> bool:
# Roles whose pipeline steps trigger the project-level auto-test run afterwards.
_AUTO_TEST_ROLES = {"backend_dev", "frontend_dev"}
def _detect_test_command(project_path: str) -> str | None:
"""Auto-detect test command by inspecting project files.
Candidates (in priority order):
1. make test Makefile exists and has a 'test' target
2. npm test package.json exists and has scripts.test
3. pytest pyproject.toml or setup.py exists
4. npx tsc --noEmit tsconfig.json exists
Returns the first matching command, or None if no framework is detected.
"""
path = Path(project_path)
# 1. make test
makefile = path / "Makefile"
if makefile.is_file():
try:
content = makefile.read_text(errors="ignore")
if re.search(r"^test\s*:", content, re.MULTILINE):
return "make test"
except OSError:
pass
# 2. npm test
pkg_json = path / "package.json"
if pkg_json.is_file():
try:
pkg = json.loads(pkg_json.read_text())
if pkg.get("scripts", {}).get("test"):
return "npm test"
except (json.JSONDecodeError, OSError):
pass
# 3. pytest
if (path / "pyproject.toml").is_file() or (path / "setup.py").is_file():
return "pytest"
# 4. npx tsc --noEmit
if (path / "tsconfig.json").is_file():
return "npx tsc --noEmit"
return None
def _run_project_tests(project_path: str, test_command: str = 'make test', timeout: int = 120) -> dict:
"""Run test_command in project_path. Returns {success, output, returncode}.
@ -1748,8 +1792,9 @@ def run_pipeline(
previous_output = json.dumps(previous_output, ensure_ascii=False)
continue
# Project-level auto-test: run `make test` after backend_dev/frontend_dev steps.
# Project-level auto-test: run tests after backend_dev/frontend_dev steps.
# Enabled per project via auto_test_enabled flag (opt-in).
# test_command priority: project.test_command (explicit) → auto-detect → skip.
# On failure, loop fixer up to KIN_AUTO_TEST_MAX_ATTEMPTS times, then block.
if (
not dry_run
@ -1759,9 +1804,25 @@ def run_pipeline(
and project_for_wt.get("auto_test_enabled")
and project_for_wt.get("path")
):
max_auto_test_attempts = int(os.environ.get("KIN_AUTO_TEST_MAX_ATTEMPTS") or 3)
p_path_str = str(Path(project_for_wt["path"]).expanduser())
p_test_cmd = project_for_wt.get("test_command") or "make test"
p_test_cmd_override = project_for_wt.get("test_command")
if p_test_cmd_override:
p_test_cmd = p_test_cmd_override
else:
p_test_cmd = _detect_test_command(p_path_str)
if p_test_cmd is None:
# No test framework detected — skip without blocking pipeline
_logger.info("auto-test: no test framework detected in %s, skipping", p_path_str)
results.append({
"role": "_auto_test",
"success": True,
"output": "no test framework detected",
"_project_test": True,
"_skipped": True,
})
else:
max_auto_test_attempts = int(os.environ.get("KIN_AUTO_TEST_MAX_ATTEMPTS") or 3)
test_run = _run_project_tests(p_path_str, p_test_cmd)
results.append({"role": "_auto_test", "success": test_run["success"],
"output": test_run["output"], "_project_test": True})

View file

@ -37,7 +37,7 @@ CREATE TABLE IF NOT EXISTS projects (
obsidian_vault_path TEXT,
worktrees_enabled INTEGER DEFAULT 0,
auto_test_enabled INTEGER DEFAULT 0,
test_command TEXT DEFAULT 'make test',
test_command TEXT DEFAULT NULL,
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
);
@ -696,10 +696,10 @@ def _migrate(conn: sqlite3.Connection):
""")
conn.commit()
# Add test_command column to projects (KIN-ARCH-008)
# Add test_command column to projects (KIN-ARCH-008); NULL = auto-detect (KIN-101)
projects_cols = {row["name"] for row in conn.execute("PRAGMA table_info(projects)")}
if "test_command" not in projects_cols:
conn.execute("ALTER TABLE projects ADD COLUMN test_command TEXT DEFAULT 'make test'")
conn.execute("ALTER TABLE projects ADD COLUMN test_command TEXT DEFAULT NULL")
conn.commit()
# Rename legacy 'auto' → 'auto_complete' (KIN-063)

View file

@ -2197,8 +2197,11 @@ def test_create_project_with_test_command(client):
assert row[0] == "npm test"
def test_patch_project_test_command_empty_string_stores_empty(client):
"""KIN-ARCH-008: PATCH с пустой строкой сохраняет пустую строку (не NULL, в отличие от deploy_command)."""
def test_patch_project_test_command_empty_string_stores_null(client):
"""KIN-101: PATCH с пустой строкой сохраняет NULL — включает режим авто-определения фреймворка.
Пустая строка сентинел для сброса test_command (аналогично deploy_command и другим полям).
"""
client.patch("/api/projects/p1", json={"test_command": "pytest -v"})
client.patch("/api/projects/p1", json={"test_command": ""})
@ -2206,7 +2209,7 @@ def test_patch_project_test_command_empty_string_stores_empty(client):
conn = init_db(api_module.DB_PATH)
row = conn.execute("SELECT test_command FROM projects WHERE id = 'p1'").fetchone()
conn.close()
assert row[0] == ""
assert row[0] is None
def test_get_projects_includes_test_command(client):

View file

@ -260,7 +260,7 @@ class TestAutoTestInPipeline:
from core import models
mock_run.return_value = _mock_success()
mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
models.update_project(conn, "vdol", auto_test_enabled=True)
models.update_project(conn, "vdol", auto_test_enabled=True, test_command="make test")
steps = [{"role": "backend_dev", "brief": "implement"}]
result = run_pipeline(conn, "VDOL-001", steps)
@ -301,7 +301,7 @@ class TestAutoTestInPipeline:
{"success": False, "output": "FAILED: test_foo", "returncode": 1},
{"success": True, "output": "OK", "returncode": 0},
]
models.update_project(conn, "vdol", auto_test_enabled=True)
models.update_project(conn, "vdol", auto_test_enabled=True, test_command="make test")
with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "3"}):
steps = [{"role": "backend_dev", "brief": "implement"}]
@ -327,7 +327,7 @@ class TestAutoTestInPipeline:
mock_run.return_value = _mock_success()
# Тест всегда падает
mock_tests.return_value = {"success": False, "output": "FAILED", "returncode": 1}
models.update_project(conn, "vdol", auto_test_enabled=True)
models.update_project(conn, "vdol", auto_test_enabled=True, test_command="make test")
with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "2"}):
steps = [{"role": "backend_dev", "brief": "implement"}]
@ -348,7 +348,7 @@ class TestAutoTestInPipeline:
from agents.runner import run_pipeline
from core import models
mock_run.return_value = _mock_success()
models.update_project(conn, "vdol", auto_test_enabled=True)
models.update_project(conn, "vdol", auto_test_enabled=True, test_command="make test")
steps = [{"role": "debugger", "brief": "find"}]
run_pipeline(conn, "VDOL-001", steps)

View file

@ -0,0 +1,233 @@
"""
Regression tests for KIN-101:
Auto-test fallback: _detect_test_command and pipeline skip logic.
Coverage:
(1) _detect_test_command file-based detection
(2) Pipeline: auto-detect used when test_command is NULL
(3) Pipeline: skips tests with log when no framework detected (no blocking)
"""
import json
import os
import pytest
from pathlib import Path
from unittest.mock import patch, MagicMock
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def conn():
    """Fresh in-memory DB seeded with project 'vdol' and task 'VDOL-001'."""
    from core import models
    from core.db import init_db

    database = init_db(":memory:")
    models.create_project(database, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"])
    models.create_task(database, "VDOL-001", "vdol", "Fix bug",
                       brief={"route_type": "debug"})
    yield database
    database.close()
def _mock_success():
m = MagicMock()
m.returncode = 0
m.stdout = json.dumps({
"status": "done",
"changes": [],
"notes": "",
})
m.stderr = ""
return m
# ---------------------------------------------------------------------------
# (1) _detect_test_command — file detection logic
# ---------------------------------------------------------------------------
class TestDetectTestCommand:
    """Unit tests for the file-based detection in _detect_test_command."""

    def test_makefile_with_test_target(self, tmp_path):
        """A Makefile with a 'test:' target resolves to 'make test'."""
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("build:\n\tgo build\n\ntest:\n\tgo test ./...\n")
        assert _detect_test_command(str(tmp_path)) == "make test"

    def test_makefile_without_test_target(self, tmp_path):
        """A Makefile lacking a 'test' target must not be selected."""
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("build:\n\tgo build\n")
        # No package.json / pyproject.toml / tsconfig.json either, so every
        # remaining candidate falls through and detection yields None.
        assert _detect_test_command(str(tmp_path)) is None

    def test_package_json_with_scripts_test(self, tmp_path):
        """package.json with scripts.test resolves to 'npm test'."""
        from agents.runner import _detect_test_command

        manifest = {"name": "myapp", "scripts": {"test": "vitest"}}
        (tmp_path / "package.json").write_text(json.dumps(manifest))
        assert _detect_test_command(str(tmp_path)) == "npm test"

    def test_package_json_without_scripts_test(self, tmp_path):
        """package.json without scripts.test must not be selected."""
        from agents.runner import _detect_test_command

        manifest = {"name": "myapp", "scripts": {"build": "vite build"}}
        (tmp_path / "package.json").write_text(json.dumps(manifest))
        assert _detect_test_command(str(tmp_path)) is None

    def test_package_json_invalid_json_skipped(self, tmp_path):
        """Malformed package.json is skipped without raising."""
        from agents.runner import _detect_test_command

        (tmp_path / "package.json").write_text("not json {{")
        assert _detect_test_command(str(tmp_path)) is None

    def test_pyproject_toml_returns_pytest(self, tmp_path):
        """pyproject.toml resolves to 'pytest'."""
        from agents.runner import _detect_test_command

        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")
        assert _detect_test_command(str(tmp_path)) == "pytest"

    def test_setup_py_returns_pytest(self, tmp_path):
        """setup.py resolves to 'pytest'."""
        from agents.runner import _detect_test_command

        (tmp_path / "setup.py").write_text("from setuptools import setup\nsetup(name='x')\n")
        assert _detect_test_command(str(tmp_path)) == "pytest"

    def test_tsconfig_returns_npx_tsc(self, tmp_path):
        """tsconfig.json resolves to 'npx tsc --noEmit'."""
        from agents.runner import _detect_test_command

        (tmp_path / "tsconfig.json").write_text('{"compilerOptions": {}}\n')
        assert _detect_test_command(str(tmp_path)) == "npx tsc --noEmit"

    def test_no_files_returns_none(self, tmp_path):
        """An empty directory yields None."""
        from agents.runner import _detect_test_command

        assert _detect_test_command(str(tmp_path)) is None

    def test_priority_makefile_over_package_json(self, tmp_path):
        """A Makefile test target outranks package.json."""
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tpytest\n")
        (tmp_path / "package.json").write_text(json.dumps({"scripts": {"test": "jest"}}))
        assert _detect_test_command(str(tmp_path)) == "make test"

    def test_priority_package_json_over_pyproject(self, tmp_path):
        """package.json scripts.test outranks pyproject.toml."""
        from agents.runner import _detect_test_command

        (tmp_path / "package.json").write_text(json.dumps({"scripts": {"test": "jest"}}))
        (tmp_path / "pyproject.toml").write_text("[build-system]\n")
        assert _detect_test_command(str(tmp_path)) == "npm test"

    def test_nonexistent_path_returns_none(self):
        """A nonexistent directory yields None instead of raising."""
        from agents.runner import _detect_test_command

        assert _detect_test_command("/nonexistent/path/xyz") is None
# ---------------------------------------------------------------------------
# (2) Pipeline: auto-detect used when test_command is NULL
# ---------------------------------------------------------------------------
class TestPipelineAutoDetect:
    """Pipeline consults _detect_test_command when project.test_command is NULL."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_null_test_command_triggers_autodetect(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """When test_command is NULL, _detect_test_command is invoked."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        mock_detect.return_value = "pytest"
        # Leave test_command as NULL (default) — this is what enables auto-detect.
        models.update_project(conn, "vdol", auto_test_enabled=True)
        steps = [{"role": "backend_dev", "brief": "implement"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_detect.assert_called_once()
        # _run_project_tests is invoked with the auto-detected command.
        mock_tests.assert_called_once()
        called_cmd = mock_tests.call_args[0][1]
        assert called_cmd == "pytest"

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_explicit_test_command_skips_autodetect(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """When test_command is set explicitly, _detect_test_command is never called."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        models.update_project(conn, "vdol", auto_test_enabled=True, test_command="npm test")
        steps = [{"role": "backend_dev", "brief": "implement"}]
        run_pipeline(conn, "VDOL-001", steps)
        mock_detect.assert_not_called()
        # The explicit command is passed through unchanged to _run_project_tests.
        called_cmd = mock_tests.call_args[0][1]
        assert called_cmd == "npm test"
# ---------------------------------------------------------------------------
# (3) Pipeline: no framework detected → skip without blocking
# ---------------------------------------------------------------------------
class TestPipelineNoFramework:
    """No detectable framework: tests are skipped without blocking the pipeline."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_no_framework_skips_tests(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """When _detect_test_command returns None, tests are skipped and the pipeline is not blocked."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_detect.return_value = None
        models.update_project(conn, "vdol", auto_test_enabled=True)
        steps = [{"role": "backend_dev", "brief": "implement"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        # Pipeline succeeds
        assert result["success"] is True
        # _run_project_tests NOT called
        mock_tests.assert_not_called()
        # Task NOT blocked
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] != "blocked"

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_no_framework_result_has_skipped_flag(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """On skip, results contain one entry with _skipped=True and output 'no test framework detected'."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_detect.return_value = None
        models.update_project(conn, "vdol", auto_test_enabled=True)
        steps = [{"role": "backend_dev", "brief": "implement"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        # Exactly one synthetic "_auto_test" entry should be recorded.
        auto_test_entries = [r for r in result.get("results", []) if r.get("role") == "_auto_test"]
        assert len(auto_test_entries) == 1
        entry = auto_test_entries[0]
        assert entry["_skipped"] is True
        assert entry["success"] is True
        assert "no test framework detected" in entry["output"]

View file

@ -789,10 +789,10 @@ def test_schema_project_has_test_command_column(conn):
assert "test_command" in cols
def test_test_command_default_is_make_test(conn):
"""KIN-ARCH-008: новый проект без test_command получает дефолт 'make test'."""
def test_test_command_default_is_null(conn):
"""KIN-101: новый проект без test_command получает NULL (авто-определение фреймворка)."""
p = models.create_project(conn, "prj_tc", "TC Project", "/tmp/tc")
assert p["test_command"] == "make test"
assert p["test_command"] is None
def test_test_command_can_be_set(conn):

View file

@ -218,7 +218,7 @@ class ProjectCreate(BaseModel):
ssh_user: str | None = None
ssh_key_path: str | None = None
ssh_proxy_jump: str | None = None
test_command: str = 'make test'
test_command: str | None = None
@model_validator(mode="after")
def validate_fields(self) -> "ProjectCreate":
@ -294,7 +294,8 @@ def patch_project(project_id: str, body: ProjectPatch):
if body.deploy_restart_cmd is not None:
fields["deploy_restart_cmd"] = None if body.deploy_restart_cmd == "" else body.deploy_restart_cmd
if body.test_command is not None:
fields["test_command"] = body.test_command
# Empty string = sentinel for enabling auto-detect (stores NULL)
fields["test_command"] = None if body.test_command == "" else body.test_command
if body.project_type is not None:
fields["project_type"] = body.project_type
if body.ssh_host is not None:
@ -419,7 +420,7 @@ def create_project(body: ProjectCreate):
ssh_key_path=body.ssh_key_path,
ssh_proxy_jump=body.ssh_proxy_jump,
)
if body.test_command != "make test":
if body.test_command is not None:
p = models.update_project(conn, body.id, test_command=body.test_command)
conn.close()
return p