kin: auto-commit after pipeline

This commit is contained in:
Gros Frumos 2026-03-18 14:06:23 +02:00
parent 824341a972
commit e3a286ef6f
5 changed files with 1598 additions and 6 deletions

View file

@ -826,7 +826,7 @@ _WORKTREE_ROLES = {"backend_dev", "frontend_dev", "debugger"}
_DEV_GUARD_ROLES = {"backend_dev", "frontend_dev", "debugger"} _DEV_GUARD_ROLES = {"backend_dev", "frontend_dev", "debugger"}
def _detect_test_command(project_path: str) -> str | None: def _detect_test_command(project_path: str, role: str | None = None) -> str | None:
"""Auto-detect test command by inspecting project files. """Auto-detect test command by inspecting project files.
Candidates (in priority order): Candidates (in priority order):
@ -835,10 +835,22 @@ def _detect_test_command(project_path: str) -> str | None:
3. pytest pyproject.toml or setup.py exists 3. pytest pyproject.toml or setup.py exists
4. npx tsc --noEmit tsconfig.json exists 4. npx tsc --noEmit tsconfig.json exists
When role='backend_dev' and a Python project marker (pyproject.toml / setup.py)
is present, pytest is returned directly, bypassing make test. This prevents
false-positive failures in mixed projects whose Makefile test target also runs
frontend (e.g. vitest) commands that may be unrelated to backend changes.
Returns the first matching command, or None if no framework is detected. Returns the first matching command, or None if no framework is detected.
""" """
path = Path(project_path) path = Path(project_path)
# For backend_dev: Python project marker takes precedence over Makefile.
# Rationale: make test in mixed projects often runs frontend tests too;
# backend changes should only be validated by the Python test runner.
if role == "backend_dev":
if (path / "pyproject.toml").is_file() or (path / "setup.py").is_file():
return f"{sys.executable} -m pytest"
# 1. make test # 1. make test
makefile = path / "Makefile" makefile = path / "Makefile"
if makefile.is_file(): if makefile.is_file():
@ -1882,7 +1894,7 @@ def run_pipeline(
if p_test_cmd_override: if p_test_cmd_override:
p_test_cmd = p_test_cmd_override p_test_cmd = p_test_cmd_override
else: else:
p_test_cmd = _detect_test_command(p_path_str) p_test_cmd = _detect_test_command(p_path_str, role=role)
if p_test_cmd is None: if p_test_cmd is None:
# No test framework detected — skip without blocking pipeline # No test framework detected — skip without blocking pipeline

View file

@ -126,16 +126,25 @@ def generate_followups(
parsed = _try_parse_json(output) parsed = _try_parse_json(output)
if not isinstance(parsed, list): if not isinstance(parsed, list):
if isinstance(parsed, dict): if isinstance(parsed, dict):
parsed = parsed.get("tasks") or parsed.get("followups") or [] if "tasks" in parsed:
parsed = parsed["tasks"]
elif "followups" in parsed:
parsed = parsed["followups"]
else:
parsed = []
else: else:
return {"created": [], "pending_actions": []} return {"created": [], "pending_actions": []}
# Guard: extracted value might be null/non-list (e.g. {"tasks": null})
if not isinstance(parsed, list):
parsed = []
# Separate permission-blocked items from normal ones # Separate permission-blocked items from normal ones
created = [] created = []
pending_actions = [] pending_actions = []
for item in parsed: for item in parsed:
if not isinstance(item, dict) or "title" not in item: if not isinstance(item, dict) or not item.get("title"):
continue continue
if _is_permission_blocked(item): if _is_permission_blocked(item):

View file

@ -31,13 +31,27 @@ def validate_completion_mode(value: str) -> str:
return "review" return "review"
# Columns that are stored as JSON strings and must be decoded on read.
# Text fields (title, description, name, etc.) are NOT in this set.
_JSON_COLUMNS: frozenset[str] = frozenset({
"tech_stack",
"brief", "spec", "review", "test_result", "security_result", "labels",
"tags",
"dependencies",
"steps",
"artifacts", "decisions_made", "blockers",
"extra_json",
"pending_actions",
})
def _row_to_dict(row: sqlite3.Row | None) -> dict | None: def _row_to_dict(row: sqlite3.Row | None) -> dict | None:
"""Convert sqlite3.Row to dict with JSON fields decoded.""" """Convert sqlite3.Row to dict with JSON fields decoded."""
if row is None: if row is None:
return None return None
d = dict(row) d = dict(row)
for key, val in d.items(): for key, val in d.items():
if isinstance(val, str) and val.startswith(("[", "{")): if key in _JSON_COLUMNS and isinstance(val, str) and val.startswith(("[", "{")):
try: try:
d[key] = json.loads(val) d[key] = json.loads(val)
except (json.JSONDecodeError, ValueError): except (json.JSONDecodeError, ValueError):

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,245 @@
"""Regression tests for KIN-124 — auto-test ложно определяет failure.
Root cause: make test запускал vitest после pytest, vitest падал на всех 16
тест-файлах с useI18n() без плагина i18n make test возвращал exit code != 0
auto-test считал весь прогон failed.
Исправления:
1. web/frontend/vite.config.ts: добавлен setupFiles с vitest-setup.ts
2. web/frontend/src/__tests__/vitest-setup.ts: глобальный i18n plugin для mount()
3. _detect_test_command(role='backend_dev'): возвращает pytest напрямую (не make test)
это предотвращает запуск vitest при backend_dev auto-test
Coverage:
(1) _run_project_tests: exit code 0 + "1533 passed" success=True (главный регрессион)
(2) _run_project_tests: exit code 1 + "2 failed" success=False
(3) _run_project_tests: exit code 0 + output содержит "failed" в середине success=True
(success определяется ТОЛЬКО по returncode, не по строке вывода)
(4) _detect_test_command: backend_dev + pyproject.toml возвращает pytest, не make test
(5) _detect_test_command: backend_dev + pyproject.toml + Makefile всё равно pytest
(6) _detect_test_command: frontend_dev + Makefile с test: возвращает make test
(7) _detect_test_command: frontend_dev + pyproject.toml (без Makefile) возвращает pytest
(8) _run_project_tests: timeout success=False, returncode=124
(9) _run_project_tests: команда не найдена success=False, returncode=127
(10) vite.config.ts содержит setupFiles с vitest-setup.ts
(11) vitest-setup.ts устанавливает i18n plugin глобально
"""
import subprocess
import sys
import os
from pathlib import Path
from unittest.mock import patch, MagicMock
import pytest
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_subprocess_result(returncode: int, stdout: str = "", stderr: str = "") -> MagicMock:
"""Build a MagicMock simulating subprocess.CompletedProcess."""
r = MagicMock()
r.returncode = returncode
r.stdout = stdout
r.stderr = stderr
return r
# ---------------------------------------------------------------------------
# (1-3) _run_project_tests: success determined solely by returncode
# ---------------------------------------------------------------------------
class TestRunProjectTestsSuccessDetermination:
    """_run_project_tests must decide pass/fail from returncode alone,
    never by scanning the captured output for keywords."""

    def test_exit_code_0_with_1533_passed_returns_success_true(self):
        """Regression KIN-124: returncode 0 with a passing summary is a success."""
        from agents.runner import _run_project_tests
        fake = _make_subprocess_result(
            returncode=0,
            stdout="===================== 1533 passed in 42.7s =====================\n",
        )
        with patch("subprocess.run", return_value=fake):
            result = _run_project_tests("/tmp/proj", "pytest")
        assert result["success"] is True, (
            f"Expected success=True for returncode=0, got: {result}"
        )
        assert result["returncode"] == 0

    def test_exit_code_1_with_failed_output_returns_success_false(self):
        """A non-zero returncode is reported as a failure."""
        from agents.runner import _run_project_tests
        fake = _make_subprocess_result(
            returncode=1,
            stdout="FAILED tests/test_foo.py::test_bar\n2 failed, 10 passed in 3.1s\n",
        )
        with patch("subprocess.run", return_value=fake):
            result = _run_project_tests("/tmp/proj", "pytest")
        assert result["success"] is False, (
            f"Expected success=False for returncode=1, got: {result}"
        )
        assert result["returncode"] == 1

    def test_exit_code_0_with_failed_substring_in_output_returns_success_true(self):
        """The word 'failed' inside a passing run's output must not flip the verdict.

        Example: a test name containing 'failed' should not confuse auto-test;
        only returncode matters.
        """
        from agents.runner import _run_project_tests
        fake = _make_subprocess_result(
            returncode=0,
            stdout=(
                "tests/test_retry.py::test_handles_previously_failed_request PASSED\n"
                "1 passed in 0.5s\n"
            ),
        )
        with patch("subprocess.run", return_value=fake):
            result = _run_project_tests("/tmp/proj", "pytest")
        assert result["success"] is True, (
            "success must be True when returncode=0, even if 'failed' appears in output"
        )

    def test_output_is_concatenation_of_stdout_and_stderr(self):
        """Both captured streams end up in the result's output field."""
        from agents.runner import _run_project_tests
        fake = _make_subprocess_result(
            returncode=0,
            stdout="1 passed\n",
            stderr="PytestWarning: something\n",
        )
        with patch("subprocess.run", return_value=fake):
            result = _run_project_tests("/tmp/proj", "pytest")
        assert "1 passed" in result["output"]
        assert "PytestWarning" in result["output"]
# ---------------------------------------------------------------------------
# (8-9) _run_project_tests: error handling
# ---------------------------------------------------------------------------
class TestRunProjectTestsErrorHandling:
    """Failure modes where subprocess.run never yields a CompletedProcess."""

    def test_timeout_returns_success_false_and_returncode_124(self):
        """A hung run maps to the conventional timeout exit code 124."""
        from agents.runner import _run_project_tests
        boom = subprocess.TimeoutExpired(cmd="pytest", timeout=60)
        with patch("subprocess.run", side_effect=boom):
            result = _run_project_tests("/tmp/proj", "pytest", timeout=60)
        assert result["success"] is False
        assert result["returncode"] == 124
        assert "timed out" in result["output"].lower()

    def test_command_not_found_returns_success_false_and_returncode_127(self):
        """A missing binary maps to the conventional not-found exit code 127."""
        from agents.runner import _run_project_tests
        with patch("subprocess.run", side_effect=FileNotFoundError("pytest: not found")):
            result = _run_project_tests("/tmp/proj", "pytest")
        assert result["success"] is False
        assert result["returncode"] == 127
# ---------------------------------------------------------------------------
# (4-7) _detect_test_command: role-based logic
# ---------------------------------------------------------------------------
class TestDetectTestCommandRoleLogic:
    """For backend_dev, a Python project marker must win over the Makefile so
    that vitest is never triggered by backend-only changes (root cause of
    KIN-124)."""

    def test_backend_dev_with_pyproject_toml_returns_pytest_not_make_test(self, tmp_path):
        """Regression KIN-124: backend_dev + pyproject.toml resolves to pytest."""
        from agents.runner import _detect_test_command
        # Both markers present — the Python one must take precedence for backend_dev.
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")
        (tmp_path / "Makefile").write_text("test:\n\tmake test\n")
        cmd = _detect_test_command(str(tmp_path), role="backend_dev")
        assert cmd is not None
        assert "pytest" in cmd, (
            f"Expected pytest command for backend_dev, got: {cmd!r}. "
            "backend_dev must not run make test (which triggers vitest)."
        )
        assert "make" not in cmd, (
            f"backend_dev must not use make test, got: {cmd!r}"
        )

    def test_backend_dev_with_only_pyproject_toml_returns_pytest(self, tmp_path):
        """backend_dev with only pyproject.toml (no Makefile) still gets pytest."""
        from agents.runner import _detect_test_command
        (tmp_path / "pyproject.toml").write_text("[build-system]\n")
        cmd = _detect_test_command(str(tmp_path), role="backend_dev")
        assert cmd is not None
        assert "pytest" in cmd

    def test_frontend_dev_with_makefile_returns_make_test(self, tmp_path):
        """frontend_dev keeps the Makefile test target — correct for frontend."""
        from agents.runner import _detect_test_command
        (tmp_path / "Makefile").write_text("test:\n\tnpm test\n")
        cmd = _detect_test_command(str(tmp_path), role="frontend_dev")
        assert cmd == "make test", (
            f"Expected 'make test' for frontend_dev with Makefile, got: {cmd!r}"
        )

    def test_frontend_dev_with_pyproject_toml_no_makefile_returns_pytest(self, tmp_path):
        """Without a Makefile, frontend_dev falls back to the pytest detection."""
        from agents.runner import _detect_test_command
        (tmp_path / "pyproject.toml").write_text("[tool.pytest]\n")
        cmd = _detect_test_command(str(tmp_path), role="frontend_dev")
        assert cmd is not None
        assert "pytest" in cmd

    def test_no_markers_returns_none(self, tmp_path):
        """An empty project directory yields no detected command."""
        from agents.runner import _detect_test_command
        assert _detect_test_command(str(tmp_path)) is None
# ---------------------------------------------------------------------------
# (10-11) Frontend vitest setup files
# ---------------------------------------------------------------------------
class TestVitestSetupFiles:
    """Static checks on the frontend files that fix the KIN-124 root cause."""

    # Repo-relative frontend root, shared by all checks below.
    _FRONTEND = Path(__file__).parent.parent / "web/frontend"

    def test_vite_config_has_setup_files(self):
        """vite.config.ts must wire in the shared vitest setup module."""
        vite_config = self._FRONTEND / "vite.config.ts"
        assert vite_config.exists(), "vite.config.ts not found"
        content = vite_config.read_text()
        assert "setupFiles" in content, (
            "vite.config.ts must have setupFiles to load global vitest setup"
        )
        assert "vitest-setup" in content, (
            "setupFiles must reference vitest-setup.ts"
        )

    def test_vitest_setup_file_exists(self):
        """The global i18n setup module must exist on disk."""
        setup_file = self._FRONTEND / "src/__tests__/vitest-setup.ts"
        assert setup_file.exists(), (
            "vitest-setup.ts not found — global i18n setup is missing, "
            "vitest will fail on all useI18n() components"
        )

    def test_vitest_setup_registers_i18n_plugin(self):
        """The setup module must inject i18n into every mounted component."""
        setup_file = self._FRONTEND / "src/__tests__/vitest-setup.ts"
        assert setup_file.exists()
        content = setup_file.read_text()
        assert "i18n" in content, (
            "vitest-setup.ts must register the i18n plugin"
        )
        assert "config.global.plugins" in content, (
            "vitest-setup.ts must set config.global.plugins to inject i18n into all mounts"
        )