"""
|
|||
|
|
Regression tests for KIN-101:
|
|||
|
|
Auto-test fallback: _detect_test_command and pipeline skip logic.
|
|||
|
|
|
|||
|
|
Coverage:
|
|||
|
|
(1) _detect_test_command — file-based detection
|
|||
|
|
(2) Pipeline: auto-detect used when test_command is NULL
|
|||
|
|
(3) Pipeline: skips tests with log when no framework detected (no blocking)
|
|||
|
|
"""
|
|||
|
|
|
|||
|
|
import json
import os

import pytest
from pathlib import Path
from unittest.mock import patch, MagicMock


# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------

@pytest.fixture
def conn():
    """In-memory SQLite DB seeded with one project ('vdol') and one task (VDOL-001)."""
    from core.db import init_db
    from core import models

    db = init_db(":memory:")
    models.create_project(
        db,
        "vdol",
        "ВДОЛЬ",
        "~/projects/vdolipoperek",
        tech_stack=["vue3"],
    )
    models.create_task(
        db,
        "VDOL-001",
        "vdol",
        "Fix bug",
        brief={"route_type": "debug"},
    )
    yield db
    db.close()


def _mock_success():
|
|||
|
|
m = MagicMock()
|
|||
|
|
m.returncode = 0
|
|||
|
|
m.stdout = json.dumps({
|
|||
|
|
"status": "done",
|
|||
|
|
"changes": [],
|
|||
|
|
"notes": "",
|
|||
|
|
})
|
|||
|
|
m.stderr = ""
|
|||
|
|
return m
|
|||
|
|
|
|||
|
|
|
|||
|
|
# ---------------------------------------------------------------------------
# (1) _detect_test_command — file detection logic
# ---------------------------------------------------------------------------

class TestDetectTestCommand:
    """File-based detection logic of agents.runner._detect_test_command."""

    def test_makefile_with_test_target(self, tmp_path):
        """A Makefile defining a 'test:' target maps to 'make test'."""
        from agents.runner import _detect_test_command
        makefile = tmp_path / "Makefile"
        makefile.write_text("build:\n\tgo build\n\ntest:\n\tgo test ./...\n")
        detected = _detect_test_command(str(tmp_path))
        assert detected == "make test"

    def test_makefile_without_test_target(self, tmp_path):
        """A Makefile lacking a 'test' target is not selected."""
        from agents.runner import _detect_test_command
        (tmp_path / "Makefile").write_text("build:\n\tgo build\n")
        # Falls through to other candidates; none exist here, so nothing is found.
        assert _detect_test_command(str(tmp_path)) is None

    def test_package_json_with_scripts_test(self, tmp_path):
        """package.json carrying scripts.test maps to 'npm test'."""
        from agents.runner import _detect_test_command
        manifest = {"name": "myapp", "scripts": {"test": "vitest"}}
        (tmp_path / "package.json").write_text(json.dumps(manifest))
        assert _detect_test_command(str(tmp_path)) == "npm test"

    def test_package_json_without_scripts_test(self, tmp_path):
        """package.json without scripts.test is not selected."""
        from agents.runner import _detect_test_command
        manifest = {"name": "myapp", "scripts": {"build": "vite build"}}
        (tmp_path / "package.json").write_text(json.dumps(manifest))
        assert _detect_test_command(str(tmp_path)) is None

    def test_package_json_invalid_json_skipped(self, tmp_path):
        """A malformed package.json is skipped without crashing."""
        from agents.runner import _detect_test_command
        (tmp_path / "package.json").write_text("not json {{")
        assert _detect_test_command(str(tmp_path)) is None

    def test_pyproject_toml_returns_pytest(self, tmp_path):
        """pyproject.toml maps to 'pytest'."""
        from agents.runner import _detect_test_command
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")
        assert _detect_test_command(str(tmp_path)) == "pytest"

    def test_setup_py_returns_pytest(self, tmp_path):
        """setup.py maps to 'pytest'."""
        from agents.runner import _detect_test_command
        (tmp_path / "setup.py").write_text("from setuptools import setup\nsetup(name='x')\n")
        assert _detect_test_command(str(tmp_path)) == "pytest"

    def test_tsconfig_returns_npx_tsc(self, tmp_path):
        """tsconfig.json maps to 'npx tsc --noEmit'."""
        from agents.runner import _detect_test_command
        (tmp_path / "tsconfig.json").write_text('{"compilerOptions": {}}\n')
        assert _detect_test_command(str(tmp_path)) == "npx tsc --noEmit"

    def test_no_files_returns_none(self, tmp_path):
        """An empty directory yields None."""
        from agents.runner import _detect_test_command
        assert _detect_test_command(str(tmp_path)) is None

    def test_priority_makefile_over_package_json(self, tmp_path):
        """A Makefile with a test target wins over package.json."""
        from agents.runner import _detect_test_command
        (tmp_path / "Makefile").write_text("test:\n\tpytest\n")
        (tmp_path / "package.json").write_text(json.dumps({"scripts": {"test": "jest"}}))
        assert _detect_test_command(str(tmp_path)) == "make test"

    def test_priority_package_json_over_pyproject(self, tmp_path):
        """package.json with scripts.test wins over pyproject.toml."""
        from agents.runner import _detect_test_command
        (tmp_path / "package.json").write_text(json.dumps({"scripts": {"test": "jest"}}))
        (tmp_path / "pyproject.toml").write_text("[build-system]\n")
        assert _detect_test_command(str(tmp_path)) == "npm test"

    def test_nonexistent_path_returns_none(self):
        """A non-existent directory yields None without raising."""
        from agents.runner import _detect_test_command
        assert _detect_test_command("/nonexistent/path/xyz") is None


# ---------------------------------------------------------------------------
# (2) Pipeline: auto-detect used when test_command is NULL
# ---------------------------------------------------------------------------

class TestPipelineAutoDetect:
    """Pipeline falls back to auto-detection when the project's test_command is NULL."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_null_test_command_triggers_autodetect(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """With test_command=NULL, _detect_test_command is consulted."""
        from agents.runner import run_pipeline
        from core import models

        mock_run.return_value = _mock_success()
        mock_detect.return_value = "pytest"
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        # test_command stays NULL (the default) — only enable auto tests.
        models.update_project(conn, "vdol", auto_test_enabled=True)

        pipeline_steps = [{"role": "backend_dev", "brief": "implement"}]
        outcome = run_pipeline(conn, "VDOL-001", pipeline_steps)

        assert outcome["success"] is True
        mock_detect.assert_called_once()
        # The detected command is what gets passed to _run_project_tests.
        mock_tests.assert_called_once()
        assert mock_tests.call_args[0][1] == "pytest"

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_explicit_test_command_skips_autodetect(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """An explicitly configured test_command bypasses _detect_test_command."""
        from agents.runner import run_pipeline
        from core import models

        mock_run.return_value = _mock_success()
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        models.update_project(conn, "vdol", auto_test_enabled=True, test_command="npm test")

        pipeline_steps = [{"role": "backend_dev", "brief": "implement"}]
        run_pipeline(conn, "VDOL-001", pipeline_steps)

        mock_detect.assert_not_called()
        assert mock_tests.call_args[0][1] == "npm test"


# ---------------------------------------------------------------------------
# (3) Pipeline: no framework detected → skip without blocking
# ---------------------------------------------------------------------------

class TestPipelineNoFramework:
    """No detectable framework: tests are skipped and the pipeline is not blocked."""

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_no_framework_skips_tests(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """When _detect_test_command returns None, tests are skipped without blocking."""
        from agents.runner import run_pipeline
        from core import models

        mock_run.return_value = _mock_success()
        mock_detect.return_value = None
        models.update_project(conn, "vdol", auto_test_enabled=True)

        pipeline_steps = [{"role": "backend_dev", "brief": "implement"}]
        outcome = run_pipeline(conn, "VDOL-001", pipeline_steps)

        # The pipeline still succeeds.
        assert outcome["success"] is True
        # No test run was attempted.
        mock_tests.assert_not_called()
        # The task must not end up blocked.
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] != "blocked"

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._detect_test_command")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_no_framework_result_has_skipped_flag(
        self, mock_run, mock_tests, mock_detect, mock_autocommit, conn
    ):
        """The skip leaves one result entry with _skipped=True and an explanatory output."""
        from agents.runner import run_pipeline
        from core import models

        mock_run.return_value = _mock_success()
        mock_detect.return_value = None
        models.update_project(conn, "vdol", auto_test_enabled=True)

        pipeline_steps = [{"role": "backend_dev", "brief": "implement"}]
        outcome = run_pipeline(conn, "VDOL-001", pipeline_steps)

        skipped_entries = [
            r for r in outcome.get("results", []) if r.get("role") == "_auto_test"
        ]
        assert len(skipped_entries) == 1
        record = skipped_entries[0]
        assert record["_skipped"] is True
        assert record["success"] is True
        assert "no test framework detected" in record["output"]