# kin/tests/test_kin_124_regression.py
"""Regression tests for KIN-124 — auto-test ложно определяет failure.
Root cause: make test запускал vitest после pytest, vitest падал на всех 16
тест-файлах с useI18n() без плагина i18n make test возвращал exit code != 0
auto-test считал весь прогон failed.
Исправления:
1. web/frontend/vite.config.ts: добавлен setupFiles с vitest-setup.ts
2. web/frontend/src/__tests__/vitest-setup.ts: глобальный i18n plugin для mount()
3. _detect_test_command(role='backend_dev'): возвращает pytest напрямую (не make test)
это предотвращает запуск vitest при backend_dev auto-test
Coverage:
(1) _run_project_tests: exit code 0 + "1533 passed" success=True (главный регрессион)
(2) _run_project_tests: exit code 1 + "2 failed" success=False
(3) _run_project_tests: exit code 0 + output содержит "failed" в середине success=True
(success определяется ТОЛЬКО по returncode, не по строке вывода)
(4) _detect_test_command: backend_dev + pyproject.toml возвращает pytest, не make test
(5) _detect_test_command: backend_dev + pyproject.toml + Makefile всё равно pytest
(6) _detect_test_command: frontend_dev + Makefile с test: возвращает make test
(7) _detect_test_command: frontend_dev + pyproject.toml (без Makefile) возвращает pytest
(8) _run_project_tests: timeout success=False, returncode=124
(9) _run_project_tests: команда не найдена success=False, returncode=127
(10) vite.config.ts содержит setupFiles с vitest-setup.ts
(11) vitest-setup.ts устанавливает i18n plugin глобально
"""
import subprocess
import sys
import os
from pathlib import Path
from unittest.mock import patch, MagicMock
import pytest
# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------
def _make_subprocess_result(returncode: int, stdout: str = "", stderr: str = "") -> MagicMock:
"""Build a MagicMock simulating subprocess.CompletedProcess."""
r = MagicMock()
r.returncode = returncode
r.stdout = stdout
r.stderr = stderr
return r
# ---------------------------------------------------------------------------
# (1-3) _run_project_tests: success determined solely by returncode
# ---------------------------------------------------------------------------
class TestRunProjectTestsSuccessDetermination:
    """Success/failure of _run_project_tests is decided by exit code alone.

    The runner must never grep the captured output for words like
    'passed'/'failed' — that string matching was the KIN-124 failure mode.
    """

    @patch("subprocess.run")
    def test_exit_code_0_with_1533_passed_returns_success_true(self, mock_run):
        """Regression KIN-124: returncode 0 with '1533 passed' reports success."""
        from agents.runner import _run_project_tests

        summary = "===================== 1533 passed in 42.7s =====================\n"
        mock_run.return_value = _make_subprocess_result(0, stdout=summary)

        result = _run_project_tests("/tmp/proj", "pytest")

        assert result["success"] is True, (
            f"Expected success=True for returncode=0, got: {result}"
        )
        assert result["returncode"] == 0

    @patch("subprocess.run")
    def test_exit_code_1_with_failed_output_returns_success_false(self, mock_run):
        """returncode 1 with '2 failed' in the output reports failure."""
        from agents.runner import _run_project_tests

        failing = "FAILED tests/test_foo.py::test_bar\n2 failed, 10 passed in 3.1s\n"
        mock_run.return_value = _make_subprocess_result(1, stdout=failing)

        result = _run_project_tests("/tmp/proj", "pytest")

        assert result["success"] is False, (
            f"Expected success=False for returncode=1, got: {result}"
        )
        assert result["returncode"] == 1

    @patch("subprocess.run")
    def test_exit_code_0_with_failed_substring_in_output_returns_success_true(self, mock_run):
        """A stray 'failed' substring must not flip a clean run to failure.

        Success is keyed on returncode only; for example a test *name*
        containing 'failed' must not confuse auto-test.
        """
        from agents.runner import _run_project_tests

        log = (
            "tests/test_retry.py::test_handles_previously_failed_request PASSED\n"
            "1 passed in 0.5s\n"
        )
        mock_run.return_value = _make_subprocess_result(0, stdout=log)

        result = _run_project_tests("/tmp/proj", "pytest")

        assert result["success"] is True, (
            "success must be True when returncode=0, even if 'failed' appears in output"
        )

    @patch("subprocess.run")
    def test_output_is_concatenation_of_stdout_and_stderr(self, mock_run):
        """The 'output' field carries both captured streams."""
        from agents.runner import _run_project_tests

        mock_run.return_value = _make_subprocess_result(
            0, stdout="1 passed\n", stderr="PytestWarning: something\n"
        )

        result = _run_project_tests("/tmp/proj", "pytest")

        assert "1 passed" in result["output"]
        assert "PytestWarning" in result["output"]
# ---------------------------------------------------------------------------
# (8-9) _run_project_tests: error handling
# ---------------------------------------------------------------------------
class TestRunProjectTestsErrorHandling:
    """_run_project_tests must degrade gracefully when the command cannot run."""

    @patch("subprocess.run", side_effect=subprocess.TimeoutExpired(cmd="pytest", timeout=60))
    def test_timeout_returns_success_false_and_returncode_124(self, mock_run):
        """A hung run maps to failure with the conventional timeout code 124."""
        from agents.runner import _run_project_tests

        result = _run_project_tests("/tmp/proj", "pytest", timeout=60)

        assert result["success"] is False
        assert result["returncode"] == 124
        assert "timed out" in result["output"].lower()

    @patch("subprocess.run", side_effect=FileNotFoundError("pytest: not found"))
    def test_command_not_found_returns_success_false_and_returncode_127(self, mock_run):
        """A missing binary maps to failure with the shell convention code 127."""
        from agents.runner import _run_project_tests

        result = _run_project_tests("/tmp/proj", "pytest")

        assert result["success"] is False
        assert result["returncode"] == 127
# ---------------------------------------------------------------------------
# (4-7) _detect_test_command: role-based logic
# ---------------------------------------------------------------------------
class TestDetectTestCommandRoleLogic:
    """Role-aware test-command detection.

    For backend_dev with pyproject.toml present, _detect_test_command must
    return pytest rather than `make test`, so backend-only changes never run
    the frontend vitest suite (the KIN-124 root cause).
    """

    def test_backend_dev_with_pyproject_toml_returns_pytest_not_make_test(self, tmp_path):
        """Regression KIN-124: backend_dev + pyproject.toml picks pytest over make."""
        from agents.runner import _detect_test_command

        # Both markers present: pyproject.toml AND a Makefile with a test target.
        (tmp_path / "pyproject.toml").write_text("[tool.pytest.ini_options]\n")
        (tmp_path / "Makefile").write_text("test:\n\tmake test\n")

        cmd = _detect_test_command(str(tmp_path), role="backend_dev")

        assert cmd is not None
        assert "pytest" in cmd, (
            f"Expected pytest command for backend_dev, got: {cmd!r}. "
            "backend_dev must not run make test (which triggers vitest)."
        )
        assert "make" not in cmd, (
            f"backend_dev must not use make test, got: {cmd!r}"
        )

    def test_backend_dev_with_only_pyproject_toml_returns_pytest(self, tmp_path):
        """backend_dev with only pyproject.toml (no Makefile) picks pytest."""
        from agents.runner import _detect_test_command

        (tmp_path / "pyproject.toml").write_text("[build-system]\n")

        cmd = _detect_test_command(str(tmp_path), role="backend_dev")

        assert cmd is not None
        assert "pytest" in cmd

    def test_frontend_dev_with_makefile_returns_make_test(self, tmp_path):
        """frontend_dev with a Makefile test target correctly keeps make test."""
        from agents.runner import _detect_test_command

        (tmp_path / "Makefile").write_text("test:\n\tnpm test\n")

        cmd = _detect_test_command(str(tmp_path), role="frontend_dev")

        assert cmd == "make test", (
            f"Expected 'make test' for frontend_dev with Makefile, got: {cmd!r}"
        )

    def test_frontend_dev_with_pyproject_toml_no_makefile_returns_pytest(self, tmp_path):
        """frontend_dev falls back to pytest when only pyproject.toml exists."""
        from agents.runner import _detect_test_command

        (tmp_path / "pyproject.toml").write_text("[tool.pytest]\n")

        cmd = _detect_test_command(str(tmp_path), role="frontend_dev")

        assert cmd is not None
        assert "pytest" in cmd

    def test_no_markers_returns_none(self, tmp_path):
        """An empty directory yields None: no test framework detected."""
        from agents.runner import _detect_test_command

        assert _detect_test_command(str(tmp_path)) is None
# ---------------------------------------------------------------------------
# (10-11) Frontend vitest setup files
# ---------------------------------------------------------------------------
class TestVitestSetupFiles:
    """Verify the vitest setup changes that fix the KIN-124 root cause.

    These are file-content checks against the repository tree: the frontend
    must load a global vitest setup file that installs the i18n plugin so
    components using useI18n() mount without per-test wiring.
    """

    # Repository root relative to this test file (kin/tests/ -> kin/).
    # Hoisted to class level so the two interesting paths are built once,
    # instead of being re-derived (identically) inside each test method.
    _REPO_ROOT = Path(__file__).parent.parent
    _VITE_CONFIG = _REPO_ROOT / "web/frontend/vite.config.ts"
    _VITEST_SETUP = _REPO_ROOT / "web/frontend/src/__tests__/vitest-setup.ts"

    def test_vite_config_has_setup_files(self):
        """vite.config.ts must declare setupFiles pointing to vitest-setup.ts."""
        assert self._VITE_CONFIG.exists(), "vite.config.ts not found"
        content = self._VITE_CONFIG.read_text()
        assert "setupFiles" in content, (
            "vite.config.ts must have setupFiles to load global vitest setup"
        )
        assert "vitest-setup" in content, (
            "setupFiles must reference vitest-setup.ts"
        )

    def test_vitest_setup_file_exists(self):
        """web/frontend/src/__tests__/vitest-setup.ts must exist."""
        assert self._VITEST_SETUP.exists(), (
            "vitest-setup.ts not found — global i18n setup is missing, "
            "vitest will fail on all useI18n() components"
        )

    def test_vitest_setup_registers_i18n_plugin(self):
        """vitest-setup.ts must register i18n as a global plugin."""
        assert self._VITEST_SETUP.exists()
        content = self._VITEST_SETUP.read_text()
        assert "i18n" in content, (
            "vitest-setup.ts must register the i18n plugin"
        )
        assert "config.global.plugins" in content, (
            "vitest-setup.ts must set config.global.plugins to inject i18n into all mounts"
        )