kin: KIN-091 Улучшения из исследования рынка: (1) Revise button с feedback loop, (2) auto-test before review — агент сам прогоняет тесты и фиксит до review, (3) spec-driven workflow для новых проектов — constitution → spec → plan → tasks, (4) git worktrees для параллельных агентов без конфликтов, (5) auto-trigger pipeline при создании задачи с label auto

This commit is contained in:
Gros Frumos 2026-03-16 22:35:31 +02:00
parent 0cc063d47a
commit 0ccd451b4b
14 changed files with 1660 additions and 18 deletions

View file

@ -448,11 +448,12 @@ class TestAttachmentsInContext:
assert "mockup.jpg" in filenames
assert "/tmp/prj/.kin/attachments/PRJ-001/screenshot.png" in paths
def test_build_context_no_attachments_key_when_empty(self, conn):
"""KIN-090: ключ 'attachments' отсутствует в контексте, если вложений нет."""
def test_build_context_attachments_key_always_present(self, conn):
"""KIN-094 #213: ключ 'attachments' всегда присутствует в контексте (пустой список если нет вложений)."""
# conn fixture has no attachments
ctx = build_context(conn, "VDOL-001", "debugger", "vdol")
assert "attachments" not in ctx
assert "attachments" in ctx
assert ctx["attachments"] == []
def test_all_roles_get_attachments(self, conn_with_attachments):
"""KIN-090: AC2 — все роли (debugger, pm, tester, reviewer) получают вложения."""
@ -473,3 +474,193 @@ class TestAttachmentsInContext:
ctx = build_context(conn, "VDOL-001", "debugger", "vdol")
prompt = format_prompt(ctx, "debugger", "Debug this.")
assert "## Attachments" not in prompt
# ---------------------------------------------------------------------------
# KIN-094: Attachments — ctx["attachments"] always present + inline text content
# ---------------------------------------------------------------------------
class TestAttachmentsKIN094:
    """KIN-094: AC3 — PM and every other agent role always receive the
    'attachments' key in their context; text files <= 32 KB are inlined
    into the prompt."""

    @staticmethod
    def _db_with_task(project_path):
        """New in-memory DB seeded with project 'prj' (rooted at project_path) and task PRJ-001."""
        c = init_db(":memory:")
        models.create_project(c, "prj", "Prj", project_path)
        models.create_task(c, "PRJ-001", "prj", "Task")
        return c

    def _attach_text(self, tmp_path, filename, content, mime):
        """Seeded DB whose task carries one text attachment written to disk under tmp_path."""
        c = self._db_with_task(str(tmp_path))
        file_path = tmp_path / filename
        file_path.write_text(content, encoding="utf-8")
        models.create_attachment(
            c, "PRJ-001", filename, str(file_path), mime, file_path.stat().st_size,
        )
        return c

    @pytest.fixture
    def conn_no_attachments(self):
        """Project/task without any attachments."""
        c = self._db_with_task("/tmp/prj")
        yield c
        c.close()

    @pytest.fixture
    def conn_text_attachment(self, tmp_path):
        """Project with a text attachment <= 32 KB present on disk."""
        c = self._attach_text(tmp_path, "spec.txt", "Привет, это спека задачи", "text/plain")
        yield c
        c.close()

    @pytest.fixture
    def conn_md_attachment(self, tmp_path):
        """Project with a .md attachment (text/markdown, or detected by extension)."""
        c = self._attach_text(tmp_path, "README.md", "# Title\n\nContent of readme", "text/markdown")
        yield c
        c.close()

    @pytest.fixture
    def conn_json_attachment(self, tmp_path):
        """Project with a JSON attachment (application/json)."""
        c = self._attach_text(tmp_path, "config.json", '{"key": "value"}', "application/json")
        yield c
        c.close()

    @pytest.fixture
    def conn_large_text_attachment(self, tmp_path):
        """Project with a text attachment > 32 KB (must NOT be inlined)."""
        c = self._attach_text(tmp_path, "big.txt", "x" * (32 * 1024 + 1), "text/plain")
        yield c
        c.close()

    @pytest.fixture
    def conn_image_attachment(self, tmp_path):
        """Project with a binary PNG attachment (must NOT be inlined)."""
        c = self._db_with_task(str(tmp_path))
        png_file = tmp_path / "screen.png"
        png_file.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 64)
        models.create_attachment(
            c, "PRJ-001", "screen.png", str(png_file), "image/png", png_file.stat().st_size,
        )
        yield c
        c.close()

    # ------------------------------------------------------------------
    # ctx["attachments"] always present
    # ------------------------------------------------------------------
    def test_pm_context_attachments_empty_list_when_no_attachments(self, conn_no_attachments):
        """KIN-094: PM receives an empty attachments list, not a missing key."""
        ctx = build_context(conn_no_attachments, "PRJ-001", "pm", "prj")
        assert "attachments" in ctx
        assert ctx["attachments"] == []

    def test_all_roles_attachments_key_present_when_empty(self, conn_no_attachments):
        """KIN-094: every role gets the attachments key (empty list) even with no attachments."""
        roles = ("pm", "debugger", "tester", "reviewer", "backend_dev", "frontend_dev", "architect")
        for role in roles:
            ctx = build_context(conn_no_attachments, "PRJ-001", role, "prj")
            assert "attachments" in ctx, f"Role '{role}' missing 'attachments' key"
            assert isinstance(ctx["attachments"], list), f"Role '{role}': attachments is not a list"

    # ------------------------------------------------------------------
    # Inline content for small text files
    # ------------------------------------------------------------------
    def test_format_prompt_inlines_small_text_file_content(self, conn_text_attachment):
        """KIN-094: content of a text file <= 32 KB appears inline in the prompt."""
        ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj")
        assert "Привет, это спека задачи" in format_prompt(ctx, "pm", "You are PM.")

    def test_format_prompt_inlines_text_file_in_code_block(self, conn_text_attachment):
        """KIN-094: inlined content is wrapped in a fenced code block (``` ... ```)."""
        ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj")
        assert "```" in format_prompt(ctx, "pm", "You are PM.")

    def test_format_prompt_inlines_md_file_by_extension(self, conn_md_attachment):
        """KIN-094: a .md file is recognized by extension and inlined."""
        ctx = build_context(conn_md_attachment, "PRJ-001", "pm", "prj")
        prompt = format_prompt(ctx, "pm", "You are PM.")
        assert "# Title" in prompt
        assert "Content of readme" in prompt

    def test_format_prompt_inlines_json_file_by_mime(self, conn_json_attachment):
        """KIN-094: an application/json file is inlined based on its MIME type."""
        ctx = build_context(conn_json_attachment, "PRJ-001", "pm", "prj")
        assert '"key": "value"' in format_prompt(ctx, "pm", "You are PM.")

    # ------------------------------------------------------------------
    # NOT inlined: binary and large files
    # ------------------------------------------------------------------
    def test_format_prompt_does_not_inline_image_file(self, conn_image_attachment):
        """KIN-094: a binary PNG is listed but its raw content is NOT inlined."""
        ctx = build_context(conn_image_attachment, "PRJ-001", "pm", "prj")
        prompt = format_prompt(ctx, "pm", "You are PM.")
        # Listed in the ## Attachments section with its MIME type...
        assert "screen.png" in prompt
        assert "image/png" in prompt
        # ...but the PNG header bytes must not leak into the prompt.
        assert "\x89PNG" not in prompt

    def test_format_prompt_does_not_inline_large_text_file(self, conn_large_text_attachment):
        """KIN-094: a text file > 32 KB is listed but NOT inlined."""
        ctx = build_context(conn_large_text_attachment, "PRJ-001", "pm", "prj")
        prompt = format_prompt(ctx, "pm", "You are PM.")
        assert "big.txt" in prompt  # listed
        # The 32KB+1 run of 'x' characters must not be inlined.
        assert "x" * 100 not in prompt

    # ------------------------------------------------------------------
    # Resilience: missing file on disk
    # ------------------------------------------------------------------
    def test_format_prompt_handles_missing_file_gracefully(self, tmp_path):
        """KIN-094: format_prompt does not raise when the file is missing on disk."""
        c = self._db_with_task(str(tmp_path))
        # Attachment row points at a file that was never written to disk.
        models.create_attachment(
            c, "PRJ-001", "missing.txt",
            str(tmp_path / "missing.txt"),
            "text/plain", 100,
        )
        ctx = build_context(c, "PRJ-001", "pm", "prj")
        # Must not raise — the read error is swallowed.
        prompt = format_prompt(ctx, "pm", "You are PM.")
        assert "missing.txt" in prompt  # still listed
        c.close()

    # ------------------------------------------------------------------
    # PM pipeline: attachments available in brief context
    # ------------------------------------------------------------------
    def test_pm_context_includes_attachment_paths_for_pipeline(self, conn_text_attachment):
        """KIN-094: the PM agent gets attachment paths in its context for pipeline start."""
        ctx = build_context(conn_text_attachment, "PRJ-001", "pm", "prj")
        assert len(ctx["attachments"]) == 1
        attachment = ctx["attachments"][0]
        assert attachment["filename"] == "spec.txt"
        assert attachment["mime_type"] == "text/plain"
        assert "path" in attachment

View file

@ -0,0 +1,551 @@
"""
Regression tests for KIN-091:
(1) Revise button feedback loop, revise_count, target_role, max limit
(2) Auto-test before review _run_project_tests, fix loop, block on exhaustion
(3) Spec-driven workflow route exists and has correct steps in specialists.yaml
(4) Git worktrees create/merge/cleanup/ensure_gitignore with mocked subprocess
(5) Auto-trigger pipeline task with label 'auto' triggers pipeline on creation
"""
import json
import subprocess
import pytest
from pathlib import Path
from unittest.mock import patch, MagicMock, call
import web.api as api_module
# ---------------------------------------------------------------------------
# Fixtures
# ---------------------------------------------------------------------------
@pytest.fixture
def client(tmp_path):
    """TestClient bound to a fresh per-test SQLite DB, seeded with project p1
    and one task (P1-001).

    Fix: the original fixture reassigned the module-global api_module.DB_PATH
    and never restored it, leaking state into later tests. The fixture now
    yields and restores the original value on teardown.
    """
    from fastapi.testclient import TestClient

    from web.api import app

    original_db_path = api_module.DB_PATH
    api_module.DB_PATH = tmp_path / "test.db"
    try:
        c = TestClient(app)
        # Seed data: project "p1" and a first task (id becomes P1-001).
        c.post("/api/projects", json={"id": "p1", "name": "P1", "path": "/tmp/p1"})
        c.post("/api/tasks", json={"project_id": "p1", "title": "Fix bug"})
        yield c
    finally:
        api_module.DB_PATH = original_db_path
@pytest.fixture
def conn():
    """In-memory DB seeded with the 'vdol' project and task VDOL-001."""
    from core import models
    from core.db import init_db

    db = init_db(":memory:")
    models.create_project(
        db, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek", tech_stack=["vue3"]
    )
    models.create_task(db, "VDOL-001", "vdol", "Fix bug", brief={"route_type": "debug"})
    yield db
    db.close()
# ---------------------------------------------------------------------------
# (1) Revise button — revise_count, target_role, max limit
# ---------------------------------------------------------------------------
class TestReviseEndpoint:
    """(1) Revise button: revise_count, target_role persistence, max-revision cap."""

    @staticmethod
    def _update_task(**fields):
        """Apply field updates directly to task P1-001 in the test DB."""
        from core import models
        from core.db import init_db

        db = init_db(api_module.DB_PATH)
        models.update_task(db, "P1-001", **fields)
        db.close()

    def test_revise_increments_revise_count(self, client):
        """revise_count starts at 0 and is bumped by 1 on every call."""
        first = client.post("/api/tasks/P1-001/revise", json={"comment": "ещё раз"})
        assert first.status_code == 200
        assert first.json()["revise_count"] == 1
        second = client.post("/api/tasks/P1-001/revise", json={"comment": "и ещё"})
        assert second.status_code == 200
        assert second.json()["revise_count"] == 2

    def test_revise_stores_target_role(self, client):
        """target_role is persisted on the task row in the DB."""
        from core.db import init_db

        resp = client.post(
            "/api/tasks/P1-001/revise",
            json={"comment": "доработай бэкенд", "target_role": "backend_dev"},
        )
        assert resp.status_code == 200
        db = init_db(api_module.DB_PATH)
        try:
            row = db.execute(
                "SELECT revise_target_role FROM tasks WHERE id = 'P1-001'"
            ).fetchone()
        finally:
            db.close()
        assert row["revise_target_role"] == "backend_dev"

    def test_revise_target_role_builds_short_steps(self, client):
        """When target_role is given, pipeline_steps == [target_role, reviewer]."""
        resp = client.post(
            "/api/tasks/P1-001/revise",
            json={"comment": "фикс", "target_role": "frontend_dev"},
        )
        assert resp.status_code == 200
        roles = [step["role"] for step in resp.json()["pipeline_steps"]]
        assert roles == ["frontend_dev", "reviewer"]

    def test_revise_max_count_exceeded_returns_400(self, client):
        """After 5 revisions the next call is rejected with 400."""
        self._update_task(revise_count=5)
        resp = client.post("/api/tasks/P1-001/revise", json={"comment": "6-й"})
        assert resp.status_code == 400
        assert "Max revisions" in resp.json()["detail"]

    def test_revise_sets_status_in_progress(self, client):
        """After /revise the task moves to status in_progress."""
        resp = client.post("/api/tasks/P1-001/revise", json={"comment": "исправь"})
        assert resp.status_code == 200
        assert resp.json()["status"] == "in_progress"

    def test_revise_only_visible_for_review_done_tasks(self, client):
        """A task in status 'review' gets 200, not 404."""
        self._update_task(status="review")
        resp = client.post("/api/tasks/P1-001/revise", json={"comment": "review→revise"})
        assert resp.status_code == 200

    def test_revise_done_task_allowed(self, client):
        """A task in status 'done' can be revised as well."""
        self._update_task(status="done")
        resp = client.post("/api/tasks/P1-001/revise", json={"comment": "done→revise"})
        assert resp.status_code == 200
        assert resp.json()["status"] == "in_progress"
# ---------------------------------------------------------------------------
# (2) Auto-test before review — _run_project_tests, fix loop, block
# ---------------------------------------------------------------------------
class TestRunProjectTests:
    """(2) _run_project_tests: exit codes, missing make binary, timeouts."""

    @staticmethod
    def _proc(returncode, stdout="", stderr=""):
        """Fake CompletedProcess with the given exit code and output streams."""
        proc = MagicMock()
        proc.returncode = returncode
        proc.stdout = stdout
        proc.stderr = stderr
        return proc

    def test_returns_success_when_make_exits_0(self):
        """success=True when the test command exits with code 0."""
        from agents.runner import _run_project_tests

        ok = self._proc(0, stdout="All tests passed.")
        with patch("agents.runner.subprocess.run", return_value=ok):
            result = _run_project_tests("/fake/path")
        assert result["success"] is True
        assert "All tests passed." in result["output"]

    def test_returns_failure_when_make_exits_nonzero(self):
        """success=False when the test command exits non-zero."""
        from agents.runner import _run_project_tests

        failed = self._proc(2, stderr="FAILED 3 tests")
        with patch("agents.runner.subprocess.run", return_value=failed):
            result = _run_project_tests("/fake/path")
        assert result["success"] is False
        assert "FAILED" in result["output"]

    def test_handles_make_not_found(self):
        """success=False with returncode 127 when make is not installed."""
        from agents.runner import _run_project_tests

        with patch("agents.runner.subprocess.run", side_effect=FileNotFoundError):
            result = _run_project_tests("/fake/path")
        assert result["success"] is False
        assert result["returncode"] == 127

    def test_handles_timeout(self):
        """success=False with returncode 124 when the run times out."""
        from agents.runner import _run_project_tests

        timeout = subprocess.TimeoutExpired(cmd="make", timeout=120)
        with patch("agents.runner.subprocess.run", side_effect=timeout):
            result = _run_project_tests("/fake/path", timeout=120)
        assert result["success"] is False
        assert result["returncode"] == 124
def _mock_success(output="done"):
m = MagicMock()
m.stdout = json.dumps({"result": output})
m.stderr = ""
m.returncode = 0
return m
def _mock_failure(msg="error"):
m = MagicMock()
m.stdout = ""
m.stderr = msg
m.returncode = 1
return m
class TestAutoTestInPipeline:
    """Pipeline with auto_test_enabled: tests run automatically after the dev step."""

    # NOTE: @patch decorators apply bottom-up, so the mock arguments arrive in
    # the order subprocess.run, _run_project_tests, _run_autocommit.
    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_passes_pipeline_continues(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """If the auto-test passes, the pipeline finishes successfully."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        mock_tests.return_value = {"success": True, "output": "OK", "returncode": 0}
        models.update_project(conn, "vdol", auto_test_enabled=True)
        steps = [{"role": "backend_dev", "brief": "implement"}]
        result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        mock_tests.assert_called_once()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_disabled_not_called(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """If auto_test_enabled=False, make test is never invoked."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        # auto_test_enabled defaults to 0 — no update_project call here on purpose
        steps = [{"role": "backend_dev", "brief": "implement"}]
        run_pipeline(conn, "VDOL-001", steps)
        mock_tests.assert_not_called()

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_fail_triggers_fix_loop(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """If the auto-test fails, the fixer agent runs and the tests are retried."""
        from agents.runner import run_pipeline
        from core import models
        import os
        mock_run.return_value = _mock_success()
        # First test call fails, second passes
        mock_tests.side_effect = [
            {"success": False, "output": "FAILED: test_foo", "returncode": 1},
            {"success": True, "output": "OK", "returncode": 0},
        ]
        models.update_project(conn, "vdol", auto_test_enabled=True)
        with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "3"}):
            steps = [{"role": "backend_dev", "brief": "implement"}]
            result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is True
        # _run_project_tests called twice: initial check + after fix
        assert mock_tests.call_count == 2
        # subprocess.run called at least twice: backend_dev + fixer backend_dev
        assert mock_run.call_count >= 2

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_exhausted_blocks_task(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """If the auto-test fails max_attempts times, the task is blocked."""
        from agents.runner import run_pipeline
        from core import models
        import os
        mock_run.return_value = _mock_success()
        # Tests always fail
        mock_tests.return_value = {"success": False, "output": "FAILED", "returncode": 1}
        models.update_project(conn, "vdol", auto_test_enabled=True)
        with patch.dict(os.environ, {"KIN_AUTO_TEST_MAX_ATTEMPTS": "2"}):
            steps = [{"role": "backend_dev", "brief": "implement"}]
            result = run_pipeline(conn, "VDOL-001", steps)
        assert result["success"] is False
        task = models.get_task(conn, "VDOL-001")
        assert task["status"] == "blocked"
        assert "Auto-test" in (task.get("blocked_reason") or "")

    @patch("agents.runner._run_autocommit")
    @patch("agents.runner._run_project_tests")
    @patch("agents.runner.subprocess.run")
    def test_auto_test_not_triggered_for_non_dev_roles(
        self, mock_run, mock_tests, mock_autocommit, conn
    ):
        """auto_test runs only for backend_dev/frontend_dev, not for debugger."""
        from agents.runner import run_pipeline
        from core import models
        mock_run.return_value = _mock_success()
        models.update_project(conn, "vdol", auto_test_enabled=True)
        steps = [{"role": "debugger", "brief": "find"}]
        run_pipeline(conn, "VDOL-001", steps)
        mock_tests.assert_not_called()
# ---------------------------------------------------------------------------
# (3) Spec-driven workflow route
# ---------------------------------------------------------------------------
class TestSpecDrivenRoute:
    """(3) spec_driven route declared in specialists.yaml with the expected steps."""

    @staticmethod
    def _load_specialists():
        """Parse agents/specialists.yaml relative to the test directory."""
        import yaml

        spec_path = Path(__file__).parent.parent / "agents" / "specialists.yaml"
        with open(spec_path) as f:
            return yaml.safe_load(f)

    def test_spec_driven_route_exists(self):
        """The spec_driven route must be declared in specialists.yaml."""
        assert "spec_driven" in self._load_specialists().get("routes", {})

    def test_spec_driven_route_steps_order(self):
        """spec_driven route steps: [constitution, spec, architect, task_decomposer]."""
        steps = self._load_specialists()["routes"]["spec_driven"]["steps"]
        assert steps == ["constitution", "spec", "architect", "task_decomposer"]

    def test_spec_driven_all_roles_exist(self):
        """Every role referenced by the spec_driven route is a declared specialist."""
        data = self._load_specialists()
        declared = data.get("specialists", {})
        for role in data["routes"]["spec_driven"]["steps"]:
            assert role in declared, f"Role '{role}' missing from specialists"

    def test_constitution_role_has_output_schema(self):
        """constitution declares an output_schema with principles/constraints/goals."""
        schema = self._load_specialists()["specialists"]["constitution"].get("output_schema", {})
        for key in ("principles", "constraints", "goals"):
            assert key in schema

    def test_spec_role_has_output_schema(self):
        """spec declares an output_schema with overview/features/api_contracts."""
        schema = self._load_specialists()["specialists"]["spec"].get("output_schema", {})
        for key in ("overview", "features", "api_contracts"):
            assert key in schema
# ---------------------------------------------------------------------------
# (4) Git worktrees — create / merge / cleanup / ensure_gitignore
# ---------------------------------------------------------------------------
class TestCreateWorktree:
    """(4) create_worktree: success path, git failure, exception, branch sanitizing."""

    @staticmethod
    def _git_result(returncode, stderr=""):
        """Fake result object for a mocked git subprocess call."""
        res = MagicMock()
        res.returncode = returncode
        res.stderr = stderr
        return res

    def test_create_worktree_success(self, tmp_path):
        """Returns the worktree path when `git worktree add` succeeds."""
        from core.worktree import create_worktree

        with patch("core.worktree.subprocess.run", return_value=self._git_result(0)):
            path = create_worktree(str(tmp_path), "TASK-001", "backend_dev")
        assert path is not None
        assert "TASK-001-backend_dev" in path

    def test_create_worktree_git_failure_returns_none(self, tmp_path):
        """Returns None when `git worktree add` fails."""
        from core.worktree import create_worktree

        failed = self._git_result(128, stderr="fatal: branch already exists")
        with patch("core.worktree.subprocess.run", return_value=failed):
            assert create_worktree(str(tmp_path), "TASK-001", "backend_dev") is None

    def test_create_worktree_exception_returns_none(self, tmp_path):
        """Returns None (never raises) on an unexpected exception."""
        from core.worktree import create_worktree

        with patch("core.worktree.subprocess.run", side_effect=OSError("no git")):
            assert create_worktree(str(tmp_path), "TASK-001", "backend_dev") is None

    def test_create_worktree_branch_name_sanitized(self, tmp_path):
        """Slashes and spaces in the step name become '_' in the branch name."""
        from core.worktree import create_worktree

        recorded = []

        def record(*args, **kwargs):
            recorded.append(args[0])
            return self._git_result(0)

        with patch("core.worktree.subprocess.run", side_effect=record):
            create_worktree(str(tmp_path), "TASK-001", "step/with spaces")
        assert recorded
        first_cmd = recorded[0]
        branch = first_cmd[first_cmd.index("-b") + 1]
        assert "/" not in branch
        assert " " not in branch
class TestMergeWorktree:
    """(4) merge_worktree: clean merge, conflict reporting, exception safety."""

    def test_merge_success_returns_merged_files(self, tmp_path):
        """success=True plus the list of merged files on a clean merge."""
        from core.worktree import merge_worktree

        worktree = str(tmp_path / "TASK-001-backend_dev")
        git_calls = [
            MagicMock(returncode=0, stdout="", stderr=""),  # git merge succeeds
            MagicMock(returncode=0, stdout="src/api.py\nsrc/models.py\n", stderr=""),  # diff
        ]
        with patch("core.worktree.subprocess.run", side_effect=git_calls):
            result = merge_worktree(worktree, str(tmp_path))
        assert result["success"] is True
        assert "src/api.py" in result["merged_files"]
        assert result["conflicts"] == []

    def test_merge_conflict_returns_conflict_list(self, tmp_path):
        """success=False plus the list of conflicting files on a failed merge."""
        from core.worktree import merge_worktree

        worktree = str(tmp_path / "TASK-001-backend_dev")
        git_calls = [
            MagicMock(returncode=1, stdout="", stderr="CONFLICT"),         # merge fails
            MagicMock(returncode=0, stdout="src/models.py\n", stderr=""),  # conflict list
            MagicMock(returncode=0),                                       # merge --abort
        ]
        with patch("core.worktree.subprocess.run", side_effect=git_calls):
            result = merge_worktree(worktree, str(tmp_path))
        assert result["success"] is False
        assert "src/models.py" in result["conflicts"]

    def test_merge_exception_returns_success_false(self, tmp_path):
        """merge_worktree never raises; failures surface in the result dict."""
        from core.worktree import merge_worktree

        with patch("core.worktree.subprocess.run", side_effect=OSError("git died")):
            result = merge_worktree("/fake/wt", str(tmp_path))
        assert result["success"] is False
        assert "error" in result
class TestCleanupWorktree:
    """(4) cleanup_worktree: removes the worktree, deletes the branch, never raises."""

    def test_cleanup_calls_worktree_remove_and_branch_delete(self, tmp_path):
        """cleanup_worktree runs `git worktree remove` then `git branch -D`."""
        from core.worktree import cleanup_worktree

        recorded = []

        def record(*args, **kwargs):
            recorded.append(args[0])
            return MagicMock(returncode=0)

        with patch("core.worktree.subprocess.run", side_effect=record):
            cleanup_worktree("/fake/path/TASK-branch", str(tmp_path))
        assert len(recorded) == 2
        # First call: worktree remove.
        assert "worktree" in recorded[0]
        assert "remove" in recorded[0]
        # Second call: branch -D.
        assert "branch" in recorded[1]
        assert "-D" in recorded[1]

    def test_cleanup_never_raises(self, tmp_path):
        """Errors during cleanup are swallowed silently."""
        from core.worktree import cleanup_worktree

        with patch("core.worktree.subprocess.run", side_effect=OSError("crashed")):
            cleanup_worktree("/fake/wt", str(tmp_path))  # must pass quietly
class TestEnsureGitignore:
    """(4) ensure_gitignore: adds .kin_worktrees/ exactly once, never raises."""

    def test_adds_entry_to_existing_gitignore(self, tmp_path):
        """Appends .kin_worktrees/ to an already-existing .gitignore."""
        from core.worktree import ensure_gitignore

        gitignore = tmp_path / ".gitignore"
        gitignore.write_text("*.pyc\n__pycache__/\n")
        ensure_gitignore(str(tmp_path))
        assert ".kin_worktrees/" in gitignore.read_text()

    def test_creates_gitignore_if_missing(self, tmp_path):
        """Creates .gitignore when one does not exist yet."""
        from core.worktree import ensure_gitignore

        ensure_gitignore(str(tmp_path))
        gitignore = tmp_path / ".gitignore"
        assert gitignore.exists()
        assert ".kin_worktrees/" in gitignore.read_text()

    def test_skips_if_entry_already_present(self, tmp_path):
        """Does not duplicate the entry when it is already present."""
        from core.worktree import ensure_gitignore

        gitignore = tmp_path / ".gitignore"
        gitignore.write_text(".kin_worktrees/\n")
        ensure_gitignore(str(tmp_path))
        assert gitignore.read_text().count(".kin_worktrees/") == 1

    def test_never_raises_on_permission_error(self, tmp_path):
        """Write failures are swallowed silently."""
        from core.worktree import ensure_gitignore

        with patch("core.worktree.Path.open", side_effect=PermissionError):
            ensure_gitignore(str(tmp_path))  # must pass quietly
# ---------------------------------------------------------------------------
# (5) Auto-trigger pipeline — label 'auto'
# ---------------------------------------------------------------------------
class TestAutoTrigger:
    """(5) A task created with the 'auto' label launches the pipeline automatically."""

    @staticmethod
    def _create_task(client, title, labels=None):
        """POST /api/tasks for project p1, optionally with labels."""
        payload = {"project_id": "p1", "title": title}
        if labels is not None:
            payload["labels"] = labels
        return client.post("/api/tasks", json=payload)

    def test_task_with_auto_label_triggers_pipeline(self, client):
        """A task labelled 'auto' starts the pipeline in the background."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            resp = self._create_task(client, "Auto task", labels=["auto"])
            assert resp.status_code == 200
            mock_launch.assert_called_once()
            assert mock_launch.call_args[0][0].startswith("P1-")

    def test_task_without_auto_label_does_not_trigger(self, client):
        """A task without the 'auto' label does NOT start the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            resp = self._create_task(client, "Manual task", labels=["feature"])
            assert resp.status_code == 200
            mock_launch.assert_not_called()

    def test_task_without_labels_does_not_trigger(self, client):
        """A task with no labels at all does NOT start the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            resp = self._create_task(client, "Plain task")
            assert resp.status_code == 200
            mock_launch.assert_not_called()

    def test_task_with_auto_among_multiple_labels_triggers(self, client):
        """'auto' among several labels still triggers the pipeline."""
        with patch("web.api._launch_pipeline_subprocess") as mock_launch:
            resp = self._create_task(
                client, "Multi-label auto task", labels=["feature", "auto", "backend"]
            )
            assert resp.status_code == 200
            mock_launch.assert_called_once()