2026-03-15 15:16:48 +02:00
|
|
|
|
"""Tests for core/followup.py — follow-up task generation with permission handling."""
|
Add follow-up task generation on approve
When approving a task, PM agent analyzes pipeline output and creates
follow-up tasks automatically (e.g. security audit → 8 fix tasks).
core/followup.py:
generate_followups() — collects pipeline output, runs followup agent,
parses JSON task list, creates tasks with parent_task_id linkage.
Handles: bare arrays, {tasks:[...]} wrappers, invalid JSON, empty.
agents/prompts/followup.md — PM prompt for analyzing results and
creating actionable follow-up tasks with priority from severity.
CLI: kin approve <task_id> [--followup] [--decision "text"]
API: POST /api/tasks/{id}/approve {create_followups: true}
Returns {status, decision, followup_tasks: [...]}
Frontend (TaskDetail approve modal):
- Checkbox "Create follow-up tasks" (default ON)
- Loading state during generation
- Results view: list of created tasks with links to /task/:id
ProjectView: tasks show "from VDOL-001" for follow-ups.
13 new tests (followup), 125 total, all passing.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 15:02:58 +02:00
|
|
|
|
|
|
|
|
|
|
import json
|
|
|
|
|
|
import pytest
|
|
|
|
|
|
from unittest.mock import patch, MagicMock
|
|
|
|
|
|
|
|
|
|
|
|
from core.db import init_db
|
|
|
|
|
|
from core import models
|
2026-03-15 15:16:48 +02:00
|
|
|
|
from core.followup import (
|
2026-03-15 19:49:34 +02:00
|
|
|
|
generate_followups, resolve_pending_action, auto_resolve_pending_actions,
|
2026-03-15 15:16:48 +02:00
|
|
|
|
_collect_pipeline_output, _next_task_id, _is_permission_blocked,
|
|
|
|
|
|
)
|
Add follow-up task generation on approve
When approving a task, PM agent analyzes pipeline output and creates
follow-up tasks automatically (e.g. security audit → 8 fix tasks).
core/followup.py:
generate_followups() — collects pipeline output, runs followup agent,
parses JSON task list, creates tasks with parent_task_id linkage.
Handles: bare arrays, {tasks:[...]} wrappers, invalid JSON, empty.
agents/prompts/followup.md — PM prompt for analyzing results and
creating actionable follow-up tasks with priority from severity.
CLI: kin approve <task_id> [--followup] [--decision "text"]
API: POST /api/tasks/{id}/approve {create_followups: true}
Returns {status, decision, followup_tasks: [...]}
Frontend (TaskDetail approve modal):
- Checkbox "Create follow-up tasks" (default ON)
- Loading state during generation
- Results view: list of created tasks with links to /task/:id
ProjectView: tasks show "from VDOL-001" for follow-ups.
13 new tests (followup), 125 total, all passing.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 15:02:58 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def conn():
    """In-memory DB seeded with one project, one done task, and one agent log.

    Layout:
      - project "vdol" (Russian-language, vue3 stack)
      - task VDOL-001 "Security audit", status=done
      - one security-agent run for VDOL-001 whose output_summary contains
        two findings (HIGH + MEDIUM) — the raw material for follow-ups.

    Yields the open connection and closes it on teardown.
    """
    c = init_db(":memory:")
    models.create_project(c, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"], language="ru")
    models.create_task(c, "VDOL-001", "vdol", "Security audit",
                       status="done", brief={"route_type": "security_audit"})
    models.log_agent_run(c, "vdol", "security", "execute",
                         task_id="VDOL-001",
                         output_summary=json.dumps({
                             "summary": "8 уязвимостей найдено",
                             "findings": [
                                 {"severity": "HIGH", "title": "Admin endpoint без auth",
                                  "file": "index.js", "line": 42},
                                 {"severity": "MEDIUM", "title": "Нет rate limiting на login",
                                  "file": "auth.js", "line": 15},
                             ],
                         }, ensure_ascii=False),
                         success=True)
    yield c
    c.close()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestCollectPipelineOutput:
    """_collect_pipeline_output(): gathering agent-log output for a task."""

    def test_collects_all_steps(self, conn):
        """Output for VDOL-001 includes the agent role and finding titles."""
        output = _collect_pipeline_output(conn, "VDOL-001")
        assert "security" in output
        assert "Admin endpoint" in output

    def test_empty_for_no_logs(self, conn):
        """A task with no agent logs yields an empty string."""
        assert _collect_pipeline_output(conn, "NONEXISTENT") == ""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestNextTaskId:
    """_next_task_id(): sequential PROJ-NNN id generation."""

    def test_increments(self, conn):
        """VDOL-001 exists in the fixture, so the next id is VDOL-002."""
        assert _next_task_id(conn, "vdol") == "VDOL-002"

    def test_handles_obs_ids(self, conn):
        """Non-numeric suffixes like VDOL-OBS-001 must not break the counter."""
        models.create_task(conn, "VDOL-OBS-001", "vdol", "Obsidian task")
        assert _next_task_id(conn, "vdol") == "VDOL-002"
|
|
|
|
|
|
|
|
|
|
|
|
|
2026-03-15 15:16:48 +02:00
|
|
|
|
class TestIsPermissionBlocked:
    """_is_permission_blocked(): detecting permission-blocked follow-up items.

    The detector must fire on English and Russian permission phrases in
    either the title or the brief, and stay quiet on normal items.
    """

    def test_detects_permission_denied(self):
        """English 'permission denied' in the brief is detected."""
        assert _is_permission_blocked({"title": "Fix X", "brief": "permission denied on write"})

    def test_detects_manual_application_ru(self):
        """Russian 'manual application' phrasing in the title is detected."""
        assert _is_permission_blocked({"title": "Ручное применение фикса для auth.js"})

    def test_detects_no_write_permission_ru(self):
        """Russian 'no write permission' phrasing in the brief is detected."""
        assert _is_permission_blocked({"title": "X", "brief": "не получили разрешение на запись"})

    def test_detects_read_only(self):
        """'read-only' in the brief is detected."""
        assert _is_permission_blocked({"title": "Apply manually", "brief": "file is read-only"})

    def test_normal_item_not_blocked(self):
        """An ordinary actionable item is not flagged."""
        assert not _is_permission_blocked({"title": "Fix admin auth", "brief": "Add requireAuth"})

    def test_empty_item(self):
        """An empty dict (no title/brief) is not flagged."""
        assert not _is_permission_blocked({})
|
|
|
|
|
|
|
|
|
|
|
|
|
Add follow-up task generation on approve
When approving a task, PM agent analyzes pipeline output and creates
follow-up tasks automatically (e.g. security audit → 8 fix tasks).
core/followup.py:
generate_followups() — collects pipeline output, runs followup agent,
parses JSON task list, creates tasks with parent_task_id linkage.
Handles: bare arrays, {tasks:[...]} wrappers, invalid JSON, empty.
agents/prompts/followup.md — PM prompt for analyzing results and
creating actionable follow-up tasks with priority from severity.
CLI: kin approve <task_id> [--followup] [--decision "text"]
API: POST /api/tasks/{id}/approve {create_followups: true}
Returns {status, decision, followup_tasks: [...]}
Frontend (TaskDetail approve modal):
- Checkbox "Create follow-up tasks" (default ON)
- Loading state during generation
- Results view: list of created tasks with links to /task/:id
ProjectView: tasks show "from VDOL-001" for follow-ups.
13 new tests (followup), 125 total, all passing.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-15 15:02:58 +02:00
|
|
|
|
class TestGenerateFollowups:
    """generate_followups(): parsing agent output and creating child tasks.

    The followup agent (mocked via agents.runner._run_claude) returns JSON;
    generate_followups must handle bare arrays, {tasks: [...]} wrappers,
    invalid JSON, empty output, and separate permission-blocked items into
    pending_actions instead of creating tasks for them.
    """

    @patch("agents.runner._run_claude")
    def test_creates_followup_tasks(self, mock_claude, conn):
        """Each item in a bare JSON array becomes a task linked to the parent."""
        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Fix admin auth", "type": "hotfix", "priority": 2,
                 "brief": "Add requireAuth to admin endpoints"},
                {"title": "Add rate limiting", "type": "feature", "priority": 4,
                 "brief": "Rate limit login to 5/15min"},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 2
        assert len(result["pending_actions"]) == 0
        assert result["created"][0]["id"] == "VDOL-002"
        assert result["created"][0]["parent_task_id"] == "VDOL-001"

    @patch("agents.runner._run_claude")
    def test_separates_permission_items(self, mock_claude, conn):
        """Permission-blocked items go to pending_actions, not created tasks."""
        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Fix admin auth", "type": "hotfix", "priority": 2,
                 "brief": "Add requireAuth"},
                {"title": "Ручное применение .dockerignore",
                 "type": "hotfix", "priority": 3,
                 "brief": "Не получили разрешение на запись в файл"},
                {"title": "Apply CSP headers manually",
                 "type": "feature", "priority": 4,
                 "brief": "Permission denied writing nginx.conf"},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 1  # Only "Fix admin auth"
        assert result["created"][0]["title"] == "Fix admin auth"
        assert len(result["pending_actions"]) == 2
        assert result["pending_actions"][0]["type"] == "permission_fix"
        assert "options" in result["pending_actions"][0]
        assert "rerun" in result["pending_actions"][0]["options"]

    @patch("agents.runner._run_claude")
    def test_handles_empty_response(self, mock_claude, conn):
        """An empty JSON array produces no tasks and no pending actions."""
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        result = generate_followups(conn, "VDOL-001")
        assert result["created"] == []
        assert result["pending_actions"] == []

    @patch("agents.runner._run_claude")
    def test_handles_wrapped_response(self, mock_claude, conn):
        """A {"tasks": [...]} wrapper object is unwrapped and processed."""
        mock_claude.return_value = {
            "output": json.dumps({"tasks": [
                {"title": "Fix X", "priority": 3},
            ]}),
            "returncode": 0,
        }
        result = generate_followups(conn, "VDOL-001")
        assert len(result["created"]) == 1

    @patch("agents.runner._run_claude")
    def test_handles_invalid_json(self, mock_claude, conn):
        """Non-JSON agent output degrades to an empty result, not an error."""
        mock_claude.return_value = {"output": "not json", "returncode": 0}
        result = generate_followups(conn, "VDOL-001")
        assert result["created"] == []

    def test_no_logs_returns_empty(self, conn):
        """A task with no pipeline logs yields no follow-ups (agent not needed)."""
        models.create_task(conn, "VDOL-999", "vdol", "Empty task")
        result = generate_followups(conn, "VDOL-999")
        assert result["created"] == []

    def test_nonexistent_task(self, conn):
        """An unknown task id yields an empty result."""
        result = generate_followups(conn, "NOPE")
        assert result["created"] == []

    def test_dry_run(self, conn):
        """dry_run=True returns would-be tasks marked _dry_run without persisting."""
        result = generate_followups(conn, "VDOL-001", dry_run=True)
        assert len(result["created"]) == 1
        assert result["created"][0]["_dry_run"] is True

    @patch("agents.runner._run_claude")
    def test_logs_generation(self, mock_claude, conn):
        """The generation run itself is logged under agent_role='followup_pm'."""
        mock_claude.return_value = {
            "output": json.dumps([{"title": "Fix A", "priority": 2}]),
            "returncode": 0,
        }
        generate_followups(conn, "VDOL-001")

        logs = conn.execute(
            "SELECT * FROM agent_logs WHERE agent_role='followup_pm'"
        ).fetchall()
        assert len(logs) == 1

    @patch("agents.runner._run_claude")
    def test_prompt_includes_language(self, mock_claude, conn):
        """The project language ('ru') is surfaced in the agent prompt."""
        mock_claude.return_value = {"output": "[]", "returncode": 0}
        generate_followups(conn, "VDOL-001")
        prompt = mock_claude.call_args[0][0]
        assert "Russian" in prompt
|
2026-03-15 15:16:48 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestResolvePendingAction:
    """resolve_pending_action(): the skip / manual_task / rerun decisions."""

    def test_skip_returns_none(self, conn):
        """'skip' resolves to None — nothing is created or executed."""
        action = {"type": "permission_fix", "original_item": {"title": "X"}}
        assert resolve_pending_action(conn, "VDOL-001", action, "skip") is None

    def test_manual_task_creates_task(self, conn):
        """'manual_task' creates a child task from the original item."""
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix .dockerignore", "type": "hotfix",
                              "priority": 3, "brief": "Create .dockerignore"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result is not None
        assert result["title"] == "Fix .dockerignore"
        assert result["parent_task_id"] == "VDOL-001"
        assert result["priority"] == 3

    @patch("agents.runner._run_claude")
    def test_rerun_launches_pipeline(self, mock_claude, conn):
        """'rerun' re-executes the item through the agent and reports success."""
        mock_claude.return_value = {
            "output": json.dumps({"result": "applied fix"}),
            "returncode": 0,
        }
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix X", "type": "frontend_dev",
                              "brief": "Apply the fix"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "rerun")
        assert "rerun_result" in result
        # NOTE(review): _run_claude is mocked above the subprocess layer, so
        # the write-permission flag it would receive is not observable here;
        # we assert on the rerun outcome instead.
        assert result["rerun_result"]["success"] is True

    def test_manual_task_brief_has_task_type_manual_escalation(self, conn):
        """brief["task_type"] must be 'manual_escalation' — KIN-020."""
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix .dockerignore", "type": "hotfix",
                              "priority": 3, "brief": "Create .dockerignore"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result is not None
        assert result["brief"]["task_type"] == "manual_escalation"

    def test_manual_task_brief_includes_source(self, conn):
        """brief["source"] must reference the parent task — KIN-020."""
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix X"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result["brief"]["source"] == "followup:VDOL-001"

    def test_manual_task_brief_includes_description(self, conn):
        """brief["description"] is copied from original_item.brief — KIN-020."""
        action = {
            "type": "permission_fix",
            "original_item": {"title": "Fix Y", "brief": "Detailed context here"},
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")
        assert result["brief"]["description"] == "Detailed context here"

    def test_nonexistent_task(self, conn):
        """An unknown parent task id resolves to None."""
        action = {"type": "permission_fix", "original_item": {}}
        assert resolve_pending_action(conn, "NOPE", action, "skip") is None
|
2026-03-15 19:49:34 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestAutoResolvePendingActions:
    """auto_resolve_pending_actions(): try rerun first, escalate on failure."""

    @patch("agents.runner._run_claude")
    def test_rerun_success_resolves_as_rerun(self, mock_claude, conn):
        """A successful rerun resolves the action as 'rerun'."""
        mock_claude.return_value = {
            "output": json.dumps({"result": "fixed"}),
            "returncode": 0,
        }
        action = {
            "type": "permission_fix",
            "description": "Fix X",
            "original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"},
            "options": ["rerun", "manual_task", "skip"],
        }
        results = auto_resolve_pending_actions(conn, "VDOL-001", [action])

        assert len(results) == 1
        assert results[0]["resolved"] == "rerun"

    @patch("agents.runner._run_claude")
    def test_rerun_failure_escalates_to_manual_task(self, mock_claude, conn):
        """A failed rerun (non-zero returncode) escalates to a manual task."""
        mock_claude.return_value = {"output": "", "returncode": 1}
        action = {
            "type": "permission_fix",
            "description": "Fix X",
            "original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"},
            "options": ["rerun", "manual_task", "skip"],
        }
        results = auto_resolve_pending_actions(conn, "VDOL-001", [action])

        assert len(results) == 1
        assert results[0]["resolved"] == "manual_task"
        # The escalated manual task must actually be persisted in the DB.
        tasks = models.list_tasks(conn, project_id="vdol")
        assert len(tasks) == 2  # VDOL-001 + the new manual task

    @patch("agents.runner._run_claude")
    def test_escalated_manual_task_has_task_type_manual_escalation(self, mock_claude, conn):
        """The escalated task carries brief.task_type='manual_escalation' — KIN-020."""
        mock_claude.return_value = {"output": "", "returncode": 1}
        action = {
            "type": "permission_fix",
            "description": "Fix X",
            "original_item": {"title": "Fix X", "type": "frontend_dev", "brief": "Apply fix"},
            "options": ["rerun", "manual_task", "skip"],
        }
        results = auto_resolve_pending_actions(conn, "VDOL-001", [action])

        assert results[0]["resolved"] == "manual_task"
        created_task = results[0]["result"]
        assert created_task["brief"]["task_type"] == "manual_escalation"

    @patch("agents.runner._run_claude")
    def test_empty_pending_actions(self, mock_claude, conn):
        """An empty action list yields an empty result and no agent calls."""
        results = auto_resolve_pending_actions(conn, "VDOL-001", [])
        assert results == []
        mock_claude.assert_not_called()
|
2026-03-16 08:41:24 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
# KIN-068 — category наследуется при создании followup и manual задач
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
class TestNextTaskIdWithCategory:
    """_next_task_id() with category generates ids in PROJ-CAT-NNN form (KIN-068)."""

    @pytest.mark.parametrize("category,expected_prefix", [
        ("SEC", "VDOL-SEC-"),
        ("UI", "VDOL-UI-"),
        ("API", "VDOL-API-"),
        ("INFRA", "VDOL-INFRA-"),
        ("BIZ", "VDOL-BIZ-"),
    ])
    def test_with_category_produces_cat_format(self, conn, category, expected_prefix):
        """With a category the id is PROJ-CAT-NNN with a 3-digit numeric suffix."""
        result = _next_task_id(conn, "vdol", category=category)
        assert result.startswith(expected_prefix)
        suffix = result[len(expected_prefix):]
        assert suffix.isdigit() and len(suffix) == 3

    def test_with_none_category_produces_plain_format(self, conn):
        """category=None keeps the legacy PROJ-NNN format (backward compat)."""
        result = _next_task_id(conn, "vdol", category=None)
        # VDOL-001 already exists → next is VDOL-002
        assert result == "VDOL-002"
        parts = result.split("-")
        assert len(parts) == 2
        assert parts[1].isdigit()

    def test_first_cat_task_is_001(self, conn):
        """The first task of a category always gets number 001."""
        result = _next_task_id(conn, "vdol", category="DB")
        assert result == "VDOL-DB-001"

    def test_cat_counter_is_per_category(self, conn):
        """Counters are independent per category."""
        models.create_task(conn, "VDOL-SEC-001", "vdol", "Security task", category="SEC")
        assert _next_task_id(conn, "vdol", category="SEC") == "VDOL-SEC-002"
        assert _next_task_id(conn, "vdol", category="UI") == "VDOL-UI-001"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
class TestFollowupCategoryInheritance:
    """Regression tests for KIN-068: follow-up tasks inherit the parent's category."""

    @pytest.mark.parametrize("category", ["SEC", "UI", "API", "INFRA", "BIZ", None])
    @patch("agents.runner._run_claude")
    def test_generate_followups_followup_inherits_category(
        self, mock_claude, category, conn
    ):
        """A follow-up task inherits the parent task's category (including None)."""
        # Set the category on the parent task first.
        models.update_task(conn, "VDOL-001", category=category)

        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Followup task", "type": "feature", "priority": 3},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 1
        followup = result["created"][0]

        # Category must match the parent task.
        assert followup["category"] == category

        # The id must follow the matching format.
        if category:
            assert followup["id"].startswith(f"VDOL-{category}-"), (
                f"Ожидался ID вида VDOL-{category}-NNN, получен {followup['id']!r}"
            )
        else:
            # Without a category: legacy VDOL-NNN format.
            parts = followup["id"].split("-")
            assert len(parts) == 2, (
                f"Ожидался ID вида VDOL-NNN (2 части), получен {followup['id']!r}"
            )
            assert parts[1].isdigit()

    @pytest.mark.parametrize("category", ["SEC", "UI", "API", "INFRA", "BIZ", None])
    def test_resolve_pending_action_manual_task_inherits_category(
        self, category, conn
    ):
        """A manual_task from resolve_pending_action inherits the parent's category."""
        models.update_task(conn, "VDOL-001", category=category)

        action = {
            "type": "permission_fix",
            "original_item": {
                "title": "Fix manually",
                "type": "hotfix",
                "priority": 4,
                "brief": "Apply permissions fix",
            },
        }
        result = resolve_pending_action(conn, "VDOL-001", action, "manual_task")

        assert result is not None
        assert result["category"] == category

        if category:
            assert result["id"].startswith(f"VDOL-{category}-"), (
                f"Ожидался ID вида VDOL-{category}-NNN, получен {result['id']!r}"
            )
        else:
            parts = result["id"].split("-")
            assert len(parts) == 2
            assert parts[1].isdigit()

    @patch("agents.runner._run_claude")
    def test_generate_followups_sec_category_id_format(self, mock_claude, conn):
        """Regression KIN-068: a follow-up under category=SEC gets id VDOL-SEC-001."""
        models.update_task(conn, "VDOL-001", category="SEC")

        mock_claude.return_value = {
            "output": json.dumps([{"title": "Fix SQL injection", "priority": 2}]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 1
        followup = result["created"][0]
        assert followup["id"] == "VDOL-SEC-001"
        assert followup["category"] == "SEC"

    @patch("agents.runner._run_claude")
    def test_generate_followups_multiple_followups_same_category(self, mock_claude, conn):
        """Several follow-ups under one category receive incrementing numbers."""
        models.update_task(conn, "VDOL-001", category="API")

        mock_claude.return_value = {
            "output": json.dumps([
                {"title": "Add auth header", "priority": 2},
                {"title": "Add rate limit", "priority": 3},
            ]),
            "returncode": 0,
        }

        result = generate_followups(conn, "VDOL-001")

        assert len(result["created"]) == 2
        ids = [t["id"] for t in result["created"]]
        assert ids[0] == "VDOL-API-001"
        assert ids[1] == "VDOL-API-002"
        for t in result["created"]:
            assert t["category"] == "API"
|