Add follow-up task generation on approve
When approving a task, PM agent analyzes pipeline output and creates
follow-up tasks automatically (e.g. security audit → 8 fix tasks).
core/followup.py:
generate_followups() — collects pipeline output, runs followup agent,
parses JSON task list, creates tasks with parent_task_id linkage.
Handles: bare arrays, {tasks:[...]} wrappers, invalid JSON, empty.
agents/prompts/followup.md — PM prompt for analyzing results and
creating actionable follow-up tasks with priority from severity.
CLI: kin approve <task_id> [--followup] [--decision "text"]
API: POST /api/tasks/{id}/approve {create_followups: true}
Returns {status, decision, followup_tasks: [...]}
Frontend (TaskDetail approve modal):
- Checkbox "Create follow-up tasks" (default ON)
- Loading state during generation
- Results view: list of created tasks with links to /task/:id
ProjectView: tasks show "from VDOL-001" for follow-ups.
13 new tests (followup), 125 total, all passing.
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
f7830d484c
commit
9264415776
8 changed files with 426 additions and 17 deletions
141
tests/test_followup.py
Normal file
141
tests/test_followup.py
Normal file
|
|
@ -0,0 +1,141 @@
|
|||
"""Tests for core/followup.py — follow-up task generation."""
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from core.db import init_db
|
||||
from core import models
|
||||
from core.followup import generate_followups, _collect_pipeline_output, _next_task_id
|
||||
|
||||
|
||||
@pytest.fixture
def conn():
    """In-memory DB seeded with one project, one done task, and one agent log.

    Yields the open connection and closes it after the test.
    """
    db = init_db(":memory:")
    models.create_project(db, "vdol", "ВДОЛЬ", "~/projects/vdolipoperek",
                          tech_stack=["vue3"], language="ru")
    models.create_task(db, "VDOL-001", "vdol", "Security audit",
                       status="done", brief={"route_type": "security_audit"})

    # Simulate a completed security-audit pipeline step with findings.
    findings = [
        {"severity": "HIGH", "title": "Admin endpoint без auth",
         "file": "index.js", "line": 42},
        {"severity": "HIGH", "title": "SEO endpoints без auth",
         "file": "index.js", "line": 88},
        {"severity": "MEDIUM", "title": "Нет rate limiting на login",
         "file": "auth.js", "line": 15},
    ]
    summary = json.dumps(
        {"summary": "8 уязвимостей найдено", "findings": findings},
        ensure_ascii=False,
    )
    models.log_agent_run(db, "vdol", "security", "execute",
                         task_id="VDOL-001",
                         output_summary=summary,
                         success=True)

    yield db
    db.close()
||||
class TestCollectPipelineOutput:
    """_collect_pipeline_output gathers logged agent output for a task."""

    def test_collects_all_steps(self, conn):
        # Output should mention both the agent role and the finding text.
        text = _collect_pipeline_output(conn, "VDOL-001")
        for fragment in ("security", "Admin endpoint"):
            assert fragment in text

    def test_empty_for_no_logs(self, conn):
        # A task with no logged runs yields an empty string.
        assert _collect_pipeline_output(conn, "NONEXISTENT") == ""
||||
class TestNextTaskId:
    """_next_task_id produces the next sequential id for a project."""

    def test_increments(self, conn):
        # VDOL-001 exists, so the next id is VDOL-002.
        assert _next_task_id(conn, "vdol") == "VDOL-002"

    def test_handles_obs_ids(self, conn):
        # OBS-prefixed ids must not disturb the numeric sequence.
        models.create_task(conn, "VDOL-OBS-001", "vdol", "Obsidian task")
        assert _next_task_id(conn, "vdol") == "VDOL-002"
||||
class TestGenerateFollowups:
    """generate_followups parses the PM agent reply and creates linked tasks."""

    @patch("agents.runner._run_claude")
    def test_creates_followup_tasks(self, fake_claude, conn):
        # Agent returns a bare JSON array of task specs.
        reply = [
            {"title": "Fix admin auth", "type": "hotfix", "priority": 2,
             "brief": "Add requireAuth to admin endpoints"},
            {"title": "Add rate limiting", "type": "feature", "priority": 4,
             "brief": "Rate limit login to 5/15min"},
        ]
        fake_claude.return_value = {"output": json.dumps(reply), "returncode": 0}

        created = generate_followups(conn, "VDOL-001")

        assert len(created) == 2
        first, second = created
        assert (first["id"], second["id"]) == ("VDOL-002", "VDOL-003")
        assert first["title"] == "Fix admin auth"
        assert first["priority"] == 2
        # Both new tasks are linked back to the approved task.
        assert first["parent_task_id"] == "VDOL-001"
        assert second["parent_task_id"] == "VDOL-001"
        # The brief carries provenance and routing info.
        assert first["brief"]["source"] == "followup:VDOL-001"
        assert first["brief"]["route_type"] == "hotfix"

    @patch("agents.runner._run_claude")
    def test_handles_empty_response(self, fake_claude, conn):
        fake_claude.return_value = {"output": "[]", "returncode": 0}
        assert generate_followups(conn, "VDOL-001") == []

    @patch("agents.runner._run_claude")
    def test_handles_wrapped_response(self, fake_claude, conn):
        """PM might return {tasks: [...]} instead of a bare array."""
        wrapped = {"tasks": [{"title": "Fix X", "priority": 3}]}
        fake_claude.return_value = {
            "output": json.dumps(wrapped),
            "returncode": 0,
        }
        assert len(generate_followups(conn, "VDOL-001")) == 1

    @patch("agents.runner._run_claude")
    def test_handles_invalid_json(self, fake_claude, conn):
        # Unparseable output degrades to "no follow-ups", not an exception.
        fake_claude.return_value = {"output": "not json", "returncode": 0}
        assert generate_followups(conn, "VDOL-001") == []

    def test_no_logs_returns_empty(self, conn):
        # A task with no pipeline output produces no follow-ups.
        models.create_task(conn, "VDOL-999", "vdol", "Empty task")
        assert generate_followups(conn, "VDOL-999") == []

    def test_nonexistent_task(self, conn):
        assert generate_followups(conn, "NOPE") == []

    def test_dry_run(self, conn):
        # Dry-run returns the prompt that would be sent, without calling the agent.
        result = generate_followups(conn, "VDOL-001", dry_run=True)
        assert len(result) == 1
        entry = result[0]
        assert entry["_dry_run"] is True
        prompt = entry["_prompt"]
        assert "followup" in prompt.lower() or "Previous step output" in prompt

    @patch("agents.runner._run_claude")
    def test_logs_generation(self, fake_claude, conn):
        # Each generation run is recorded under the followup_pm role.
        fake_claude.return_value = {
            "output": json.dumps([{"title": "Fix A", "priority": 2}]),
            "returncode": 0,
        }
        generate_followups(conn, "VDOL-001")

        rows = conn.execute(
            "SELECT * FROM agent_logs WHERE agent_role='followup_pm'"
        ).fetchall()
        assert len(rows) == 1
        assert rows[0]["task_id"] == "VDOL-001"

    @patch("agents.runner._run_claude")
    def test_prompt_includes_language(self, fake_claude, conn):
        """Followup prompt should include language instruction."""
        fake_claude.return_value = {"output": "[]", "returncode": 0}
        generate_followups(conn, "VDOL-001")

        # First positional argument of the agent call is the rendered prompt.
        prompt = fake_claude.call_args[0][0]
        assert "Russian" in prompt
Loading…
Add table
Add a link
Reference in a new issue